diff --git a/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/INSTALLER b/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/LICENSE b/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..7a66c512bd209f1994d601bfdfa9d947654d98d3 --- /dev/null +++ b/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Kota Yamaguchi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/METADATA b/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..5508866af760265f8f0fbe1aee3715bba45b2be7 --- /dev/null +++ b/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/METADATA @@ -0,0 +1,111 @@ +Metadata-Version: 2.2 +Name: faiss-cpu +Version: 1.10.0 +Summary: A library for efficient similarity search and clustering of dense vectors. +Author-email: Kota Yamaguchi +License: MIT License +Project-URL: Repository, https://github.com/kyamagu/faiss-wheels +Keywords: faiss,similarity search,clustering,machine learning +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Requires-Python: >=3.9 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: numpy<3.0,>=1.25.0 +Requires-Dist: packaging + +# faiss-wheels + +[![Build](https://github.com/kyamagu/faiss-wheels/actions/workflows/build.yml/badge.svg)](https://github.com/kyamagu/faiss-wheels/actions/workflows/build.yml) +[![PyPI](https://img.shields.io/pypi/v/faiss-cpu?label=faiss-cpu)](https://pypi.org/project/faiss-cpu/) + +faiss python wheel packages. 
+ +- [faiss](https://github.com/facebookresearch/faiss) + +## Overview + +This repository provides scripts to build wheel packages for the +[faiss](https://github.com/facebookresearch/faiss) library. + +- Builds CPU-only version with [cibuildwheel](https://github.com/pypa/cibuildwheel/). +- Bundles OpenBLAS in Linux/Windows +- Uses Accelerate framework in macOS + +There is also a source package to customize the build process. + +> **Note** +> GPU binary package is discontinued as of 1.7.3 release. Build a source package to support GPU features. + +### Install + +Install the CPU-only binary package by: + +```bash +pip install faiss-cpu +``` + +Note that the package name is `faiss-cpu`. + +## Supporting GPU or customized build configuration + +The PyPI binary package does not support GPU. +To support GPU methods or use faiss with different build configuration, build a source package. +For building the source package, swig 3.0.12 or later needs to be available. +Also, there should be all the required prerequisites for building faiss itself, such as `nvcc` and CUDA toolkit. + +## Building faiss + +The source package assumes faiss is already built and installed in the system. +If not done so elsewhere, build and install the faiss library first. +The following example builds and installs faiss with GPU support and avx512 instruction set. + +```bash +git clone https://github.com/facebookresearch/faiss.git +cd faiss +cmake . -B build -DFAISS_ENABLE_GPU=ON -DFAISS_ENABLE_PYTHON=OFF -DFAISS_OPT_LEVEL=avx512 +cmake --build build --config Release -j +cmake --install build install +cd .. +``` + +See the official +[faiss installation instruction](https://github.com/facebookresearch/faiss/blob/master/INSTALL.md) +for more on how to build and install faiss. + +### Building a source package + +Once faiss is built and installed, build the source package. +The following builds and installs the faiss-cpu source package with GPU and AVX512. 
+ +```bash +export FAISS_ENABLE_GPU=ON FAISS_OPT_LEVEL=avx512 +pip install --no-binary :all: faiss-cpu +``` + +There are a few environment variables that specifies build-time options. +- `FAISS_INSTALL_PREFIX`: Specifies the install location of faiss library, default to `/usr/local`. +- `FAISS_OPT_LEVEL`: Faiss SIMD optimization, one of `generic`, `avx2`, `avx512`. Note that AVX option is only available in x86_64 arch. +- `FAISS_ENABLE_GPU`: Setting this variable to `ON` builds GPU wrappers. Set this variable if faiss is built with GPU support. +- `CUDA_HOME`: Specifies CUDA install location for building GPU wrappers, default to `/usr/local/cuda`. + +## Development + +This repository is intended to support PyPI distribution for the official [faiss](https://github.com/facebookresearch/faiss) library. +The repository contains the CI workflow based on [cibuildwheel](https://github.com/pypa/cibuildwheel/). +Feel free to make a pull request to fix packaging problems. + +Other relevant resources: + +- [Packaging projects with GPU code](https://pypackaging-native.github.io/key-issues/gpus/) diff --git a/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/RECORD b/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..e94a71de1a765acd6cc1869e6f677f347c2ee612 --- /dev/null +++ b/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/RECORD @@ -0,0 +1,42 @@ +faiss/__init__.py,sha256=tA8_xJ3pt4LyKMXEgiwwuiqc9c2q4F9ZGBOlOTyj9J8,12352 +faiss/_swigfaiss.cpython-310-x86_64-linux-gnu.so,sha256=hLiUl7bZ6Ppd_g6Xe55pQ19es3e42AAOSr4qTGu8haw,41262577 +faiss/_swigfaiss_avx2.cpython-310-x86_64-linux-gnu.so,sha256=mbyaghOqhxRQEOTx6REQRmDaBH-Vf989ZYWpVAQs9Cg,41041497 +faiss/_swigfaiss_avx512.cpython-310-x86_64-linux-gnu.so,sha256=Gl4QVU_KNxLoxaV4wPy7POz8bnaAyoKcTeh69lRH-Yk,42048937 +faiss/array_conversions.py,sha256=D4nCV39t03NR46z_DutDUnzO3JZi7_4w0D5F6HKVFhA,5103 
+faiss/class_wrappers.py,sha256=ReYh4z4HoAVlRW4rZ1cQy0Hi2z0Tz4aDU_DwIKvOfCg,48267 +faiss/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +faiss/contrib/big_batch_search.py,sha256=L24CEMtqbls_4nnFYxmdoTG9Odt26N4z6yRw7QjdrZA,17640 +faiss/contrib/client_server.py,sha256=vryOc223DtDbIl-5dI9AS2_zLe9PSyyvS8HUkIiJlIs,2774 +faiss/contrib/clustering.py,sha256=yqnMrpmOk2Ds_3AVt4Ka54sfb5On0azsO0qcdx0pXms,12699 +faiss/contrib/datasets.py,sha256=W-Y_T8JBXSaTqKwaNl_DvtVepd71OS34Dp4UpVIyaFQ,12273 +faiss/contrib/evaluation.py,sha256=YCK_WdDOmuk3YTywCyVk_7bwMjrOwz5LS7o3XCO064U,14995 +faiss/contrib/exhaustive_search.py,sha256=Hw7S_DOjYp2BQ2RGbG5TcJVK6MCbFKpwCgq3aAK3yd8,12380 +faiss/contrib/factory_tools.py,sha256=KChfcCgd5n8VOT669x1LKB035o0u3yMwwMlyf3FgnFU,5085 +faiss/contrib/inspect_tools.py,sha256=adznxU6EFhVrBXuyyol5M-j5baUVlhJCOoz7cFDFVdQ,3749 +faiss/contrib/ivf_tools.py,sha256=yZVz2UFivUUkur27SeeHfAfwM422tJpwVd6uT55w2yk,4874 +faiss/contrib/ondisk.py,sha256=o75LX2UDSVb0WaKWHKVwfVoXXmIGt-qblIMaVP3VRYQ,2069 +faiss/contrib/rpc.py,sha256=AFSNIhDWU6M4PErmYShL9U2DVVTypkJR5C3VpcSZS98,7305 +faiss/contrib/torch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +faiss/contrib/torch/clustering.py,sha256=fuHiTKrpgH3iXoyaqvuij4j2SD7hKS_VGPITbprIGj8,1622 +faiss/contrib/torch/quantization.py,sha256=co4f6SYN2OcDwIv_QlhbTUosmKT77CbDfP3qH7jCDGA,2796 +faiss/contrib/torch_utils.py,sha256=xiJneS8gCQ-Dgoh4wotaYzWMMe8fWwUPOTVrOjPoCaw,26826 +faiss/contrib/vecs_io.py,sha256=mBCzkcL0g1P438hur9OrTriQTuFl2CDsidFU5LPHIkU,1383 +faiss/extra_wrappers.py,sha256=B_C7O8HC0rfYvIeecWBEZxjIGueIjWgyGt0k5v6JFoE,20492 +faiss/gpu_wrappers.py,sha256=r2XcE_WO_URnFVq01zPgHbyKhfRHFvlw7nlX6-eObc8,9196 +faiss/loader.py,sha256=Ih3mYPcDahHzFjJerQeEadUSwe4r0u2D3H7WKnUtrjY,5715 +faiss/python_callbacks.h,sha256=Di3GvEZb1aJxQxJagsmUS_xNFNfIm4BtbVVaSqB8rdw,1771 +faiss/setup.py,sha256=hyyzmNocXW8-zRziXlGSLS3QoZDwkCNHeBfii_WZEn4,4823 
+faiss/swigfaiss.i,sha256=pmiBVlt0CZByBV9iO3gssdxiSOvCIPbvk5XkiECKTJU,34916 +faiss/swigfaiss.py,sha256=Vthkry0pdtS5XxgFWRFqW08f1xC-AjKWL8XH3OsiGLg,522419 +faiss/swigfaiss_avx2.py,sha256=Yc9XDqj1x4Lhfpl-gO_dOvxZlD0EAHDwEMdZ7Fu5OTU,543874 +faiss/swigfaiss_avx512.py,sha256=XbDzP8_tDnKW4gwZE4GQ5bSLb9UzXUsPqpg4DszojIc,552456 +faiss_cpu-1.10.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +faiss_cpu-1.10.0.dist-info/LICENSE,sha256=WIeseFb8XlKsip07StlCDHeYTCME9qlvgxSryQpfc-4,1071 +faiss_cpu-1.10.0.dist-info/METADATA,sha256=k8Oqa9qO1d_KljrW_e7B5oiV073ICJ5CwOci41vp1dk,4433 +faiss_cpu-1.10.0.dist-info/RECORD,, +faiss_cpu-1.10.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +faiss_cpu-1.10.0.dist-info/WHEEL,sha256=0LtizVCPjmS43VfmdyNZBTcke7HWiGSz2g6gpFQfJJc,113 +faiss_cpu-1.10.0.dist-info/top_level.txt,sha256=nr2S-1YAhqAuAlFJukntDvRGXfI1KmwnUNs_iFlNiig,6 +faiss_cpu.libs/libgfortran-93980b03.so.5.0.0,sha256=VRPmUjgQd_BY9MZtJxiNd9ToNjdBD8kmV1-G0G2SSKU,2714697 +faiss_cpu.libs/libgomp-24e2ab19.so.1.0.0,sha256=7DVDy2-hHzQlj-PVkILkvZdAoYNJ4Jf8Qqe8aSohvyw,253289 +faiss_cpu.libs/libquadmath-776d53b6.so.0.0.0,sha256=3FY9LfNiF_4qtQ_ZZncl3g5v7__d2mmyG-o2kSRS-8Q,272193 diff --git a/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/REQUESTED b/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/WHEEL b/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..6d6ced05d69b11a4730ba6c08060bec67f223f85 --- /dev/null +++ b/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.8.0) +Root-Is-Purelib: false +Tag: cp310-cp310-manylinux_2_28_x86_64 + diff --git 
a/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/top_level.txt b/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..a38fbdc9b38998b943720997ac2c9f9f4e5ffcb2 --- /dev/null +++ b/lib/python3.10/site-packages/faiss_cpu-1.10.0.dist-info/top_level.txt @@ -0,0 +1 @@ +faiss diff --git a/lib/python3.10/site-packages/optax/__init__.py b/lib/python3.10/site-packages/optax/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0b3f590ff23cb59a71d13f5b097d48a6a0702bfc --- /dev/null +++ b/lib/python3.10/site-packages/optax/__init__.py @@ -0,0 +1,349 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Optax: composable gradient processing and optimization, in JAX.""" + +from optax import experimental +from optax._src.alias import adabelief +from optax._src.alias import adafactor +from optax._src.alias import adagrad +from optax._src.alias import adam +from optax._src.alias import adamax +from optax._src.alias import adamaxw +from optax._src.alias import adamw +from optax._src.alias import amsgrad +from optax._src.alias import dpsgd +from optax._src.alias import fromage +from optax._src.alias import lamb +from optax._src.alias import lars +from optax._src.alias import MaskOrFn +from optax._src.alias import noisy_sgd +from optax._src.alias import novograd +from optax._src.alias import optimistic_gradient_descent +from optax._src.alias import radam +from optax._src.alias import rmsprop +from optax._src.alias import ScalarOrSchedule +from optax._src.alias import sgd +from optax._src.alias import sm3 +from optax._src.alias import yogi +from optax._src.base import EmptyState +from optax._src.base import GradientTransformation +from optax._src.base import identity +from optax._src.base import OptState +from optax._src.base import Params +from optax._src.base import Schedule +from optax._src.base import set_to_zero +from optax._src.base import stateless +from optax._src.base import stateless_with_tree_map +from optax._src.base import TransformInitFn +from optax._src.base import TransformUpdateFn +from optax._src.base import Updates +from optax._src.clipping import adaptive_grad_clip +from optax._src.clipping import AdaptiveGradClipState +from optax._src.clipping import clip +from optax._src.clipping import clip_by_block_rms +from optax._src.clipping import clip_by_global_norm +from optax._src.clipping import ClipByGlobalNormState +from optax._src.clipping import ClipState +from optax._src.clipping import per_example_global_norm_clip +from optax._src.combine import chain +from 
optax._src.combine import multi_transform +from optax._src.combine import MultiTransformState +from optax._src.constrain import keep_params_nonnegative +from optax._src.constrain import NonNegativeParamsState +from optax._src.constrain import zero_nans +from optax._src.constrain import ZeroNansState +from optax._src.control_variates import control_delta_method +from optax._src.control_variates import control_variates_jacobians +from optax._src.control_variates import moving_avg_baseline +from optax._src.factorized import FactoredState +from optax._src.factorized import scale_by_factored_rms +from optax._src.linear_algebra import global_norm +from optax._src.linear_algebra import matrix_inverse_pth_root +from optax._src.linear_algebra import power_iteration +from optax._src.lookahead import lookahead +from optax._src.lookahead import LookaheadParams +from optax._src.lookahead import LookaheadState +from optax._src.loss import cosine_distance +from optax._src.loss import cosine_similarity +from optax._src.loss import ctc_loss +from optax._src.loss import ctc_loss_with_forward_probs +from optax._src.loss import hinge_loss +from optax._src.loss import huber_loss +from optax._src.loss import l2_loss +from optax._src.loss import log_cosh +from optax._src.loss import sigmoid_binary_cross_entropy +from optax._src.loss import smooth_labels +from optax._src.loss import softmax_cross_entropy +from optax._src.loss import softmax_cross_entropy_with_integer_labels +from optax._src.numerics import safe_int32_increment +from optax._src.numerics import safe_norm +from optax._src.numerics import safe_root_mean_squares +from optax._src.privacy import differentially_private_aggregate +from optax._src.privacy import DifferentiallyPrivateAggregateState +from optax._src.schedule import constant_schedule +from optax._src.schedule import cosine_decay_schedule +from optax._src.schedule import cosine_onecycle_schedule +from optax._src.schedule import exponential_decay +from 
optax._src.schedule import inject_hyperparams +from optax._src.schedule import InjectHyperparamsState +from optax._src.schedule import join_schedules +from optax._src.schedule import linear_onecycle_schedule +from optax._src.schedule import linear_schedule +from optax._src.schedule import piecewise_constant_schedule +from optax._src.schedule import piecewise_interpolate_schedule +from optax._src.schedule import polynomial_schedule +from optax._src.schedule import sgdr_schedule +from optax._src.schedule import warmup_cosine_decay_schedule +from optax._src.schedule import warmup_exponential_decay_schedule +from optax._src.second_order import fisher_diag +from optax._src.second_order import hessian_diag +from optax._src.second_order import hvp +from optax._src.stochastic_gradient_estimators import measure_valued_jacobians +from optax._src.stochastic_gradient_estimators import pathwise_jacobians +from optax._src.stochastic_gradient_estimators import score_function_jacobians +from optax._src.transform import add_decayed_weights +from optax._src.transform import add_noise +from optax._src.transform import AddDecayedWeightsState +from optax._src.transform import additive_weight_decay +from optax._src.transform import AdditiveWeightDecayState +from optax._src.transform import AddNoiseState +from optax._src.transform import apply_every +from optax._src.transform import ApplyEvery +from optax._src.transform import bias_correction +from optax._src.transform import centralize +from optax._src.transform import ema +from optax._src.transform import EmaState +from optax._src.transform import scale +from optax._src.transform import scale_by_adam +from optax._src.transform import scale_by_adamax +from optax._src.transform import scale_by_amsgrad +from optax._src.transform import scale_by_belief +from optax._src.transform import scale_by_novograd +from optax._src.transform import scale_by_optimistic_gradient +from optax._src.transform import scale_by_param_block_norm +from 
optax._src.transform import scale_by_param_block_rms +from optax._src.transform import scale_by_radam +from optax._src.transform import scale_by_rms +from optax._src.transform import scale_by_rss +from optax._src.transform import scale_by_schedule +from optax._src.transform import scale_by_sm3 +from optax._src.transform import scale_by_stddev +from optax._src.transform import scale_by_trust_ratio +from optax._src.transform import scale_by_yogi +from optax._src.transform import ScaleByAdamState +from optax._src.transform import ScaleByAmsgradState +from optax._src.transform import ScaleByBeliefState +from optax._src.transform import ScaleByFromageState +from optax._src.transform import ScaleByNovogradState +from optax._src.transform import ScaleByRmsState +from optax._src.transform import ScaleByRssState +from optax._src.transform import ScaleByRStdDevState +from optax._src.transform import ScaleByScheduleState +from optax._src.transform import ScaleBySM3State +from optax._src.transform import ScaleByTrustRatioState +from optax._src.transform import ScaleState +from optax._src.transform import trace +from optax._src.transform import TraceState +from optax._src.transform import update_infinity_moment +from optax._src.transform import update_moment +from optax._src.transform import update_moment_per_elem_norm +from optax._src.update import apply_updates +from optax._src.update import incremental_update +from optax._src.update import periodic_update +from optax._src.utils import multi_normal +from optax._src.utils import scale_gradient +from optax._src.wrappers import apply_if_finite +from optax._src.wrappers import ApplyIfFiniteState +from optax._src.wrappers import flatten +from optax._src.wrappers import masked +from optax._src.wrappers import MaskedNode +from optax._src.wrappers import MaskedState +from optax._src.wrappers import maybe_update +from optax._src.wrappers import MaybeUpdateState +from optax._src.wrappers import MultiSteps +from optax._src.wrappers 
import MultiStepsState +from optax._src.wrappers import ShouldSkipUpdateFunction +from optax._src.wrappers import skip_large_updates +from optax._src.wrappers import skip_not_finite + +__version__ = "0.1.4" + +__all__ = ( + "adabelief", + "adafactor", + "adagrad", + "adam", + "adamax", + "adamaxw", + "adamw", + "adaptive_grad_clip", + "AdaptiveGradClipState", + "add_decayed_weights", + "add_noise", + "AddDecayedWeightsState", + "additive_weight_decay", + "AdditiveWeightDecayState", + "AddNoiseState", + "amsgrad", + "apply_every", + "apply_if_finite", + "apply_updates", + "ApplyEvery", + "ApplyIfFiniteState", + "centralize", + "chain", + "clip_by_block_rms", + "clip_by_global_norm", + "clip", + "ClipByGlobalNormState", + "ClipState", + "constant_schedule", + "ctc_loss", + "ctc_loss_with_forward_probs", + "control_delta_method", + "control_variates_jacobians", + "cosine_decay_schedule", + "cosine_distance", + "cosine_onecycle_schedule", + "cosine_similarity", + "differentially_private_aggregate", + "DifferentiallyPrivateAggregateState", + "dpsgd", + "ema", + "EmaState", + "EmptyState", + "exponential_decay", + "FactoredState", + "fisher_diag", + "flatten", + "fromage", + "global_norm", + "GradientTransformation", + "hinge_loss", + "hessian_diag", + "huber_loss", + "hvp", + "identity", + "incremental_update", + "inject_hyperparams", + "InjectHyperparamsState", + "join_schedules", + "keep_params_nonnegative", + "l2_loss", + "lamb", + "lars", + "linear_onecycle_schedule", + "linear_schedule", + "log_cosh", + "lookahead", + "LookaheadParams", + "LookaheadState", + "masked", + "MaskOrFn", + "MaskedState", + "matrix_inverse_pth_root", + "maybe_update", + "MaybeUpdateState", + "measure_valued_jacobians", + "moving_avg_baseline", + "multi_normal", + "multi_transform", + "MultiSteps", + "MultiStepsState", + "MultiTransformState", + "noisy_sgd", + "novograd", + "NonNegativeParamsState", + "OptState", + "Params", + "pathwise_jacobians", + "periodic_update", + 
"per_example_global_norm_clip", + "piecewise_constant_schedule", + "piecewise_interpolate_schedule", + "polynomial_schedule", + "power_iteration", + "radam", + "rmsprop", + "safe_int32_increment", + "safe_norm", + "safe_root_mean_squares", + "ScalarOrSchedule", + "scale_by_adam", + "scale_by_adamax", + "scale_by_amsgrad", + "scale_by_belief", + "scale_by_factored_rms", + "scale_by_novograd", + "scale_by_param_block_norm", + "scale_by_param_block_rms", + "scale_by_radam", + "scale_by_rms", + "scale_by_rss", + "scale_by_schedule", + "scale_by_sm3", + "scale_by_stddev", + "scale_by_trust_ratio", + "scale_by_yogi", + "scale_gradient", + "scale", + "ScaleByAdamState", + "ScaleByAmsgradState", + "ScaleByBeliefState", + "ScaleByFromageState", + "ScaleByNovogradState", + "ScaleByRmsState", + "ScaleByRssState", + "ScaleByRStdDevState", + "ScaleByScheduleState", + "ScaleBySM3State", + "ScaleByTrustRatioState", + "ScaleState", + "Schedule", + "score_function_jacobians", + "set_to_zero", + "sgd", + "sgdr_schedule", + "ShouldSkipUpdateFunction", + "sigmoid_binary_cross_entropy", + "skip_large_updates", + "skip_not_finite", + "sm3", + "smooth_labels", + "softmax_cross_entropy", + "stateless", + "stateless_with_tree_map", + "trace", + "TraceState", + "TransformInitFn", + "TransformUpdateFn", + "Updates", + "warmup_cosine_decay_schedule", + "warmup_exponential_decay_schedule", + "yogi", + "zero_nans", + "ZeroNansState", +) + +# _________________________________________ +# / Please don't use symbols in `_src` they \ +# \ are not part of the Optax public API. 
/ +# ----------------------------------------- +# \ ^__^ +# \ (oo)\_______ +# (__)\ )\/\ +# ||----w | +# || || +# diff --git a/lib/python3.10/site-packages/optax/_src/alias.py b/lib/python3.10/site-packages/optax/_src/alias.py new file mode 100644 index 0000000000000000000000000000000000000000..6f1b6f748fa395a132a07ba249fa1507a86fe0e3 --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/alias.py @@ -0,0 +1,883 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Aliases for popular optimizers.""" + +from typing import Any, Callable, Optional, Union + +import jax.numpy as jnp + +from optax._src import base +from optax._src import clipping +from optax._src import combine +from optax._src import factorized +from optax._src import privacy +from optax._src import transform +from optax._src import wrappers + + +ScalarOrSchedule = Union[float, base.Schedule] +MaskOrFn = Optional[Union[Any, Callable[[base.Params], Any]]] + + +def _scale_by_learning_rate(learning_rate: ScalarOrSchedule, flip_sign=True): + m = -1 if flip_sign else 1 + if callable(learning_rate): + return transform.scale_by_schedule(lambda count: m * learning_rate(count)) + return transform.scale(m * learning_rate) + + +def adabelief( + learning_rate: ScalarOrSchedule, + b1: float = 0.9, + b2: float = 0.999, + eps: float = 1e-16, + eps_root: float = 1e-16) -> base.GradientTransformation: + """The AdaBelief optimizer. + + AdaBelief is an adaptive learning rate optimizer that focuses on fast + convergence, generalization, and stability. It adapts the step size depending + on its "belief" in the gradient direction — the optimizer adaptively scales + the step size by the difference between the predicted and observed gradients. + AdaBelief is a modified version of Adam and contains the same number of + parameters. + + References: + Zhuang et al, 2020: https://arxiv.org/abs/2010.07468 + + Args: + learning_rate: A fixed global scaling factor. + b1: Exponential decay rate to track the first moment of past gradients. + b2: Exponential decay rate to track the second moment of past gradients. + eps: Term added to the denominator to improve numerical stability. + eps_root: Term added to the second moment of the prediction error to + improve numerical stability. If backpropagating gradients through the + gradient transformation (e.g. for meta-learning), this must be non-zero. 
+ + Returns: + The corresponding `GradientTransformation`. + """ + return combine.chain( + transform.scale_by_belief(b1=b1, b2=b2, eps=eps, eps_root=eps_root), + _scale_by_learning_rate(learning_rate), + ) + + +def adafactor( + learning_rate: Optional[ScalarOrSchedule] = None, + min_dim_size_to_factor: int = 128, + decay_rate: float = 0.8, + decay_offset: int = 0, + multiply_by_parameter_scale: float = True, + clipping_threshold: Optional[float] = 1.0, + momentum: Optional[float] = None, + dtype_momentum: Any = jnp.float32, + weight_decay_rate: Optional[float] = None, + eps: float = 1e-30, + factored: bool = True, + weight_decay_mask: MaskOrFn = None, + ) -> base.GradientTransformation: + """The Adafactor optimizer. + + Adafactor is an adaptive learning rate optimizer that focuses on fast + training of large scale neural networks. It saves memory by using a factored + estimate of the second order moments used to scale gradients. + + References: + Shazeer and Stern, 2018: https://arxiv.org/abs/1804.04235 + + Args: + learning_rate: A fixed global scaling factor. Note: the natural scale for + Adafactor's LR is markedly different from Adam, one doesn't use the + 1/sqrt(hidden) correction for this optim with attention-based models. + min_dim_size_to_factor: Only factor the statistics if two array dimensions + have at least this size. + decay_rate: Controls second-moment exponential decay schedule. + decay_offset: For fine-tuning, one may set this to the starting step + number of the fine-tuning phase. + multiply_by_parameter_scale: If True, then scale learning_rate by + parameter norm. If False, provided learning_rate is absolute step size. + clipping_threshold: Optional clipping threshold. Must be >= 1. If None, + clipping is disabled. + momentum: Optional value between 0 and 1, enables momentum and uses extra + memory if non-None! None by default. + dtype_momentum: Data type of momentum buffers. + weight_decay_rate: Optional rate at which to decay weights. 
+ eps: Regularization constant for root mean squared gradient. + factored: Whether to use factored second-moment estimates. + weight_decay_mask: A tree with same structure as (or a prefix of) + the params PyTree, or a Callable that returns such a pytree given + the params/updates. The leaves should be booleans, `True` + for leaves/subtrees you want to apply the transformation to, + and `False` for those you want to skip. + + Returns: + The corresponding `GradientTransformation`. + """ + # The core of the algorithm is a procedure for rescaling gradients + # by a factored estimate of the root mean squared gradients. + # This reduces memory compared to algorithms such as Adam or RmsProp, + # by not having to hold a separate estimate for each weight. + tx = [ + factorized.scale_by_factored_rms( + factored, decay_rate, decay_offset, min_dim_size_to_factor, eps)] + # This basic rescaling is typically combined with one or more of the following + # transformation (all can be disabled via adafactor's constructor args). + if clipping_threshold is not None: + tx.append(clipping.clip_by_block_rms(clipping_threshold)) + if learning_rate is not None: + tx.append(_scale_by_learning_rate(learning_rate, flip_sign=False)) + if multiply_by_parameter_scale: + tx.append(transform.scale_by_param_block_rms()) + if momentum is not None: + tx.append( + transform.ema(momentum, debias=False, accumulator_dtype=dtype_momentum)) + if weight_decay_rate is not None: + tx.append(transform.add_decayed_weights( + weight_decay_rate, mask=weight_decay_mask)) + # In gradient "descent" we follow the negative gradient. + tx.append(transform.scale(-1)) + return combine.chain(*tx) + + +def adagrad( + learning_rate: ScalarOrSchedule, + initial_accumulator_value: float = 0.1, + eps: float = 1e-7 +) -> base.GradientTransformation: + """The Adagrad optimizer. + + Adagrad is an algorithm for gradient based optimization that anneals the + learning rate for each parameter during the course of training. 
+ + WARNING: Adagrad's main limit is the monotonic accumulation of squared + gradients in the denominator: since all terms are >0, the sum keeps growing + during training and the learning rate eventually becomes vanishingly small. + + References: + Duchi et al, 2011: https://jmlr.org/papers/v12/duchi11a.html + + Args: + learning_rate: A fixed global scaling factor. + initial_accumulator_value: Initial value for the accumulator. + eps: A small constant applied to denominator inside of the square root + (as in RMSProp) to avoid dividing by zero when rescaling. + + Returns: + The corresponding `GradientTransformation`. + """ + return combine.chain( + transform.scale_by_rss( + initial_accumulator_value=initial_accumulator_value, eps=eps), + _scale_by_learning_rate(learning_rate), + ) + + +def adam( + learning_rate: ScalarOrSchedule, + b1: float = 0.9, + b2: float = 0.999, + eps: float = 1e-8, + eps_root: float = 0.0, + mu_dtype: Optional[Any] = None, +) -> base.GradientTransformation: + r"""The classic Adam optimizer. + + Adam is an SGD variant with gradient scaling adaptation. The scaling + used for each parameter is computed from estimates of first and second-order + moments of the gradients (using suitable exponential moving averages). + + Let :math:`\alpha_t` represent the learning rate and :math:`\beta_1, \beta_2`, + :math:`\varepsilon`, :math:`\bar{\varepsilon}` represent the arguments + ``b1``, ``b2``, ``eps`` and ``eps_root`` respectievly. The learning rate is + indexed by :math:`t` since the learning rate may also be provided by a + schedule function. + + The ``init`` function of this optimizer initializes an internal state + :math:`S_0 := (m_0, v_0) = (0, 0)`, representing initial estimates for the + first and second moments. In practice these values are stored as pytrees + containing all zeros, with the same shape as the model updates. 
+ At step :math:`t`, the ``update`` function of this optimizer takes as + arguments the incoming gradients :math:`g_t` and optimizer state :math:`S_t` + and computes updates :math:`u_t` and new state :math:`S_{t+1}`. Thus, for + :math:`t > 0`, we have, + + .. math:: + \begin{align*} + m_t &\leftarrow \beta_1 \cdot m_{t-1} + (1-\beta_1) \cdot g_t \\ + v_t &\leftarrow \beta_2 \cdot v_{t-1} + (1-\beta_2) \cdot {g_t}^2 \\ + \hat{m}_t &\leftarrow m_t / {(1-\beta_1^t)} \\ + \hat{v}_t &\leftarrow v_t / {(1-\beta_2^t)} \\ + u_t &\leftarrow \alpha_t \cdot \hat{m}_t / \left({\sqrt{\hat{v}_t + + \bar{\varepsilon}} + \varepsilon} \right)\\ + S_t &\leftarrow (m_t, v_t). + \end{align*} + + References: + Kingma et al, 2014: https://arxiv.org/abs/1412.6980 + + Args: + learning_rate: A fixed global scaling factor. + b1: Exponential decay rate to track the first moment of past gradients. + b2: Exponential decay rate to track the second moment of past gradients. + eps: A small constant applied to denominator outside of the square root + (as in the Adam paper) to avoid dividing by zero when rescaling. + eps_root: A small constant applied to denominator inside the square root (as + in RMSProp), to avoid dividing by zero when rescaling. This is needed for + example when computing (meta-)gradients through Adam. + mu_dtype: Optional `dtype` to be used for the first order accumulator; if + `None` then the `dtype` is inferred from `params` and `updates`. + + Returns: + The corresponding `GradientTransformation`. 
+ """ + return combine.chain( + transform.scale_by_adam( + b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype), + _scale_by_learning_rate(learning_rate), + ) + + +def adamw( + learning_rate: ScalarOrSchedule, + b1: float = 0.9, + b2: float = 0.999, + eps: float = 1e-8, + eps_root: float = 0.0, + mu_dtype: Optional[Any] = None, + weight_decay: float = 1e-4, + mask: Optional[Union[Any, Callable[[base.Params], Any]]] = None, +) -> base.GradientTransformation: + """Adam with weight decay regularization. + + AdamW uses weight decay to regularize learning towards small weights, as + this leads to better generalization. In SGD you can also use L2 regularization + to implement this as an additive loss term, however L2 regularization + does not behave as intended for adaptive gradient algorithms such as Adam. + + References: + Loshchilov et al, 2019: https://arxiv.org/abs/1711.05101 + + Args: + learning_rate: A fixed global scaling factor. + b1: Exponential decay rate to track the first moment of past gradients. + b2: Exponential decay rate to track the second moment of past gradients. + eps: A small constant applied to denominator outside of the square root + (as in the Adam paper) to avoid dividing by zero when rescaling. + eps_root: A small constant applied to denominator inside the square root (as + in RMSProp), to avoid dividing by zero when rescaling. This is needed for + instance when computing (meta-)gradients through Adam. + mu_dtype: Optional `dtype` to be used for the first order accumulator; if + `None` then the `dtype` is inferred from `params` and `updates`. + weight_decay: Strength of the weight decay regularization. Note that this + weight decay is multiplied with the learning rate. This is consistent + with other frameworks such as PyTorch, but different from + (Loshchilov et al, 2019) where the weight decay is only multiplied with + the "schedule multiplier", but not the base learning rate. 
+ mask: A tree with same structure as (or a prefix of) the params PyTree, + or a Callable that returns such a pytree given the params/updates. + The leaves should be booleans, `True` for leaves/subtrees you want to + apply the weight decay to, and `False` for those you want to skip. Note + that the Adam gradient transformations are applied to all parameters. + + Returns: + The corresponding `GradientTransformation`. + """ + return combine.chain( + transform.scale_by_adam( + b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype), + transform.add_decayed_weights(weight_decay, mask), + _scale_by_learning_rate(learning_rate), + ) + + +def amsgrad( + learning_rate: ScalarOrSchedule, + b1: float = 0.9, + b2: float = 0.999, + eps: float = 1e-8, + eps_root: float = 0.0, + mu_dtype: Optional[Any] = None, +) -> base.GradientTransformation: + """The AMSGrad optimiser. + + The original Adam can fail to converge to the optimal solution in some cases. + AMSGrad guarantees convergence by using a long-term memory of past gradients. + + References: + Reddi et al, 2018: https://openreview.net/forum?id=ryQu7f-RZ + + Args: + learning_rate: A fixed global scaling factor. + b1: Exponential decay rate to track the first moment of past gradients. + b2: Exponential decay rate to track the second moment of past gradients. + eps: A small constant applied to denominator outside of the square root + (as in the Adam paper) to avoid dividing by zero when rescaling. + eps_root: A small constant applied to denominator inside the square root (as + in RMSProp), to avoid dividing by zero when rescaling. This is needed for + instance when computing (meta-)gradients through Adam. + mu_dtype: Optional `dtype` to be used for the first order accumulator; if + `None` then the `dtype` is inferred from `params` and `updates`. + + Returns: + The corresponding `GradientTransformation`. 
+ """ + return combine.chain( + transform.scale_by_amsgrad( + b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype), + _scale_by_learning_rate(learning_rate), + ) + + +def fromage( + learning_rate: float, + min_norm: float = 1e-6 +) -> base.GradientTransformation: + """The Frobenius matched gradient descent (Fromage) optimizer. + + Fromage is a learning algorithm that does not require learning rate tuning. + The optimizer is based on modeling neural network gradients via deep relative + trust (a distance function on deep neural networks). Fromage is similar to the + LARS optimizer and can work on a range of standard neural network benchmarks, + such as natural language Transformers and generative adversarial networks. + + References: + Bernstein et al, 2020: https://arxiv.org/abs/2002.03432 + + Args: + learning_rate: A fixed global scaling factor. + min_norm: A minimum value that the norm of the gradient updates and the norm + of the layer parameters can be clipped to to avoid dividing by zero when + computing the trust ratio (as in the LARS paper). + + Returns: + The corresponding `GradientTransformation`. + """ + mult = 1 / jnp.sqrt(1 + learning_rate ** 2) + return combine.chain( + transform.scale_by_trust_ratio(min_norm), + _scale_by_learning_rate(learning_rate * mult), + transform.add_decayed_weights((mult - 1)), + ) + + +def lars( + learning_rate: ScalarOrSchedule, + weight_decay: float = 0., + weight_decay_mask: MaskOrFn = True, + trust_coefficient: float = 0.001, + eps: float = 0., + trust_ratio_mask: MaskOrFn = True, + momentum: float = 0.9, + nesterov: bool = False, +) -> base.GradientTransformation: + """The LARS optimizer. + + LARS is a layer-wise adaptive optimizer introduced to help scale SGD to + larger batch sizes. LARS later inspired the LAMB optimizer. + + References: + You et al, 2017: https://arxiv.org/abs/1708.03888 + + Args: + learning_rate: A fixed global scaling factor. + weight_decay: Strength of the weight decay regularization. 
+ weight_decay_mask: A tree with same structure as (or a prefix of) the params + PyTree, or a Callable that returns such a pytree given the params/updates. + The leaves should be booleans, `True` for leaves/subtrees you want to + apply the transformation to, and `False` for those you want to skip. + trust_coefficient: A multiplier for the trust ratio. + eps: Optional additive constant in the trust ratio denominator. + trust_ratio_mask: A tree with same structure as (or a prefix of) the params + PyTree, or a Callable that returns such a pytree given the params/updates. + The leaves should be booleans, `True` for leaves/subtrees you want to + apply the transformation to, and `False` for those you want to skip. + momentum: Decay rate for momentum. + nesterov: Whether to use Nesterov momentum. + + Returns: + The corresponding `GradientTransformation`. + """ + return combine.chain( + transform.add_decayed_weights(weight_decay, mask=weight_decay_mask), + wrappers.masked( + inner=transform.scale_by_trust_ratio( + trust_coefficient=trust_coefficient, eps=eps), + mask=trust_ratio_mask), + _scale_by_learning_rate(learning_rate), + transform.trace(decay=momentum, nesterov=nesterov), + ) + + +def lamb( + learning_rate: ScalarOrSchedule, + b1: float = 0.9, + b2: float = 0.999, + eps: float = 1e-6, + eps_root: float = 0.0, + weight_decay: float = 0., + mask: MaskOrFn = None, +) -> base.GradientTransformation: + """The LAMB optimizer. + + LAMB is a general purpose layer-wise adaptive large batch optimizer designed + to provide consistent training performance across a wide range of tasks, + including those that use attention-based models (such as Transformers) and + ResNet-50. The optimizer is able to work with small and large batch sizes. + LAMB was inspired by the LARS learning algorithm. + + References: + You et al, 2019: https://arxiv.org/abs/1904.00962 + + Args: + learning_rate: A fixed global scaling factor. 
+ b1: Exponential decay rate to track the first moment of past gradients. + b2: Exponential decay rate to track the second moment of past gradients. + eps: A small constant applied to denominator outside of the square root + (as in the Adam paper) to avoid dividing by zero when rescaling. + eps_root: A small constant applied to denominator inside the square root (as + in RMSProp), to avoid dividing by zero when rescaling. This is needed for + instance when computing (meta-)gradients through Adam. + weight_decay: Strength of the weight decay regularization. + mask: A tree with same structure as (or a prefix of) the params PyTree, + or a Callable that returns such a pytree given the params/updates. + The leaves should be booleans, `True` for leaves/subtrees you want to + apply the transformation to, and `False` for those you want to skip. + + Returns: + The corresponding `GradientTransformation`. + """ + return combine.chain( + transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root), + transform.add_decayed_weights(weight_decay=weight_decay, mask=mask), + transform.scale_by_trust_ratio(), + _scale_by_learning_rate(learning_rate), + ) + + +def noisy_sgd( + learning_rate: ScalarOrSchedule, + eta: float = 0.01, + gamma: float = 0.55, + seed: int = 0 +) -> base.GradientTransformation: + r"""A variant of SGD with added noise. + + It has been found that adding noise to the gradients can improve + both the training error and the generalization error in very deep networks. + + References: + Neelakantan et al, 2014: https://arxiv.org/abs/1511.06807 + + Args: + learning_rate: A fixed global scaling factor. + eta: Initial variance for the Gaussian noise added to gradients. + gamma: A parameter controlling the annealing of noise over time, the + variance decays according to `(1+t)^-\gamma`. + seed: Seed for the pseudo-random generation process. + + Returns: + The corresponding `GradientTransformation`. 
+ """ + return combine.chain( + transform.add_noise(eta, gamma, seed), + _scale_by_learning_rate(learning_rate), + ) + + +def novograd( + learning_rate: ScalarOrSchedule, + b1: float = 0.9, + b2: float = 0.25, + eps: float = 1e-6, + eps_root: float = 0.0, + weight_decay: float = 0., +) -> base.GradientTransformation: + """NovoGrad optimizer. + + NovoGrad is more robust to the initial learning rate and + weight initialization than other methods. For example, + NovoGrad works well without LR warm-up, while other methods require it. + NovoGrad performs exceptionally well for large batch training, e.g. it + outperforms other methods for ResNet-50 for all batches up to 32K. + In addition, NovoGrad requires half the memory compared to Adam. + It was introduced together with Jasper ASR model. + + References: + Ginsburg et al, 2019: https://arxiv.org/abs/1905.11286 + Li et al, 2019: https://arxiv.org/abs/1904.03288 + + Args: + learning_rate: A fixed global scaling factor. + b1: An exponential decay rate to track the first moment of past gradients. + b2: An exponential decay rate to track the second moment of past gradients. + eps: A small constant applied to denominator outside of the square root (as + in the Adam paper) to avoid dividing by zero when rescaling. + eps_root: A small constant applied to denominator inside + the square root (as in RMSProp), to avoid dividing by zero when rescaling. + This is needed for instance when computing (meta-)gradients through Adam. + weight_decay: Strength of the weight decay regularization. + + Returns: + The corresponding `GradientTransformation`. 
+  """
+  return combine.chain(
+      transform.scale_by_novograd(
+          b1=b1, b2=b2, eps=eps, eps_root=eps_root, weight_decay=weight_decay),
+      _scale_by_learning_rate(learning_rate),
+  )
+
+
+def optimistic_gradient_descent(
+    learning_rate: ScalarOrSchedule,
+    alpha: ScalarOrSchedule = 1.0,
+    beta: ScalarOrSchedule = 1.0
+) -> base.GradientTransformation:
+  """An Optimistic Gradient Descent optimizer.
+
+  Optimistic gradient descent is an approximation of extra-gradient methods
+  which require multiple gradient calls to compute the next update. It has
+  strong formal guarantees for last-iterate convergence in min-max games, for
+  which standard gradient descent can oscillate or even diverge.
+
+  References:
+    [Mokhtari et al, 2019](https://arxiv.org/abs/1901.08511v2)
+
+  Args:
+    learning_rate: A fixed global scaling factor.
+    alpha: Coefficient for generalized OGD.
+    beta: Coefficient for generalized OGD negative momentum.
+
+  Returns:
+    A `GradientTransformation`.
+  """
+  return combine.chain(
+      transform.scale_by_optimistic_gradient(alpha=alpha, beta=beta),
+      _scale_by_learning_rate(learning_rate)
+  )
+
+
+def radam(
+    learning_rate: ScalarOrSchedule,
+    b1: float = 0.9,
+    b2: float = 0.999,
+    eps: float = 1e-8,
+    eps_root: float = 0.0,
+    threshold: float = 5.0
+) -> base.GradientTransformation:
+  """The Rectified Adam optimizer.
+
+  The adaptive learning rate in Adam has undesirably large variance in early
+  stages of training, due to the limited number of training samples used to
+  estimate the optimizer's statistics. Rectified Adam addresses this issue
+  by analytically reducing the large variance.
+
+  References:
+    Liu et al, 2020: https://arxiv.org/abs/1908.03265
+
+  Args:
+    learning_rate: A fixed global scaling factor.
+    b1: Exponential decay rate to track the first moment of past gradients.
+    b2: Exponential decay rate to track the second moment of past gradients.
+ eps: A small constant applied to denominator outside of the square root + (as in the Adam paper) to avoid dividing by zero when rescaling. + eps_root: A small constant applied to denominator inside the square root (as + in RMSProp), to avoid dividing by zero when rescaling. This is needed for + instance when computing (meta-)gradients through Adam. + threshold: Threshold for variance tractability. + + Returns: + The corresponding `GradientTransformation`. + """ + return combine.chain( + transform.scale_by_radam( + b1=b1, b2=b2, eps=eps, eps_root=eps_root, threshold=threshold), + _scale_by_learning_rate(learning_rate), + ) + + +def rmsprop( + learning_rate: ScalarOrSchedule, + decay: float = 0.9, + eps: float = 1e-8, + initial_scale: float = 0., + centered: bool = False, + momentum: Optional[float] = None, + nesterov: bool = False +) -> base.GradientTransformation: + # pylint: disable=line-too-long + """A flexible RMSProp optimizer. + + RMSProp is an SGD variant with learning rate adaptation. The `learning_rate` + used for each weight is scaled by a suitable estimate of the magnitude of the + gradients on previous steps. Several variants of RMSProp can be found + in the literature. This alias provides an easy to configure RMSProp + optimizer that can be used to switch between several of these variants. + + References: + Tieleman and Hinton, 2012: http://www.cs.toronto.edu/~hinton/coursera/lecture6/lec6.pdf + Graves, 2013: https://arxiv.org/abs/1308.0850 + + Args: + learning_rate: A fixed global scaling factor. + decay: Decay used to track the magnitude of previous gradients. + eps: A small numerical constant to avoid dividing by zero when rescaling. + initial_scale: Initial value of accumulators tracking the magnitude of + previous updates. PyTorch uses `0`, TF1 uses `1`. When reproducing results + from a paper, verify the value used by the authors. 
+ centered: Whether the second moment or the variance of the past gradients is + used to rescale the latest gradients. + momentum: Decay rate used by the momentum term, when it is set to `None`, + then momentum is not used at all. + nesterov: Whether Nesterov momentum is used. + + Returns: + The corresponding `GradientTransformation`. + """ + # pylint: enable=line-too-long + if centered: + return combine.chain( + transform.scale_by_stddev( + decay=decay, eps=eps, initial_scale=initial_scale), + _scale_by_learning_rate(learning_rate), + (transform.trace(decay=momentum, nesterov=nesterov) + if momentum is not None else base.identity()) + ) + return combine.chain( + transform.scale_by_rms( + decay=decay, eps=eps, initial_scale=initial_scale), + _scale_by_learning_rate(learning_rate), + (transform.trace(decay=momentum, nesterov=nesterov) + if momentum is not None else base.identity()) + ) + + +def sgd( + learning_rate: ScalarOrSchedule, + momentum: Optional[float] = None, + nesterov: bool = False, + accumulator_dtype: Optional[Any] = None, +) -> base.GradientTransformation: + """A canonical Stochastic Gradient Descent optimizer. + + This implements stochastic gradient descent. It also includes support for + momentum, and nesterov acceleration, as these are standard practice when + using stochastic gradient descent to train deep neural networks. + + References: + Sutskever et al, 2013: http://proceedings.mlr.press/v28/sutskever13.pdf + + Args: + learning_rate: A fixed global scaling factor. + momentum: Decay rate used by the momentum term, when it is set to `None`, + then momentum is not used at all. + nesterov: Whether Nesterov momentum is used. + accumulator_dtype: Optional `dtype` to be used for the accumulator; if + `None` then the `dtype` is inferred from `params` and `updates`. + + Returns: + A `GradientTransformation`. 
+  """
+  return combine.chain(
+      (transform.trace(decay=momentum, nesterov=nesterov,
+                       accumulator_dtype=accumulator_dtype)
+       if momentum is not None else base.identity()),
+      _scale_by_learning_rate(learning_rate)
+  )
+
+
+def sm3(
+    learning_rate: float,
+    momentum: float = 0.9
+) -> base.GradientTransformation:
+  """The SM3 optimizer.
+
+  SM3 (Square-root of Minima of Sums of Maxima of Squared-gradients Method) is a
+  memory-efficient adaptive optimizer designed to decrease memory overhead when
+  training very large models, such as the Transformer for machine translation,
+  BERT for language modeling, and AmoebaNet-D for image classification. SM3: 1)
+  applies to tensors of arbitrary dimensions and any predefined cover of the
+  parameters; 2) adapts the learning rates in an adaptive and data-driven manner
+  (like Adagrad and unlike Adafactor); and 3) comes with rigorous convergence
+  guarantees in stochastic convex optimization settings.
+
+  References:
+    Anil et al, 2019: https://arxiv.org/abs/1901.11150
+
+  Args:
+    learning_rate: A fixed global scaling factor.
+    momentum: Decay rate used by the momentum term; a value of `0.`
+      corresponds to not using momentum at all.
+
+  Returns:
+    The corresponding `GradientTransformation`.
+  """
+  return combine.chain(
+      transform.scale_by_sm3(momentum),
+      transform.scale(-learning_rate),
+  )
+
+
+def yogi(
+    learning_rate: ScalarOrSchedule,
+    b1: float = 0.9,
+    b2: float = 0.999,
+    eps: float = 1e-3,
+) -> base.GradientTransformation:
+  """The Yogi optimizer.
+
+  Yogi is an adaptive optimizer, which provides control in tuning the effective
+  learning rate to prevent it from increasing. By doing so, it focuses on
+  addressing the issues of convergence and generalization in exponential moving
+  average-based adaptive methods (such as Adam and RMSprop). Yogi is a
+  modification of Adam and uses the same parameters.
+ + References: + Zaheer et al, 2020: http://www.sanjivk.com/yogi_nips2018.pdf + + Args: + learning_rate: A fixed global scaling factor. + b1: Exponential decay rate to track the first moment of past gradients. + b2: Exponential decay rate to track the second moment of past gradients. + eps: A small constant applied to denominator outside of the square root + (as in the Adam paper) to avoid dividing by zero when rescaling. + + Returns: + The corresponding `GradientTransformation`. + """ + return combine.chain( + transform.scale_by_yogi(b1=b1, b2=b2, eps=eps), + _scale_by_learning_rate(learning_rate), + ) + + +def dpsgd( + learning_rate: ScalarOrSchedule, + l2_norm_clip: float, + noise_multiplier: float, + seed: int, + momentum: Optional[float] = None, + nesterov: bool = False +) -> base.GradientTransformation: + """The DPSGD optimizer. + + Differential privacy is a standard for privacy guarantees of algorithms + learning from aggregate databases including potentially sensitive information. + DPSGD offers protection against a strong adversary with full knowledge of the + training mechanism and access to the model’s parameters. + + WARNING: This `GradientTransformation` expects input updates to have a batch + dimension on the 0th axis. That is, this function expects per-example + gradients as input (which are easy to obtain in JAX using `jax.vmap`). + + References: + Abadi et al, 2016: https://arxiv.org/abs/1607.00133 + + Args: + learning_rate: A fixed global scaling factor. + l2_norm_clip: Maximum L2 norm of the per-example gradients. + noise_multiplier: Ratio of standard deviation to the clipping norm. + seed: Initial seed used for the jax.random.PRNGKey + momentum: Decay rate used by the momentum term, when it is set to `None`, + then momentum is not used at all. + nesterov: Whether Nesterov momentum is used. + + Returns: + A `GradientTransformation`. 
+ """ + return combine.chain( + privacy.differentially_private_aggregate( + l2_norm_clip=l2_norm_clip, + noise_multiplier=noise_multiplier, + seed=seed), + (transform.trace(decay=momentum, nesterov=nesterov) + if momentum is not None else base.identity()), + _scale_by_learning_rate(learning_rate) + ) + + +def adamax( + learning_rate: ScalarOrSchedule, + b1: float = 0.9, + b2: float = 0.999, + eps: float = 1e-8, +) -> base.GradientTransformation: + """A variant of the Adam optimizer that uses the infinity norm. + + References: + Kingma et al, 2014: https://arxiv.org/abs/1412.6980 + + Args: + learning_rate: A fixed global scaling factor. + b1: Exponential decay rate to track the first moment of past gradients. + b2: Exponential decay rate to track the maximum of past gradients. + eps: A small constant applied to denominator to avoid dividing by zero when + rescaling. + + Returns: + The corresponding `GradientTransformation`. + """ + return combine.chain( + transform.scale_by_adamax(b1=b1, b2=b2, eps=eps,), + _scale_by_learning_rate(learning_rate), + ) + + +def adamaxw( + learning_rate: ScalarOrSchedule, + b1: float = 0.9, + b2: float = 0.999, + eps: float = 1e-8, + weight_decay: float = 1e-4, + mask: Optional[Union[Any, Callable[[base.Params], Any]]] = None, +) -> base.GradientTransformation: + """Adamax with weight decay regularization. + + AdamaxW uses weight decay to regularize learning towards small weights, as + this leads to better generalization. In SGD you can also use L2 regularization + to implement this as an additive loss term, however L2 regularization + does not behave as intended for adaptive gradient algorithms such as Adam. + + WARNING: Sometimes you may want to skip weight decay for BatchNorm scale or + for the bias parameters. You can use `optax.masked` to make your own AdamaxW + variant where `additive_weight_decay` is applied only to a subset of `params`. 
+ + References: + Loshchilov et al, 2019: https://arxiv.org/abs/1711.05101 + + Args: + learning_rate: A fixed global scaling factor. + b1: Exponential decay rate to track the first moment of past gradients. + b2: Exponential decay rate to track the maximum of past gradients. + eps: A small constant applied to denominator to avoid dividing by zero when + rescaling. + weight_decay: Strength of the weight decay regularization. Note that this + weight decay is multiplied with the learning rate. This is consistent + with other frameworks such as PyTorch, but different from + (Loshchilov et al, 2019) where the weight decay is only multiplied with + the "schedule multiplier", but not the base learning rate. + mask: A tree with same structure as (or a prefix of) the params PyTree, + or a Callable that returns such a pytree given the params/updates. + The leaves should be booleans, `True` for leaves/subtrees you want to + apply the weight decay to, and `False` for those you want to skip. Note + that the Adamax gradient transformations are applied to all parameters. + + Returns: + The corresponding `GradientTransformation`. + """ + return combine.chain( + transform.scale_by_adamax(b1=b1, b2=b2, eps=eps), + transform.add_decayed_weights(weight_decay, mask), + _scale_by_learning_rate(learning_rate), + ) diff --git a/lib/python3.10/site-packages/optax/_src/alias_test.py b/lib/python3.10/site-packages/optax/_src/alias_test.py new file mode 100644 index 0000000000000000000000000000000000000000..be1a68b3043292ac7b3b2f6b40e6f8b884365505 --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/alias_test.py @@ -0,0 +1,186 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for `alias.py`.""" + +from absl.testing import absltest +from absl.testing import parameterized + +import chex +import jax +import jax.numpy as jnp + +from optax._src import alias +from optax._src import numerics +from optax._src import schedule +from optax._src import update + +_OPTIMIZERS_UNDER_TEST = ( + dict(opt_name='sgd', opt_kwargs=dict(learning_rate=1e-3, momentum=0.9)), + dict(opt_name='adafactor', opt_kwargs=dict(learning_rate=5e-3)), + dict(opt_name='adagrad', opt_kwargs=dict(learning_rate=1.0)), + dict(opt_name='adam', opt_kwargs=dict(learning_rate=1e-1)), + dict(opt_name='adamw', opt_kwargs=dict(learning_rate=1e-1)), + dict(opt_name='adamax', opt_kwargs=dict(learning_rate=1e-1)), + dict(opt_name='adamaxw', opt_kwargs=dict(learning_rate=1e-1)), + dict(opt_name='amsgrad', opt_kwargs=dict(learning_rate=1e-1)), + dict(opt_name='lars', opt_kwargs=dict(learning_rate=1.0)), + dict(opt_name='lamb', opt_kwargs=dict(learning_rate=1e-3)), + dict(opt_name='noisy_sgd', opt_kwargs=dict(learning_rate=1e-3, eta=1e-4)), + dict(opt_name='novograd', opt_kwargs=dict(learning_rate=1e-3)), + dict( + opt_name='optimistic_gradient_descent', + opt_kwargs=dict(learning_rate=2e-3, alpha=0.7, beta=0.1)), + dict(opt_name='rmsprop', opt_kwargs=dict(learning_rate=5e-3)), + dict(opt_name='rmsprop', opt_kwargs=dict(learning_rate=5e-3, momentum=0.9)), + dict(opt_name='fromage', opt_kwargs=dict(learning_rate=5e-3)), + dict(opt_name='adabelief', opt_kwargs=dict(learning_rate=1e-2)), + 
dict(opt_name='radam', opt_kwargs=dict(learning_rate=5e-3)), + dict(opt_name='sm3', opt_kwargs=dict(learning_rate=1.0)), + dict(opt_name='yogi', opt_kwargs=dict(learning_rate=1e-1)), + dict( + opt_name='dpsgd', + opt_kwargs=dict( + learning_rate=1e-3, + l2_norm_clip=10., + noise_multiplier=1e-3, + seed=0, + momentum=0.2)), +) + + +def _setup_parabola(dtype): + """Quadratic function as an optimization target.""" + initial_params = jnp.array([-1.0, 10.0, 1.0], dtype=dtype) + final_params = jnp.array([1.0, -1.0, 1.0], dtype=dtype) + + if jnp.iscomplexobj(dtype): + final_params *= 1 + 1j + + @jax.grad + def get_updates(params): + return jnp.sum(numerics.abs_sq(params - final_params)) + + return initial_params, final_params, get_updates + + +def _setup_rosenbrock(dtype): + """Rosenbrock function as an optimization target.""" + a = 1.0 + b = 100.0 + + if jnp.iscomplexobj(dtype): + a *= 1 + 1j + + initial_params = jnp.array([0.0, 0.0], dtype=dtype) + final_params = jnp.array([a, a**2], dtype=dtype) + + @jax.grad + def get_updates(params): + return (numerics.abs_sq(a - params[0]) + + b * numerics.abs_sq(params[1] - params[0]**2)) + + return initial_params, final_params, get_updates + + +class AliasTest(chex.TestCase): + + @parameterized.product( + _OPTIMIZERS_UNDER_TEST, + target=(_setup_parabola, _setup_rosenbrock), + dtype=(jnp.float32, jnp.complex64), + ) + def test_optimization(self, opt_name, opt_kwargs, target, dtype): + if (opt_name + in ('fromage', 'noisy_sgd', 'sm3', 'optimistic_gradient_descent') and + jnp.iscomplexobj(dtype)): + raise absltest.SkipTest( + f'{opt_name} does not support complex parameters.') + + opt = getattr(alias, opt_name)(**opt_kwargs) + initial_params, final_params, get_updates = target(dtype) + + @jax.jit + def step(params, state): + updates = get_updates(params) + if opt_name == 'dpsgd': + updates = updates[None] + # Complex gradients need to be conjugated before being added to parameters + # 
https://gist.github.com/wdphy16/118aef6fb5f82c49790d7678cf87da29 + updates = jax.tree_util.tree_map(lambda x: x.conj(), updates) + updates, state = opt.update(updates, state, params) + params = update.apply_updates(params, updates) + return params, state + + params = initial_params + state = opt.init(params) + for _ in range(10000): + params, state = step(params, state) + + chex.assert_trees_all_close(params, final_params, rtol=3e-2, atol=3e-2) + + @chex.all_variants + @parameterized.product(_OPTIMIZERS_UNDER_TEST) + def test_optimizers_can_be_wrapped_in_inject_hyperparams( + self, opt_name, opt_kwargs): + """Checks that optimizers can be wrapped in inject_hyperparams.""" + # See also https://github.com/deepmind/optax/issues/412. + opt_factory = getattr(alias, opt_name) + opt = opt_factory(**opt_kwargs) + if opt_name == 'adafactor': + # Adafactor wrapped in inject_hyperparams currently needs a static + # argument to be specified in order to be jittable. See issue + # https://github.com/deepmind/optax/issues/412. 
+ opt_inject = schedule.inject_hyperparams( + opt_factory, static_args=('min_dim_size_to_factor',))(**opt_kwargs) + else: + opt_inject = schedule.inject_hyperparams(opt_factory)(**opt_kwargs) + + params = [-jnp.ones((2, 3)), jnp.ones((2, 5, 2))] + grads = [jnp.ones((2, 3)), -jnp.ones((2, 5, 2))] + + state = self.variant(opt.init)(params) + updates, new_state = self.variant(opt.update)(grads, state, params) + + state_inject = self.variant(opt_inject.init)(params) + updates_inject, new_state_inject = self.variant(opt_inject.update)( + grads, state_inject, params) + + with self.subTest('Equality of updates.'): + chex.assert_trees_all_close(updates_inject, updates, rtol=1e-4) + with self.subTest('Equality of new optimizer states.'): + chex.assert_trees_all_close( + new_state_inject.inner_state, new_state, rtol=1e-4) + + @parameterized.named_parameters([ + ('float32', 'float32'), + ('bfloat16', 'bfloat16'), + ('complex64', 'complex64'), + ('None', None), + ]) + def test_explicit_dtype(self, dtype): + expected_dtype = jax.dtypes.canonicalize_dtype(dtype) # None -> float32 + tx = alias.sgd(0.1, momentum=0.9, accumulator_dtype=dtype) + trace_state, _ = tx.init(jnp.array([0.0, 0.0])) + self.assertEqual(expected_dtype, trace_state.trace.dtype) + tx = alias.adam(0.1, mu_dtype=dtype) + adam_state, _ = tx.init(jnp.array([0.0, 0.0])) + self.assertEqual(expected_dtype, adam_state.mu.dtype) + tx = alias.adamw(0.1, mu_dtype=dtype) + adam_state, _, _ = tx.init(jnp.array([0.0, 0.0])) + self.assertEqual(expected_dtype, adam_state.mu.dtype) + + +if __name__ == '__main__': + absltest.main() diff --git a/lib/python3.10/site-packages/optax/_src/base.py b/lib/python3.10/site-packages/optax/_src/base.py new file mode 100644 index 0000000000000000000000000000000000000000..97ff04a670db948555b4348102e221042102b3fc --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/base.py @@ -0,0 +1,233 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base interfaces and datatypes."""

from typing import Any, Callable, NamedTuple, Optional, Sequence, Tuple

import chex
import jax
import jax.numpy as jnp
import typing_extensions

# Error message raised by transformations that need the current parameter
# values (e.g. weight decay) when `update` is called without `params`.
NO_PARAMS_MSG = (
    'You are using a transformation that requires the current value of '
    'parameters, but you are not passing `params` when calling `update`.')

PyTree = Any
Shape = Sequence[int]

OptState = chex.ArrayTree  # States are arbitrary nests of `jnp.ndarrays`.
Params = chex.ArrayTree  # Parameters are arbitrary nests of `jnp.ndarrays`.
Updates = Params  # Gradient updates are of the same type as parameters.

# A schedule maps a (scalar) step count to a (scalar) hyperparameter value.
Schedule = Callable[[chex.Numeric], chex.Numeric]


class TransformInitFn(typing_extensions.Protocol):
  """A callable type for the `init` step of a `GradientTransformation`.

  The `init` step takes a tree of `params` and uses these to construct an
  arbitrary structured initial `state` for the gradient transformation. This
  may hold statistics of the past updates or any other non static information.
  """

  def __call__(self, params: Params) -> OptState:
    """The `init` function.

    Args:
      params: The initial value of the parameters.

    Returns:
      The initial state of the gradient transformation.
    """


class TransformUpdateFn(typing_extensions.Protocol):
  """A callable type for the `update` step of a `GradientTransformation`.

  The `update` step takes a tree of candidate parameter `updates` (e.g. their
  gradient with respect to some loss), an arbitrary structured `state`, and the
  current `params` of the model being optimised. The `params` argument is
  optional, it must however be provided when using transformations that require
  access to the current values of the parameters.
  """

  def __call__(
      self,
      updates: Updates,
      state: OptState,
      params: Optional[Params] = None
  ) -> Tuple[Updates, OptState]:
    """The `update` function.

    Args:
      updates: A tree of candidate updates.
      state: The state of the gradient transformation.
      params: (Optionally) the current value of the parameters.

    Returns:
      The transformed updates, and the updated state.
    """


class GradientTransformation(NamedTuple):
  """A pair of pure functions implementing a gradient transformation.

  Optax optimizers are all implemented as _gradient transformations_.
  A gradient transformation is defined to be a pair of pure functions, which
  are combined together in a `NamedTuple` so that they can be referred to by
  name.

  Since gradient transformations do not contain any internal state, all stateful
  optimizer properties (such as the current step count when using optimizer
  schedules, or momentum values) are passed through optax gradient
  transformations by using the optimizer _state_ pytree. Each time a gradient
  transformation is applied, a new state is computed and returned, ready to be
  passed to the next call to the gradient transformation.

  Since gradient transformations are pure, idempotent functions, the only way
  to change the behaviour of a gradient transformation between steps, is to
  change the values in the optimizer state. To see an example of mutating the
  optimizer state in order to control the behaviour of an optax gradient
  transformation, see the meta-learning example in the optax documentation.

  Attributes:
    init: A pure function which, when called with an example instance of the
      parameters whose gradients will be transformed, returns a pytree
      containing the initial value for the optimizer state.
    update: A pure function which takes as input a pytree of updates (with the
      same tree structure as the original params pytree passed to init), the
      previous optimizer state (which may have been initialized using the init
      function), and optionally the current params. The update function then
      returns the computed gradient updates, and a new optimizer state.
  """
  init: TransformInitFn
  update: TransformUpdateFn


class EmptyState(NamedTuple):
  """An empty state for the simplest stateless transformations."""


def identity() -> GradientTransformation:
  """Stateless identity transformation that leaves input gradients untouched.

  This function passes through the *gradient updates* unchanged.

  Note, this should not be confused with `set_to_zero`, which maps the input
  updates to zero - which is the transform required for the *model parameters*
  to be left unchanged when the updates are applied to them.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(_):
    return EmptyState()

  def update_fn(updates, state, params=None):
    del params
    return updates, state

  return GradientTransformation(init_fn, update_fn)


def set_to_zero() -> GradientTransformation:
  """Stateless transformation that maps input gradients to zero.

  The resulting update function, when called, will return a tree of zeros
  matching the shape of the input gradients. This means that when the updates
  returned from this transformation are applied to the model parameters, the
  model parameters will remain unchanged.

  This can be used in combination with `multi_transform` or `masked` to freeze
  (i.e. keep fixed) some parts of the tree of model parameters while applying
  gradient updates to other parts of the tree.

  When updates are set to zero inside the same jit-compiled function as the
  calculation of gradients, optax transformations, and application of updates to
  parameters, unnecessary computations will in general be dropped.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    del params
    return EmptyState()

  def update_fn(updates, state, params=None):
    del params  # Unused by the zero transform.
    return jax.tree_util.tree_map(jnp.zeros_like, updates), state

  return GradientTransformation(init_fn, update_fn)


def stateless(
    f: Callable[[Updates, Optional[Params]], Updates],
) -> GradientTransformation:
  """Creates a stateless transformation from an update-like function.

  This wrapper eliminates the boilerplate needed to create a transformation that
  does not require saved state between iterations.

  Args:
    f: Update function that takes in updates (e.g. gradients) and parameters
      and returns updates. The parameters may be `None`.

  Returns:
    An `optax.GradientTransformation`.
  """

  def init_fn(_):
    return EmptyState()

  def update_fn(updates, state, params=None):
    del state
    return f(updates, params), EmptyState()

  return GradientTransformation(init_fn, update_fn)


def stateless_with_tree_map(
    f: Callable[[chex.Array, Optional[chex.Array]], chex.Array],
) -> GradientTransformation:
  """Creates a stateless transformation from an update-like function for arrays.

  This wrapper eliminates the boilerplate needed to create a transformation that
  does not require saved state between iterations, just like optax.stateless.
  In addition, this function will apply the tree_map over update/params for you.

  Args:
    f: Update function that takes in an update array (e.g. gradients) and
      parameter array and returns an update array. The parameter array may be
      `None`.

  Returns:
    An `optax.GradientTransformation`.
  """

  def init_fn(_):
    return EmptyState()

  def update_fn(updates, state, params=None):
    del state
    if params is not None:
      return jax.tree_util.tree_map(f, updates, params), EmptyState()
    else:
      # No params supplied: map `f` over updates alone, passing `None` for
      # the parameter argument of each leaf.
      f_ = lambda u: f(u, None)
      return jax.tree_util.tree_map(f_, updates), EmptyState()

  return GradientTransformation(init_fn, update_fn)
# ==============================================================================
"""Tests for `combine.py`."""

from absl.testing import absltest
from absl.testing import parameterized

import chex
import jax
import jax.numpy as jnp

from optax._src import alias
from optax._src import combine
from optax._src import transform
from optax._src import update


STEPS = 50
LR = 1e-2


class ComposeTest(chex.TestCase):
  """Tests for the `combine.chain` composition of transformations."""

  def setUp(self):
    super().setUp()
    self.init_params = (jnp.array([1., 2.]), jnp.array([3., 4.]))
    self.per_step_updates = (jnp.array([500., 5.]), jnp.array([300., 3.]))

  @chex.all_variants
  def test_chain(self):
    # Chaining transformations must be equivalent to applying each
    # transformation in sequence by hand.
    transformations = [
        transform.scale_by_adam(),
        transform.trace(decay=0, nesterov=False),
        transform.scale(-LR)]

    # Apply updates with chain.
    chain_params = self.init_params
    chained_transforms = combine.chain(*transformations)
    state = chained_transforms.init(chain_params)
    self.assertIsInstance(state, tuple)

    @self.variant
    def update_fn(updates, state):
      return chained_transforms.update(updates, state)

    for _ in range(STEPS):
      updates, state = update_fn(self.per_step_updates, state)
      self.assertIsInstance(state, tuple)
      chain_params = update.apply_updates(chain_params, updates)

    # Manually apply sequence of transformations.
    manual_params = self.init_params
    states = [t.init(manual_params) for t in transformations]
    for _ in range(STEPS):
      updates = self.per_step_updates
      new_states = []
      for t, s in zip(transformations, states):
        updates, state = t.update(updates, s)
        new_states.append(state)
      manual_params = update.apply_updates(manual_params, updates)
      states = new_states

    # Check equivalence.
    chex.assert_tree_all_close(manual_params, chain_params, rtol=1e-4)


def _map_keys_fn(fn):
  # Returns a function that recursively applies `fn(key, value)` to every
  # leaf of a nested dict, preserving the dict structure.
  def map_fn(nested_dict):
    return {k: (map_fn(v) if isinstance(v, dict) else fn(k, v))
            for k, v in nested_dict.items()}
  return map_fn


class MultiTransformTest(chex.TestCase):
  """Tests for the multi_transform wrapper."""

  @chex.all_variants
  @parameterized.parameters(True, False)
  def test_multi_transform(self, use_fn):
    params = {'a1': 1., 'b1': 2., 'z1': {'a2': 3., 'z2': {'c1': 4.}}}
    params = jax.tree_util.tree_map(jnp.asarray, params)
    input_updates = jax.tree_util.tree_map(lambda x: x / 10.0, params)
    tx_dict = {'a': transform.scale(-1.0),
               'b': transform.ema(0.0),  # stateful
               'c': transform.scale(2.0)}
    # Labels are the first character of each key ('a', 'b', 'c', 'z'),
    # either given as a callable or pre-computed as a tree.
    param_labels = _map_keys_fn(lambda k, _: k[0])
    if not use_fn:
      param_labels = param_labels(params)
    tx = combine.multi_transform(tx_dict, param_labels)
    update_fn = self.variant(tx.update)
    state = self.variant(tx.init)(params)

    correct_update_fn = _map_keys_fn(
        lambda k, v: {'a': -v, 'b': v, 'c': 2.0*v}[k[0]])

    updates, state = update_fn(input_updates, state, params)
    correct_updates = correct_update_fn(input_updates)
    chex.assert_tree_all_close(updates, correct_updates)

    # Check repeated application, this time with no params.
    correct_updates = correct_update_fn(correct_updates)
    updates, state = update_fn(updates, state)
    chex.assert_tree_all_close(updates, correct_updates)

  @parameterized.parameters(list, tuple, dict)
  def test_empty(self, container):
    # An empty params container should round-trip through multi_transform.
    init_fn, update_fn = combine.multi_transform(
        {0: alias.sgd(1.)}, lambda _: 0)
    updates, _ = update_fn(container(), init_fn(container()))
    self.assertEqual(updates, container())

  @chex.all_variants
  @parameterized.parameters(
      (False, False), (False, True), (True, False), (True, True))
  def test_labels_mismatch(self, use_extra_label, use_fn):
    # The labels from label_fn must be a subset of the keys for the tx.
    params = {'a': 1., 'b': [2., 3.], 'c': {'d': 4., 'e': (5., 6.)}}
    params = jax.tree_util.tree_map(jnp.asarray, params)
    label_tree = {'a': 0, 'b': [1, 0], 'c': 1}  # prefix of params

    if use_extra_label:
      # Label `3` has no matching transformation; init must raise.
      label_tree['a'] = 3

    transforms = {0: alias.sgd(1.),
                  1: alias.adam(1., b1=0., b2=0.),
                  2: transform.trace(1.0)}
    init_fn, update_fn = combine.multi_transform(
        transforms, (lambda _: label_tree) if use_fn else label_tree)

    if use_extra_label:
      with self.assertRaises(ValueError):
        self.variant(init_fn)(params)
    else:
      state = self.variant(init_fn)(params)
      updates = jax.tree_util.tree_map(lambda x: x / 10.0, params)
      self.variant(update_fn)(updates, state)


if __name__ == '__main__':
  absltest.main()
# ==============================================================================
"""Gradient transformations used to enforce specific constraints."""

from typing import Any, NamedTuple

import jax
import jax.numpy as jnp

from optax._src import base

# pylint:disable=no-value-for-parameter


# The non-negativity constraint carries no state between steps.
NonNegativeParamsState = base.EmptyState


def keep_params_nonnegative() -> base.GradientTransformation:
  """Modifies the updates to keep parameters non-negative, i.e. >= 0.

  This transformation ensures that parameters after the update will be
  larger than or equal to zero.
  In a chain of transformations, this should be the last one.

  WARNING: the transformation expects input params to be non-negative.
  When params is negative the transformed update will move them to 0.

  Returns:
    A `GradientTransformation` object.
  """

  def _clip_to_zero(param, update):
    # If applying `update` would push `param` below zero, substitute the
    # update `-param`, which lands the parameter exactly at zero.
    would_go_negative = (param + update) < 0.
    return jnp.where(would_go_negative, -param, update)

  def init_fn(params):
    del params  # The constraint is stateless.
    return NonNegativeParamsState()

  def update_fn(updates, state, params):
    # Current parameter values are required to know where zero is.
    if params is None:
      raise ValueError(base.NO_PARAMS_MSG)
    clipped = jax.tree_util.tree_map(_clip_to_zero, params, updates)
    return clipped, state

  return base.GradientTransformation(init_fn, update_fn)


class ZeroNansState(NamedTuple):
  """Contains a tree.

  The entry `found_nan` has the same tree structure as that of the parameters.
  Each leaf is a single boolean which contains True iff a NaN was detected in
  the corresponding parameter array at the last call to `update`.
  """
  found_nan: Any


def zero_nans() -> base.GradientTransformation:
  """A transformation which replaces NaNs with 0.

  Zeroing values in gradients is guaranteed to produce a direction of
  non-increasing loss.

  The state of the transformation has the same tree structure as that of the
  parameters. Each leaf is a single boolean which contains True iff a NaN was
  detected in the corresponding parameter array at the last call to `update`.
  This state is not used by the transformation internally, but lets users be
  aware when NaNs have been zeroed out.

  Returns:
    A `GradientTransformation`.
  """

  def init_fn(params):
    # Start with a `False` flag per leaf: no NaNs observed yet.
    no_nans_yet = jax.tree_util.tree_map(
        lambda p: jnp.array(False, dtype=jnp.bool_), params)
    return ZeroNansState(no_nans_yet)

  def update_fn(updates, opt_state, params=None):
    del params  # Only the incoming updates are inspected.
    # Record, per leaf, whether any NaN was present in this step's updates.
    found_nan = jax.tree_util.tree_map(
        lambda u: jnp.any(jnp.isnan(u)), updates)
    # Replace every NaN entry with zero; non-NaN entries (including +/-inf)
    # pass through untouched.
    cleaned = jax.tree_util.tree_map(
        lambda u: jnp.where(jnp.isnan(u), jnp.zeros_like(u), u), updates)
    return cleaned, ZeroNansState(found_nan)

  return base.GradientTransformation(init=init_fn, update=update_fn)
# ==============================================================================
"""Tests for optax._src.constrain."""

from absl.testing import absltest

import chex
import jax.numpy as jnp

from optax._src import combine
from optax._src import constrain
from optax._src import transform
from optax._src import update

STEPS = 50
LR = 1e-2


class ConstraintsTest(chex.TestCase):
  """Tests for keep_params_nonnegative and zero_nans."""

  def test_keep_params_nonnegative(self):
    grads = (jnp.array([500., -500., 0.]),
             jnp.array([500., -500., 0.]),
             jnp.array([500., -500., 0.]))

    params = (jnp.array([-1., -1., -1.]),
              jnp.array([1., 1., 1.]),
              jnp.array([0., 0., 0.]))

    # vanilla sgd
    opt = combine.chain(
        transform.trace(decay=0, nesterov=False), transform.scale(-LR))
    opt_state = opt.init(params)

    updates, _ = opt.update(grads, opt_state, params)
    new_params = update.apply_updates(params, updates)

    # Without the constraint, params may go (or stay) negative.
    chex.assert_tree_all_close(new_params, (jnp.array([-6., 4., -1.]),
                                            jnp.array([-4., 6., 1.]),
                                            jnp.array([-5., 5., 0.])))

    # sgd with keeping parameters non-negative
    opt = combine.chain(
        transform.trace(decay=0, nesterov=False), transform.scale(-LR),
        constrain.keep_params_nonnegative())
    opt_state = opt.init(params)

    updates, _ = opt.update(grads, opt_state, params)
    new_params = update.apply_updates(params, updates)

    # With the constraint, entries that would go negative are clipped to 0.
    chex.assert_tree_all_close(new_params, (jnp.array([0., 4., 0.]),
                                            jnp.array([0., 6., 1.]),
                                            jnp.array([0., 5., 0.])))

  @chex.all_variants
  def test_zero_nans(self):
    params = (jnp.zeros([3]), jnp.zeros([3]), jnp.zeros([3]))

    opt = constrain.zero_nans()
    opt_state = self.variant(opt.init)(params)
    update_fn = self.variant(opt.update)

    # Initial state: no NaN seen in any of the three leaves.
    chex.assert_tree_all_close(opt_state,
                               constrain.ZeroNansState((jnp.array(False),) * 3))

    # Check an update with nans
    grads_with_nans = (jnp.ones([3]),
                       jnp.array([1., float('nan'), float('nan')]),
                       jnp.array([float('nan'), 1., 1.]))
    updates, opt_state = update_fn(grads_with_nans, opt_state)
    chex.assert_tree_all_close(
        opt_state,
        constrain.ZeroNansState(
            (jnp.array(False), jnp.array(True), jnp.array(True))))
    chex.assert_tree_all_close(
        updates,
        (jnp.ones([3]), jnp.array([1., 0., 0.]), jnp.array([0., 1., 1.])))

    # Check an update with nans and infs: infs must pass through untouched
    # and must not set the found_nan flag.
    grads_with_nans_infs = (jnp.ones([3]),
                            jnp.array([1., float('nan'),
                                       float('nan')]),
                            jnp.array([float('inf'), 1., 1.]))
    updates, opt_state = update_fn(grads_with_nans_infs, opt_state)
    chex.assert_tree_all_close(
        opt_state,
        constrain.ZeroNansState(
            (jnp.array(False), jnp.array(True), jnp.array(False))))
    chex.assert_tree_all_close(updates, (jnp.ones([3]), jnp.array(
        [1., 0., 0.]), jnp.array([float('inf'), 1., 1.])))

    # Check an update with only good values
    grads = (jnp.ones([3]), jnp.ones([3]), jnp.ones([3]))
    updates, opt_state = update_fn(grads, opt_state)
    chex.assert_tree_all_close(
        opt_state,
        constrain.ZeroNansState(
            (jnp.array(False), jnp.array(False), jnp.array(False))))
    chex.assert_tree_all_close(updates, grads)


if __name__ == '__main__':
  absltest.main()
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for `control_variates.py`.""" + +from absl.testing import absltest +from absl.testing import parameterized + +import chex +import jax +import jax.numpy as jnp +import numpy as np + +from optax._src import control_variates +from optax._src import stochastic_gradient_estimators as sge +from optax._src import utils + + +# Set seed for deterministic sampling. +np.random.seed(42) + + +def _assert_equal(actual, expected, rtol=1e-2, atol=1e-2): + """Asserts that arrays are equal.""" + # Note: assert_allclose does not check shapes + chex.assert_equal_shape((actual, expected)) + + # Scalar. + if not actual.shape: + np.testing.assert_allclose( + np.asarray(actual), np.asarray(expected), rtol, atol) + return + + # We get around the bug https://github.com/numpy/numpy/issues/13801 + zero_indices = np.argwhere(expected == 0) + if not np.all(np.abs(actual[zero_indices]) <= atol): + raise AssertionError(f'Larger than {atol} diff in {actual[zero_indices]}') + + non_zero_indices = np.argwhere(expected != 0) + np.testing.assert_allclose( + np.asarray(actual)[non_zero_indices], + expected[non_zero_indices], rtol, atol) + + +def _map(cv, params, samples, state=None): + return jax.vmap(lambda x: cv(params, x, state))(samples) + + +def _map_variant(variant): + return variant(_map, static_argnums=0) + + +def _cv_jac_variant(variant): + return variant( + control_variates.control_variates_jacobians, + static_argnums=(0, 1, 2, 4, 6, 7, 8)) + + +class DeltaControlVariateTest(chex.TestCase): + + @chex.all_variants + @parameterized.parameters([(1.0, 0.5)]) + def testQuadraticFunction(self, effective_mean, effective_log_scale): + data_dims = 20 + num_samples = 10**6 + rng = jax.random.PRNGKey(1) + + mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32) + log_scale = effective_log_scale 
* jnp.ones( + shape=(data_dims), dtype=jnp.float32) + params = [mean, log_scale] + + dist = utils.multi_normal(*params) + dist_samples = dist.sample((num_samples,), rng) + function = lambda x: jnp.sum(x**2) + + cv, expected_cv, _ = control_variates.control_delta_method(function) + avg_cv = jnp.mean(_map_variant(self.variant)(cv, params, dist_samples)) + expected_cv_value = jnp.sum(dist_samples**2) / num_samples + + # This should be an analytical computation, the result needs to be + # accurate. + _assert_equal(avg_cv, expected_cv_value, rtol=1e-1, atol=1e-3) + _assert_equal(expected_cv(params, None), expected_cv_value, rtol=0.02) + + @chex.all_variants + @parameterized.parameters([(1.0, 1.0)]) + def testPolinomialFunction(self, effective_mean, effective_log_scale): + data_dims = 10 + num_samples = 10**3 + + mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32) + log_scale = effective_log_scale * jnp.ones( + shape=(data_dims), dtype=jnp.float32) + params = [mean, log_scale] + + dist = utils.multi_normal(*params) + rng = jax.random.PRNGKey(1) + dist_samples = dist.sample((num_samples,), rng) + function = lambda x: jnp.sum(x**5) + + cv, expected_cv, _ = control_variates.control_delta_method(function) + avg_cv = jnp.mean(_map_variant(self.variant)(cv, params, dist_samples)) + + # Check that the average value of the control variate is close to the + # expected value. 
+ _assert_equal(avg_cv, expected_cv(params, None), rtol=1e-1, atol=1e-3) + + @chex.all_variants + def testNonPolynomialFunction(self): + data_dims = 10 + num_samples = 10**3 + + mean = jnp.ones(shape=(data_dims), dtype=jnp.float32) + log_scale = jnp.ones(shape=(data_dims), dtype=jnp.float32) + params = [mean, log_scale] + + rng = jax.random.PRNGKey(1) + dist = utils.multi_normal(*params) + dist_samples = dist.sample((num_samples,), rng) + function = lambda x: jnp.sum(jnp.log(x**2)) + + cv, expected_cv, _ = control_variates.control_delta_method(function) + avg_cv = jnp.mean(_map_variant(self.variant)(cv, params, dist_samples)) + + # Check that the average value of the control variate is close to the + # expected value. + _assert_equal(avg_cv, expected_cv(params, None), rtol=1e-1, atol=1e-3) + + # Second order expansion is log(\mu**2) + 1/2 * \sigma**2 (-2 / \mu**2) + expected_cv_val = - np.exp(1.) ** 2 * data_dims + _assert_equal( + expected_cv(params, None), expected_cv_val, rtol=1e-1, atol=1e-3) + + +class MovingAverageBaselineTest(chex.TestCase): + + @chex.all_variants + @parameterized.parameters( + [(1.0, 0.5, 0.9), + (1.0, 0.5, 0.99)]) + def testLinearFunction( + self, effective_mean, effective_log_scale, decay): + weights = jnp.array([1., 2., 3.], dtype=jnp.float32) + num_samples = 10**4 + data_dims = len(weights) + + mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32) + log_scale = effective_log_scale * jnp.ones( + shape=(data_dims), dtype=jnp.float32) + + params = [mean, log_scale] + function = lambda x: jnp.sum(weights * x) + + rng = jax.random.PRNGKey(1) + dist = utils.multi_normal(*params) + dist_samples = dist.sample((num_samples,), rng) + + cv, expected_cv, update_state = control_variates.moving_avg_baseline( + function, decay=decay, zero_debias=False, + use_decay_early_training_heuristic=False) + + state_1 = jnp.array(1.) 
+ avg_cv = jnp.mean(_map_variant(self.variant)( + cv, params, dist_samples, (state_1, 0))) + _assert_equal(avg_cv, state_1) + _assert_equal(expected_cv(params, (state_1, 0)), state_1) + + state_2 = jnp.array(2.) + avg_cv = jnp.mean( + _map_variant(self.variant)(cv, params, dist_samples, (state_2, 0))) + _assert_equal(avg_cv, state_2) + _assert_equal(expected_cv(params, (state_2, 0)), state_2) + + update_state_1 = update_state(params, dist_samples, (state_1, 0))[0] + _assert_equal( + update_state_1, + decay * state_1 + (1 - decay) * function(mean)) + + update_state_2 = update_state(params, dist_samples, (state_2, 0))[0] + _assert_equal( + update_state_2, + decay * state_2 + (1 - decay) * function(mean)) + + @chex.all_variants + @parameterized.parameters( + [(1.0, 0.5, 0.9), + (1.0, 0.5, 0.99)]) + def testLinearFunctionWithHeuristic( + self, effective_mean, effective_log_scale, decay): + weights = jnp.array([1., 2., 3.], dtype=jnp.float32) + num_samples = 10**5 + data_dims = len(weights) + + mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32) + log_scale = effective_log_scale * jnp.ones( + shape=(data_dims), dtype=jnp.float32) + + params = [mean, log_scale] + function = lambda x: jnp.sum(weights * x) + + rng = jax.random.PRNGKey(1) + dist = utils.multi_normal(*params) + dist_samples = dist.sample((num_samples,), rng) + + cv, expected_cv, update_state = control_variates.moving_avg_baseline( + function, decay=decay, zero_debias=False, + use_decay_early_training_heuristic=True) + + state_1 = jnp.array(1.) + avg_cv = jnp.mean(_map_variant(self.variant)( + cv, params, dist_samples, (state_1, 0))) + _assert_equal(avg_cv, state_1) + _assert_equal(expected_cv(params, (state_1, 0)), state_1) + + state_2 = jnp.array(2.) 
+ avg_cv = jnp.mean( + _map_variant(self.variant)(cv, params, dist_samples, (state_2, 0))) + _assert_equal(avg_cv, state_2) + _assert_equal(expected_cv(params, (state_2, 0)), state_2) + + first_step_decay = 0.1 + update_state_1 = update_state(params, dist_samples, (state_1, 0))[0] + _assert_equal( + update_state_1, + first_step_decay * state_1 + (1 - first_step_decay) * function(mean)) + + second_step_decay = 2. / 11 + update_state_2 = update_state(params, dist_samples, (state_2, 1))[0] + _assert_equal( + update_state_2, + second_step_decay * state_2 + (1 - second_step_decay) * function(mean)) + + @parameterized.parameters( + [(1.0, 0.5, 0.9), + (1.0, 0.5, 0.99)]) + def testLinearFunctionZeroDebias( + self, effective_mean, effective_log_scale, decay): + weights = jnp.array([1., 2., 3.], dtype=jnp.float32) + num_samples = 10**5 + data_dims = len(weights) + + mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32) + log_scale = effective_log_scale * jnp.ones( + shape=(data_dims), dtype=jnp.float32) + + params = [mean, log_scale] + function = lambda x: jnp.sum(weights * x) + + rng = jax.random.PRNGKey(1) + dist = utils.multi_normal(*params) + dist_samples = dist.sample((num_samples,), rng) + + update_state = control_variates.moving_avg_baseline( + function, decay=decay, zero_debias=False, + use_decay_early_training_heuristic=False)[-1] + + update_state_zero_debias = control_variates.moving_avg_baseline( + function, decay=decay, zero_debias=True, + use_decay_early_training_heuristic=False)[-1] + + updated_state = update_state(params, dist_samples, (jnp.array(0.), 0))[0] + _assert_equal(updated_state, (1 - decay) * function(mean)) + + updated_state_zero_debias = update_state_zero_debias( + params, dist_samples, (jnp.array(0.), 0))[0] + _assert_equal( + updated_state_zero_debias, function(mean)) + + +class DeltaMethodAnalyticalExpectedGrads(chex.TestCase): + """Tests for grads approximations.""" + + @chex.all_variants + @parameterized.named_parameters( + 
chex.params_product([ + ('_score_function_jacobians', 1.0, 1.0, sge.score_function_jacobians), + ('_pathwise_jacobians', 1.0, 1.0, sge.pathwise_jacobians), + ('_measure_valued_jacobians', 1.0, 1.0, sge.measure_valued_jacobians), + ], [ + ('estimate_cv_coeffs', True), + ('no_estimate_cv_coeffs', False), + ], + named=True)) + def testQuadraticFunction(self, effective_mean, effective_log_scale, + grad_estimator, estimate_cv_coeffs): + data_dims = 3 + num_samples = 10**3 + + mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32) + log_scale = effective_log_scale * jnp.ones( + shape=(data_dims), dtype=jnp.float32) + + params = [mean, log_scale] + function = lambda x: jnp.sum(x**2) + rng = jax.random.PRNGKey(1) + + jacobians = _cv_jac_variant(self.variant)( + function, + control_variates.control_delta_method, + grad_estimator, + params, + utils.multi_normal, # dist_builder + rng, + num_samples, + None, # No cv state. + estimate_cv_coeffs)[0] + + expected_mean_grads = 2 * effective_mean * np.ones( + data_dims, dtype=np.float32) + expected_log_scale_grads = 2 * np.exp(2 * effective_log_scale) * np.ones( + data_dims, dtype=np.float32) + + mean_jacobians = jacobians[0] + chex.assert_shape(mean_jacobians, (num_samples, data_dims)) + mean_grads_from_jacobian = jnp.mean(mean_jacobians, axis=0) + + log_scale_jacobians = jacobians[1] + chex.assert_shape(log_scale_jacobians, (num_samples, data_dims)) + log_scale_grads_from_jacobian = jnp.mean(log_scale_jacobians, axis=0) + + _assert_equal(mean_grads_from_jacobian, expected_mean_grads, + rtol=1e-1, atol=1e-3) + _assert_equal(log_scale_grads_from_jacobian, expected_log_scale_grads, + rtol=1e-1, atol=1e-3) + + @chex.all_variants + @parameterized.named_parameters( + chex.params_product([ + ('_score_function_jacobians', 1.0, 1.0, sge.score_function_jacobians), + ('_pathwise_jacobians', 1.0, 1.0, sge.pathwise_jacobians), + ('_measure_valued_jacobians', 1.0, 1.0, sge.measure_valued_jacobians), + ], [ + 
('estimate_cv_coeffs', True), + ('no_estimate_cv_coeffs', False), + ], + named=True)) + def testCubicFunction( + self, effective_mean, effective_log_scale, grad_estimator, + estimate_cv_coeffs): + data_dims = 1 + num_samples = 10**5 + + mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32) + log_scale = effective_log_scale * jnp.ones( + shape=(data_dims), dtype=jnp.float32) + + params = [mean, log_scale] + function = lambda x: jnp.sum(x**3) + rng = jax.random.PRNGKey(1) + + jacobians = _cv_jac_variant(self.variant)( + function, + control_variates.control_delta_method, + grad_estimator, + params, + utils.multi_normal, + rng, + num_samples, + None, # No cv state. + estimate_cv_coeffs)[0] + + # The third order uncentered moment of the Gaussian distribution is + # mu**3 + 2 mu * sigma **2. We use that to compute the expected value + # of the gradients. Note: for the log scale we need use the chain rule. + expected_mean_grads = ( + 3 * effective_mean**2 + 3 * np.exp(effective_log_scale)**2) + expected_mean_grads *= np.ones(data_dims, dtype=np.float32) + expected_log_scale_grads = ( + 6 * effective_mean * np.exp(effective_log_scale) ** 2) + expected_log_scale_grads *= np.ones(data_dims, dtype=np.float32) + + mean_jacobians = jacobians[0] + chex.assert_shape(mean_jacobians, (num_samples, data_dims)) + mean_grads_from_jacobian = jnp.mean(mean_jacobians, axis=0) + + log_scale_jacobians = jacobians[1] + chex.assert_shape(log_scale_jacobians, (num_samples, data_dims)) + log_scale_grads_from_jacobian = jnp.mean(log_scale_jacobians, axis=0) + + _assert_equal(mean_grads_from_jacobian, expected_mean_grads, + rtol=1e-1, atol=1e-3) + + _assert_equal(log_scale_grads_from_jacobian, expected_log_scale_grads, + rtol=1e-1, atol=1e-3) + + @chex.all_variants + @parameterized.named_parameters( + chex.params_product([ + ('_score_function_jacobians', 1.0, 1.0, sge.score_function_jacobians), + ('_pathwise_jacobians', 1.0, 1.0, sge.pathwise_jacobians), + 
('_measure_valued_jacobians', 1.0, 1.0, sge.measure_valued_jacobians), + ], [ + ('estimate_cv_coeffs', True), + ('no_estimate_cv_coeffs', False), + ], + named=True)) + def testForthPowerFunction( + self, effective_mean, effective_log_scale, grad_estimator, + estimate_cv_coeffs): + data_dims = 1 + num_samples = 10**5 + + mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32) + log_scale = effective_log_scale * jnp.ones( + shape=(data_dims), dtype=jnp.float32) + + params = [mean, log_scale] + function = lambda x: jnp.sum(x**4) + rng = jax.random.PRNGKey(1) + + jacobians = _cv_jac_variant(self.variant)( + function, + control_variates.control_delta_method, + grad_estimator, + params, + utils.multi_normal, + rng, + num_samples, + None, # No cv state + estimate_cv_coeffs)[0] + # The third order uncentered moment of the Gaussian distribution is + # mu**4 + 6 mu **2 sigma **2 + 3 sigma**4. We use that to compute the + # expected value of the gradients. + # Note: for the log scale we need use the chain rule. 
+ expected_mean_grads = ( + 3 * effective_mean**3 + + 12 * effective_mean * np.exp(effective_log_scale)**2) + expected_mean_grads *= np.ones(data_dims, dtype=np.float32) + expected_log_scale_grads = 12 * ( + effective_mean**2 * np.exp(effective_log_scale) + + np.exp(effective_log_scale) ** 3) * np.exp(effective_log_scale) + expected_log_scale_grads *= np.ones(data_dims, dtype=np.float32) + + mean_jacobians = jacobians[0] + chex.assert_shape(mean_jacobians, (num_samples, data_dims)) + mean_grads_from_jacobian = jnp.mean(mean_jacobians, axis=0) + + log_scale_jacobians = jacobians[1] + chex.assert_shape(log_scale_jacobians, (num_samples, data_dims)) + log_scale_grads_from_jacobian = jnp.mean(log_scale_jacobians, axis=0) + + _assert_equal(mean_grads_from_jacobian, expected_mean_grads, + rtol=1e-1, atol=1e-3) + + _assert_equal(log_scale_grads_from_jacobian, expected_log_scale_grads, + rtol=1e-1, atol=1e-3) + + +class ConsistencyWithStandardEstimators(chex.TestCase): + """Tests for consistency between estimators.""" + + @chex.all_variants + @parameterized.named_parameters( + chex.params_product([ + ('_score_function_jacobians', 1, 1, sge.score_function_jacobians, + 10**6), + ('_pathwise_jacobians', 1, 1, sge.pathwise_jacobians, 10**5), + ('_measure_valued_jacobians', 1, 1, sge.measure_valued_jacobians, + 10**5), + ], [ + ('control_delta_method', control_variates.control_delta_method), + ('moving_avg_baseline', control_variates.moving_avg_baseline), + ], + named=True)) + def testWeightedLinearFunction(self, effective_mean, effective_log_scale, + grad_estimator, num_samples, + control_variate_from_function): + """Check that the gradients are consistent between estimators.""" + weights = jnp.array([1., 2., 3.], dtype=jnp.float32) + data_dims = len(weights) + + mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32) + log_scale = effective_log_scale * jnp.ones( + shape=(data_dims), dtype=jnp.float32) + + params = [mean, log_scale] + function = lambda x: 
jnp.sum(weights * x) + rng = jax.random.PRNGKey(1) + cv_rng, ge_rng = jax.random.split(rng) + + jacobians = _cv_jac_variant(self.variant)( + function, + control_variate_from_function, + grad_estimator, + params, + utils.multi_normal, # dist_builder + cv_rng, # rng + num_samples, + (0., 0), # control_variate_state + False)[0] + + mean_jacobians = jacobians[0] + chex.assert_shape(mean_jacobians, (num_samples, data_dims)) + mean_grads = jnp.mean(mean_jacobians, axis=0) + + log_scale_jacobians = jacobians[1] + chex.assert_shape(log_scale_jacobians, (num_samples, data_dims)) + log_scale_grads = jnp.mean(log_scale_jacobians, axis=0) + + # We use a different random number generator for the gradient estimator + # without the control variate. + no_cv_jacobians = grad_estimator( + function, [mean, log_scale], + utils.multi_normal, ge_rng, num_samples=num_samples) + + no_cv_mean_jacobians = no_cv_jacobians[0] + chex.assert_shape(no_cv_mean_jacobians, (num_samples, data_dims)) + no_cv_mean_grads = jnp.mean(no_cv_mean_jacobians, axis=0) + + no_cv_log_scale_jacobians = no_cv_jacobians[1] + chex.assert_shape(no_cv_log_scale_jacobians, (num_samples, data_dims)) + no_cv_log_scale_grads = jnp.mean(no_cv_log_scale_jacobians, axis=0) + + _assert_equal(mean_grads, no_cv_mean_grads, rtol=1e-1, atol=5e-2) + _assert_equal(log_scale_grads, no_cv_log_scale_grads, rtol=1, atol=5e-2) + + @chex.all_variants + @parameterized.named_parameters( + chex.params_product([ + ('_score_function_jacobians', 1, 1, sge.score_function_jacobians, + 10**5), + ('_pathwise_jacobians', 1, 1, sge.pathwise_jacobians, 10**5), + ('_measure_valued_jacobians', 1, 1, sge.measure_valued_jacobians, + 10**5), + ], [ + ('control_delta_method', control_variates.control_delta_method), + ('moving_avg_baseline', control_variates.moving_avg_baseline), + ], + named=True)) + def testNonPolynomialFunction( + self, effective_mean, effective_log_scale, + grad_estimator, num_samples, control_variate_from_function): + """Check that 
the gradients are consistent between estimators.""" + data_dims = 3 + + mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32) + log_scale = effective_log_scale * jnp.ones( + shape=(data_dims), dtype=jnp.float32) + + params = [mean, log_scale] + function = lambda x: jnp.log(jnp.sum(x**2)) + rng = jax.random.PRNGKey(1) + cv_rng, ge_rng = jax.random.split(rng) + + jacobians = _cv_jac_variant(self.variant)( + function, + control_variate_from_function, + grad_estimator, + params, + utils.multi_normal, + cv_rng, + num_samples, + (0., 0), # control_variate_state + False)[0] + + mean_jacobians = jacobians[0] + chex.assert_shape(mean_jacobians, (num_samples, data_dims)) + mean_grads = jnp.mean(mean_jacobians, axis=0) + + log_scale_jacobians = jacobians[1] + chex.assert_shape(log_scale_jacobians, (num_samples, data_dims)) + log_scale_grads = jnp.mean(log_scale_jacobians, axis=0) + + # We use a different random number generator for the gradient estimator + # without the control variate. 
+ no_cv_jacobians = grad_estimator( + function, [mean, log_scale], + utils.multi_normal, ge_rng, num_samples=num_samples) + + no_cv_mean_jacobians = no_cv_jacobians[0] + chex.assert_shape(no_cv_mean_jacobians, (num_samples, data_dims)) + no_cv_mean_grads = jnp.mean(no_cv_mean_jacobians, axis=0) + + no_cv_log_scale_jacobians = no_cv_jacobians[1] + chex.assert_shape(no_cv_log_scale_jacobians, (num_samples, data_dims)) + no_cv_log_scale_grads = jnp.mean(no_cv_log_scale_jacobians, axis=0) + + _assert_equal(mean_grads, no_cv_mean_grads, rtol=1e-1, atol=5e-2) + _assert_equal(log_scale_grads, no_cv_log_scale_grads, rtol=1e-1, atol=5e-2) + + +if __name__ == '__main__': + absltest.main() diff --git a/lib/python3.10/site-packages/optax/_src/factorized.py b/lib/python3.10/site-packages/optax/_src/factorized.py new file mode 100644 index 0000000000000000000000000000000000000000..fbad60fbdeaceb8c6e9338f71e599ac78d4cbc54 --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/factorized.py @@ -0,0 +1,199 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Factorized optimizers.""" + +import dataclasses +from typing import NamedTuple, Optional, Tuple, Callable + +import chex +import jax +import jax.numpy as jnp +import numpy as np + +from optax._src import base +from optax._src import numerics +from optax._src import utils + +# pylint:disable=no-value-for-parameter + + +def _decay_rate_pow(i: int, exponent: float = 0.8) -> float: + """Second-order moment decay schedule.""" + t = jnp.array(i, jnp.float32) + 1.0 + return 1.0 - t**(-exponent) + + +def _factored_dims( + shape: base.Shape, + factored: bool, + min_dim_size_to_factor: int +) -> Optional[Tuple[int, int]]: + """Whether to use a factored second moment estimator. + + This function returns a tuple with the two largest axes to reduce over. + If no two dimensions have size >= min_dim_size_to_factor, return None. + + Args: + shape: an input shape + factored: whether to use factored second-moment estimator for 2d vars. + min_dim_size_to_factor: only factor accumulator if two array dimensions + have at least this size. + + Returns: + None or a tuple of ints + """ + if not factored or len(shape) < 2: + return None + sorted_dims = np.argsort(shape) + if shape[sorted_dims[-2]] < min_dim_size_to_factor: + return None + return int(sorted_dims[-2]), int(sorted_dims[-1]) + + +@dataclasses.dataclass +class _UpdateResult: + """Opaque containter that is not traversed by jax.tree_util.tree_map.""" + update: chex.Array # the update to apply to params + v_row: chex.Array # used for factored params. + v_col: chex.Array # used for factored params. + v: chex.Array # used for params where factoring is skipped. + + +class FactoredState(NamedTuple): + """Overall state of the gradient transformation.""" + count: chex.Array # number of update steps. + v_row: chex.ArrayTree # Tree of factored params. + v_col: chex.ArrayTree # Tree of factored params. 
+ v: chex.ArrayTree # Tree for params where factoring is skipped. + + +def scale_by_factored_rms( + factored: bool = True, + decay_rate: float = 0.8, + step_offset: int = 0, + min_dim_size_to_factor: int = 128, + epsilon: float = 1e-30, + decay_rate_fn: Callable[[int, float], chex.Array] = _decay_rate_pow): + """Scaling by a factored estimate of the gradient rms (as in Adafactor). + + This is a so-called "1+epsilon" scaling algorithms, that is extremely memory + efficient compared to RMSProp/Adam, and has had wide success when applied to + large-scale training of attention-based models. + + References: + [Shazeer et al, 2018](https://arxiv.org/abs/1804.04235) + + Args: + factored: boolean: whether to use factored second-moment estimates.. + decay_rate: float: controls second-moment exponential decay schedule. + step_offset: for finetuning, one may set this to the starting step-number + of the fine tuning phase. + min_dim_size_to_factor: only factor accumulator if two array dimensions + are at least this size. + epsilon: Regularization constant for squared gradient. + decay_rate_fn: A function that accepts the current step, the decay rate + parameter and controls the schedule for the second momentum. Defaults to + the original adafactor's power decay schedule. One potential shortcoming + of the orignal schedule is the fact that second momentum converges to 1, + which effectively freezes the second momentum. To prevent this the user + can opt for a custom schedule that sets an upper bound for the second + momentum, like in [Zhai et al., 2021](https://arxiv.org/abs/2106.04560). + + Returns: + the corresponding `GradientTransformation`. 
+ """ + + def _to_state(count: chex.Array, result_tree): + """Maps from a tree of (factored) values to separate trees of values.""" + return FactoredState( + count=count, + v_row=jax.tree_util.tree_map(lambda o: o.v_row, result_tree), + v_col=jax.tree_util.tree_map(lambda o: o.v_col, result_tree), + v=jax.tree_util.tree_map(lambda o: o.v, result_tree)) + + def init_fn(params): + """Initialise the optimiser's state.""" + + def _init(param): + shape = param.shape + factored_dims = _factored_dims(shape, factored, min_dim_size_to_factor) + if factored_dims is not None: + d1, d0 = factored_dims + vr_shape = np.delete(shape, d0) + vc_shape = np.delete(shape, d1) + return _UpdateResult( + update=jnp.zeros((1,)), + v_row=jnp.zeros(vr_shape), + v_col=jnp.zeros(vc_shape), + v=jnp.zeros((1,))) + else: + return _UpdateResult( + update=jnp.zeros((1,)), + v_row=jnp.zeros((1,)), + v_col=jnp.zeros((1,)), + v=jnp.zeros(param.shape)) + + return _to_state( + jnp.zeros([], jnp.int32), jax.tree_util.tree_map(_init, params)) + + def update_fn(grads, state, params): + """Apply gradient transformation.""" + if params is None: + raise ValueError(base.NO_PARAMS_MSG) + + def _update(grad, v_row, v_col, v, param, step): + shape = param.shape + decay_rate_t = decay_rate_fn(step - step_offset, decay_rate) + + # Scaled by factorized second moment statistics. + new_v_row = jnp.zeros((1,)) + new_v_col = jnp.zeros((1,)) + new_v = jnp.zeros((1,)) + + factored_dims = _factored_dims(shape, factored, min_dim_size_to_factor) + if factored_dims is not None: + d1, d0 = factored_dims + grad_sqr = numerics.abs_sq(grad) + epsilon + new_v_row = ( + decay_rate_t * v_row + + (1. - decay_rate_t) * jnp.mean(grad_sqr, axis=d0)) + new_v_col = ( + decay_rate_t * v_col + + (1. 
- decay_rate_t) * jnp.mean(grad_sqr, axis=d1)) + reduced_d1 = d1-1 if d1 > d0 else d1 + row_col_mean = jnp.mean(new_v_row, axis=reduced_d1, keepdims=True) + row_factor = (new_v_row / row_col_mean) ** -0.5 + col_factor = (new_v_col) ** -0.5 + update = ( + grad * + jnp.expand_dims(row_factor, axis=d0) * + jnp.expand_dims(col_factor, axis=d1)) + else: + grad_sqr = numerics.abs_sq(grad) + epsilon + new_v = decay_rate_t * v + (1. - decay_rate_t) * grad_sqr + update = grad * (new_v)**-0.5 + + return _UpdateResult(update, new_v_row, new_v_col, new_v) + + # Transform grad and compute new per-parameter stats. + output = jax.tree_util.tree_map( + lambda *args: _update(*args, state.count), + grads, state.v_row, state.v_col, state.v, params) + + # Unpack updates / stats and return. + updates = jax.tree_util.tree_map(lambda o: o.update, output) + return updates, _to_state(utils.safe_int32_increment(state.count), output) + + return base.GradientTransformation(init_fn, update_fn) diff --git a/lib/python3.10/site-packages/optax/_src/float64_test.py b/lib/python3.10/site-packages/optax/_src/float64_test.py new file mode 100644 index 0000000000000000000000000000000000000000..70fee32a34f03429fe78f3c1850f7315b26e2646 --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/float64_test.py @@ -0,0 +1,94 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests that types are preserved by the `update` calls when jax_enbable_x64.""" + +from absl.testing import absltest +from absl.testing import parameterized + +import chex +import jax +from jax.config import config +import jax.numpy as jnp + +from optax._src import alias +from optax._src import base +from optax._src import clipping +from optax._src import transform +from optax._src import update + + +ALL_MODULES = [ + ('identity', base.identity, {}), + ('clip', clipping.clip, dict(max_delta=1.0)), + ('clip_by_global_norm', clipping.clip_by_global_norm, dict(max_norm=1.0)), + ('trace', transform.trace, dict(decay=0.5, nesterov=False)), + ('trace_with_nesterov', transform.trace, dict(decay=0.5, nesterov=True)), + ('scale_by_rss', transform.scale_by_rss, {}), + ('scale_by_rms', transform.scale_by_rms, {}), + ('scale_by_stddev', transform.scale_by_stddev, {}), + ('adam', transform.scale_by_adam, {}), + ('scale', transform.scale, dict(step_size=3.0)), + ('additive_weight_decay', transform.additive_weight_decay, + dict(weight_decay=0.1)), + ('scale_by_schedule', transform.scale_by_schedule, + dict(step_size_fn=lambda x: x * 0.1)), + ('scale_by_trust_ratio', transform.scale_by_trust_ratio, {}), + ('add_noise', transform.add_noise, dict(eta=1.0, gamma=0.1, seed=42)), + ('apply_every_k', transform.apply_every, {}), + ('adagrad', alias.adagrad, dict(learning_rate=0.1)), + ('adam', alias.adam, dict(learning_rate=0.1)), + ('adamw', alias.adamw, dict(learning_rate=0.1)), + ('fromage', alias.fromage, dict(learning_rate=0.1)), + ('lamb', alias.lamb, dict(learning_rate=0.1)), + ('noisy_sgd', alias.noisy_sgd, dict(learning_rate=0.1)), + ('rmsprop', alias.rmsprop, dict(learning_rate=0.1)), + ('sgd', alias.sgd, dict(learning_rate=0.1)), + ('dpsgd', alias.dpsgd, + dict(learning_rate=0.1, l2_norm_clip=0.9, noise_multiplier=1.1, seed=42)), +] + + +class Float64Test(parameterized.TestCase): + + def 
_assert_dtype_equals(self, tree1, tree2): + tree1_types = jax.tree_util.tree_map(lambda t: t.dtype, tree1) + tree2_types = jax.tree_util.tree_map(lambda t: t.dtype, tree2) + self.assertEqual(tree1_types, tree2_types) + + @chex.all_variants + @parameterized.named_parameters(ALL_MODULES) + def test_mixed_dtype_input_outputs(self, transform_constr, transform_kwargs): + initial_params = ( + jnp.array([1., 2.], dtype=jnp.float32), + jnp.array([3., 4.], dtype=jnp.float64)) + updates = ( + jnp.array([10., 21.], dtype=jnp.float32), + jnp.array([33., 42.], dtype=jnp.float64)) + scaler = transform_constr(**transform_kwargs) + init_fn = self.variant(scaler.init) + update_fn = self.variant(scaler.update) + + initial_state = init_fn(initial_params) + updates, new_state = update_fn( + updates, initial_state, params=initial_params) + new_params = update.apply_updates(initial_params, updates) + + self._assert_dtype_equals(initial_state, new_state) + self._assert_dtype_equals(initial_params, new_params) + + +if __name__ == '__main__': + config.update('jax_enable_x64', True) + absltest.main() diff --git a/lib/python3.10/site-packages/optax/_src/linear_algebra.py b/lib/python3.10/site-packages/optax/_src/linear_algebra.py new file mode 100644 index 0000000000000000000000000000000000000000..420aff36feb0b996f89a8cf392e43e38fdc6cd87 --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/linear_algebra.py @@ -0,0 +1,201 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Linear algebra utilities used in optimisation.""" + +import chex +import jax +from jax import lax +import jax.numpy as jnp +import numpy as np + +from optax._src import base +from optax._src import numerics + + +def global_norm(updates: base.Updates) -> base.Updates: + """Compute the global norm across a nested structure of tensors.""" + return jnp.sqrt(sum( + jnp.sum(numerics.abs_sq(x)) for x in jax.tree_util.tree_leaves(updates))) + + +def power_iteration(matrix: chex.Array, + num_iters: int = 100, + error_tolerance: float = 1e-6, + precision: lax.Precision = lax.Precision.HIGHEST): + r"""Power iteration algorithm. + + The power iteration algorithm takes a symmetric PSD matrix `A`, and produces + a scalar `\lambda` , which is the greatest (in absolute value) eigenvalue + of `A`, and a vector v, which is the corresponding eigenvector of `A`. + + References: + [Wikipedia, 2021](https://en.wikipedia.org/wiki/Power_iteration) + + Args: + matrix: the symmetric PSD matrix. + num_iters: Number of iterations. + error_tolerance: Iterative exit condition. + precision: precision XLA related flag, the available options are: + a) lax.Precision.DEFAULT (better step time, but not precise); + b) lax.Precision.HIGH (increased precision, slower); + c) lax.Precision.HIGHEST (best possible precision, slowest). 
+ + Returns: + eigen vector, eigen value + """ + matrix_size = matrix.shape[-1] + def _iter_condition(state): + i, unused_v, unused_s, unused_s_v, run_step = state + return jnp.logical_and(i < num_iters, run_step) + + def _iter_body(state): + """One step of power iteration.""" + i, new_v, s, s_v, unused_run_step = state + new_v = new_v / jnp.linalg.norm(new_v) + + s_v = jnp.einsum('ij,j->i', matrix, new_v, precision=precision) + s_new = jnp.einsum('i,i->', new_v, s_v, precision=precision) + return (i + 1, s_v, s_new, s_v, + jnp.greater(jnp.abs(s_new - s), error_tolerance)) + + # Figure out how to use step as seed for random. + v_0 = np.random.uniform(-1.0, 1.0, matrix_size).astype(matrix.dtype) + + init_state = tuple([0, v_0, jnp.zeros([], dtype=matrix.dtype), v_0, True]) + _, v_out, s_out, _, _ = lax.while_loop( + _iter_condition, _iter_body, init_state) + v_out = v_out / jnp.linalg.norm(v_out) + return v_out, s_out + + +def matrix_inverse_pth_root(matrix: chex.Array, + p: int, + num_iters: int = 100, + ridge_epsilon: float = 1e-6, + error_tolerance: float = 1e-6, + precision: lax.Precision = lax.Precision.HIGHEST): + """Computes `matrix^(-1/p)`, where `p` is a positive integer. + + This function uses the Coupled newton iterations algorithm for + the computation of a matrix's inverse pth root. + + + References: + [Functions of Matrices, Theory and Computation, + Nicholas J Higham, Pg 184, Eq 7.18]( + https://epubs.siam.org/doi/book/10.1137/1.9780898717778) + + Args: + matrix: the symmetric PSD matrix whose power it to be computed + p: exponent, for p a positive integer. + num_iters: Maximum number of iterations. + ridge_epsilon: Ridge epsilon added to make the matrix positive definite. + error_tolerance: Error indicator, useful for early termination. 
+ precision: precision XLA related flag, the available options are: + a) lax.Precision.DEFAULT (better step time, but not precise); + b) lax.Precision.HIGH (increased precision, slower); + c) lax.Precision.HIGHEST (best possible precision, slowest). + + Returns: + matrix^(-1/p) + """ + + # We use float32 for the matrix inverse pth root. + # Switch to f64 if you have hardware that supports it. + matrix_size = matrix.shape[0] + alpha = jnp.asarray(-1.0 / p, jnp.float32) + identity = jnp.eye(matrix_size, dtype=jnp.float32) + _, max_ev = power_iteration( + matrix=matrix, num_iters=100, + error_tolerance=1e-6, precision=precision) + ridge_epsilon = ridge_epsilon * jnp.maximum(max_ev, 1e-16) + + def _unrolled_mat_pow_1(mat_m): + """Computes mat_m^1.""" + return mat_m + + def _unrolled_mat_pow_2(mat_m): + """Computes mat_m^2.""" + return jnp.matmul(mat_m, mat_m, precision=precision) + + def _unrolled_mat_pow_4(mat_m): + """Computes mat_m^4.""" + mat_pow_2 = _unrolled_mat_pow_2(mat_m) + return jnp.matmul( + mat_pow_2, mat_pow_2, precision=precision) + + def _unrolled_mat_pow_8(mat_m): + """Computes mat_m^4.""" + mat_pow_4 = _unrolled_mat_pow_4(mat_m) + return jnp.matmul( + mat_pow_4, mat_pow_4, precision=precision) + + def mat_power(mat_m, p): + """Computes mat_m^p, for p == 1, 2, 4 or 8. + + Args: + mat_m: a square matrix + p: a positive integer + + Returns: + mat_m^p + """ + # We unrolled the loop for performance reasons. 
+ exponent = jnp.round(jnp.log2(p)) + return lax.switch( + jnp.asarray(exponent, jnp.int32), [ + _unrolled_mat_pow_1, + _unrolled_mat_pow_2, + _unrolled_mat_pow_4, + _unrolled_mat_pow_8, + ], (mat_m)) + + def _iter_condition(state): + (i, unused_mat_m, unused_mat_h, unused_old_mat_h, error, + run_step) = state + error_above_threshold = jnp.logical_and( + error > error_tolerance, run_step) + return jnp.logical_and(i < num_iters, error_above_threshold) + + def _iter_body(state): + (i, mat_m, mat_h, unused_old_mat_h, error, unused_run_step) = state + mat_m_i = (1 - alpha) * identity + alpha * mat_m + new_mat_m = jnp.matmul(mat_power(mat_m_i, p), mat_m, precision=precision) + new_mat_h = jnp.matmul(mat_h, mat_m_i, precision=precision) + new_error = jnp.max(jnp.abs(new_mat_m - identity)) + # sometimes error increases after an iteration before decreasing and + # converging. 1.2 factor is used to bound the maximal allowed increase. + return (i + 1, new_mat_m, new_mat_h, mat_h, new_error, + new_error < error * 1.2) + + if matrix_size == 1: + resultant_mat_h = (matrix + ridge_epsilon)**alpha + error = 0 + else: + damped_matrix = matrix + ridge_epsilon * identity + + z = (1 + p) / (2 * jnp.linalg.norm(damped_matrix)) + new_mat_m_0 = damped_matrix * z + new_error = jnp.max(jnp.abs(new_mat_m_0 - identity)) + new_mat_h_0 = identity * jnp.power(z, 1.0 / p) + init_state = tuple( + [0, new_mat_m_0, new_mat_h_0, new_mat_h_0, new_error, True]) + _, mat_m, mat_h, old_mat_h, error, convergence = lax.while_loop( + _iter_condition, _iter_body, init_state) + error = jnp.max(jnp.abs(mat_m - identity)) + is_converged = jnp.asarray(convergence, old_mat_h.dtype) + resultant_mat_h = is_converged * mat_h + (1 - is_converged) * old_mat_h + resultant_mat_h = jnp.asarray(resultant_mat_h, matrix.dtype) + return resultant_mat_h, error diff --git a/lib/python3.10/site-packages/optax/_src/lookahead.py b/lib/python3.10/site-packages/optax/_src/lookahead.py new file mode 100644 index 
0000000000000000000000000000000000000000..c0c42023f46831d4acb4249d5524b46ab02def46 --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/lookahead.py @@ -0,0 +1,192 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A lookahead optimization wrapper.""" + +from typing import NamedTuple, Tuple + +from absl import logging +import jax +import jax.numpy as jnp + +from optax._src import base + +# pylint:disable=no-value-for-parameter + + +class LookaheadState(NamedTuple): + """State of the `GradientTransformation` returned by `lookahead`. + + Attributes: + fast_state: Optimizer state of the fast optimizer. + steps_since_sync: Number of fast optimizer steps taken since slow and fast + parameters were synchronized. + """ + fast_state: base.OptState + steps_since_sync: jnp.ndarray + + +class LookaheadParams(NamedTuple): + """Holds a pair of slow and fast parameters for the lookahead optimizer. + + Gradients should always be calculated with the fast parameters. The slow + parameters should be used for testing and inference as they generalize better. + See the reference for a detailed discussion. + + References: + [Zhang et al, 2019](https://arxiv.org/pdf/1907.08610v1.pdf) + + Attributes: + fast: Fast parameters. + slow: Slow parameters. 
+ """ + fast: base.Params + slow: base.Params + + @classmethod + def init_synced(cls, params: base.Params) -> 'LookaheadParams': + """Initialize a pair of synchronized lookahead parameters.""" + return cls(slow=params, fast=params) + + +def lookahead( + fast_optimizer: base.GradientTransformation, + sync_period: int, + slow_step_size: float, + reset_state: bool = False +) -> base.GradientTransformation: + """Lookahead optimizer. + + Performs steps with a fast optimizer and periodically updates a set of slow + parameters. Optionally resets the fast optimizer state after synchronization + by calling the init function of the fast optimizer. + + Updates returned by the lookahead optimizer should not be modified before they + are applied, otherwise fast and slow parameters are not synchronized + correctly. + + References: + [Zhang et al, 2019](https://arxiv.org/pdf/1907.08610v1.pdf) + + Args: + fast_optimizer: The optimizer to use in the inner loop of lookahead. + sync_period: Number of fast optimizer steps to take before synchronizing + parameters. Must be >= 1. + slow_step_size: Step size of the slow parameter updates. + reset_state: Whether to reset the optimizer state of the fast opimizer after + each synchronization. + + Returns: + A `GradientTransformation` with init and update functions. The updates + passed to the update function should be calculated using the fast lookahead + parameters only. + """ + if sync_period < 1: + raise ValueError('Synchronization period must be >= 1.') + + def init_fn(params: base.Params) -> LookaheadState: + try: + fast_params = params.fast + except AttributeError: + # Allowing init_fn to be called with fast parameters reduces the + # modifications necessary to adapt code to use lookahead in some cases. + logging.warning( + '`params` has no attribute `fast`. 
Continuing by assuming that ' + 'only fast parameters were passed to lookahead init.') + fast_params = params + + return LookaheadState( + fast_state=fast_optimizer.init(fast_params), + steps_since_sync=jnp.zeros(shape=(), dtype=jnp.int32)) + + def update_fn( + updates: base.Updates, state: LookaheadState, + params: LookaheadParams) -> Tuple[LookaheadParams, LookaheadState]: + updates, fast_state = fast_optimizer.update(updates, state.fast_state, + params.fast) + + sync_next = (state.steps_since_sync == sync_period - 1) + updates = _lookahead_update(updates, sync_next, params, slow_step_size) + if reset_state: + # Jittable way of resetting the fast optimizer state if parameters will be + # synchronized after this update step. + initial_state = fast_optimizer.init(params.fast) + fast_state = jax.tree_util.tree_map( + lambda current, init: (1 - sync_next) * current + sync_next * init, + fast_state, initial_state) + + steps_since_sync = (state.steps_since_sync + 1) % sync_period + return updates, LookaheadState(fast_state, steps_since_sync) + + return base.GradientTransformation(init_fn, update_fn) + + +def _lookahead_update( + updates: base.Updates, sync_next: bool, params: LookaheadParams, + slow_step_size: float) -> LookaheadParams: + """Returns the updates corresponding to one lookahead step. + + References: + [Zhang et al, 2019](https://arxiv.org/pdf/1907.08610v1.pdf) + + Args: + updates: Updates returned by the fast optimizer. + sync_next: Wether fast and slow parameters should be synchronized after the + fast optimizer step. + params: Current fast and slow parameters as `LookaheadParams` object. + slow_step_size: Step size of the slow optimizer. + + Returns: + The updates for the lookahead parameters. + """ + # In the paper, lookahead is presented as two nested loops. To write lookahead + # as optax wrapper, these loops have to be broken into successive updates. 
+ # This leads to two types of update steps: + # + # Non-synchronization steps (sync_next == False): + # The updates returned by the fast optimizer are used for the fast parameters + # without change and the slow parameter updates are zero (i.e. fast_updates = + # updates, slow_updates = 0). + # + # Synchronisation step (sync_next == True): + # This consists of two substeps: a last fast optimizer step and the + # synchronization. + # Substep 1 (last fast optimizer step): + # last_fast_params = fast_params + updates + # Substep 2 (synchronization): + # new_slow_params = slow_params + slow_step_size * ( + # last_fast_params - slow_params) + # new_fast_params = new_slow_params + # + # Merging into a single update step we get the update rules: + # slow_updates = slow_step_size * (fast_params + updates - slow_params) + # fast_updates = new_slow_params - fast_params = updates - (1 - + # slow_step_size) * (fast_params + updates - slow_params) + # + # To make the equations jittable, the two types of steps are merged. 
Defining + # last_difference = fast_params + updates - slow_params, this yields the + # following equtions which are implemented below: + # slow_updates = slow_step_size * sync_next * last_difference + # fast_updates = updates - ( + # 1 - slow_step_size) * sync_next * last_difference + last_difference = jax.tree_util.tree_map( + lambda f, u, s: f + u - s, params.fast, updates, params.slow) + slow_updates = jax.tree_util.tree_map( + lambda diff: slow_step_size * sync_next * diff, last_difference) + fast_updates = jax.tree_util.tree_map( + lambda up, diff: up - sync_next * (1 - slow_step_size) * diff, updates, + last_difference) + + return LookaheadParams(fast=fast_updates, slow=slow_updates) + diff --git a/lib/python3.10/site-packages/optax/_src/lookahead_test.py b/lib/python3.10/site-packages/optax/_src/lookahead_test.py new file mode 100644 index 0000000000000000000000000000000000000000..f0fad1414d476f0c1d93ff5dc6825114e48c835a --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/lookahead_test.py @@ -0,0 +1,140 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for `lookahead.py`.""" + +from typing import NamedTuple + +from absl.testing import absltest +from absl.testing import parameterized +import chex +import jax +import jax.numpy as jnp +import numpy as np +from optax._src import alias +from optax._src import base +from optax._src import lookahead +from optax._src import update + + +def _build_sgd(): + return alias.sgd(1.) + + +class TestOptimizerState(NamedTuple): + """Fast optimizer state for the lookahead tests.""" + aggregate_grads: base.Params + # Include a variable with non-zero initial value to check that it is reset + # correctly by the lookahead optimizer. + is_reset: bool = True + + +def _test_optimizer(step_size: float) -> base.GradientTransformation: + """Fast optimizer for the lookahead tests.""" + + # Use SGD for simplicity but add non-trivial optimizer state so that the + # resetting behaviour of lookahead can be tested. + def init_fn(params): + aggregate_grads = jax.tree_util.tree_map(jnp.zeros_like, params) + return TestOptimizerState(aggregate_grads, is_reset=True) + + def update_fn(updates, state, params): + # The test optimizer does not use the parameters, but we check that they + # have been passed correctly. 
+ chex.assert_trees_all_equal_shapes(updates, params) + aggregate_grads = update.apply_updates(state.aggregate_grads, updates) + updates = jax.tree_util.tree_map(lambda u: step_size * u, updates) + return updates, TestOptimizerState(aggregate_grads, is_reset=False) + + return base.GradientTransformation(init_fn, update_fn) + + +class LookaheadTest(chex.TestCase): + """Tests for the lookahead optimizer.""" + + def setUp(self): + super().setUp() + self.grads = {'x': np.array(2.), 'y': np.array(-2.)} + self.initial_params = {'x': np.array(3.), 'y': np.array(-3.)} + self.synced_initial_params = lookahead.LookaheadParams.init_synced( + self.initial_params) + + def loop(self, optimizer, num_steps, params): + """Performs a given number of optimizer steps.""" + init_fn, update_fn = optimizer + # Use the chex variant to check various function versions (jit, pmap, etc). + step = self.variant(update_fn) + opt_state = self.variant(init_fn)(params) + for _ in range(num_steps): + updates, opt_state = step(self.grads, opt_state, params) + params = update.apply_updates(params, updates) + + return params, opt_state + + @chex.all_variants + def test_lookahead(self): + """Tests the lookahead optimizer in an analytically tractable setting.""" + sync_period = 3 + optimizer = lookahead.lookahead( + _test_optimizer(-0.5), sync_period=sync_period, slow_step_size=1 / 3) + + final_params, _ = self.loop(optimizer, 2 * sync_period, + self.synced_initial_params) + # x steps must be: 3 -> 2 -> 1 -> 2 (sync) -> 1 -> 0 -> 1 (sync). + # Similarly for y (with sign flipped). 
+ correct_final_params = {'x': 1, 'y': -1} + chex.assert_tree_all_close(final_params.slow, correct_final_params) + + @chex.all_variants + @parameterized.parameters([False], [True]) + def test_lookahead_state_reset(self, reset_state): + """Checks that lookahead resets the fast optimizer state correctly.""" + num_steps = sync_period = 3 + fast_optimizer = _test_optimizer(-0.5) + optimizer = lookahead.lookahead( + fast_optimizer, + sync_period=sync_period, + slow_step_size=0.5, + reset_state=reset_state) + + _, opt_state = self.loop(optimizer, num_steps, self.synced_initial_params) + fast_state = opt_state.fast_state + if reset_state: + correct_state = fast_optimizer.init(self.initial_params) + else: + _, correct_state = self.loop(fast_optimizer, num_steps, + self.initial_params) + + chex.assert_tree_all_close(fast_state, correct_state) + + @chex.all_variants + @parameterized.parameters( + [1, 0.5, {'x': np.array(1.), 'y': np.array(-1.)}], + [1, 0, {'x': np.array(3.), 'y': np.array(-3.)}], + [1, 1, {'x': np.array(-1.), 'y': np.array(1.)}], + [2, 1, {'x': np.array(-1.), 'y': np.array(1.)}]) # pyformat: disable + def test_lookahead_edge_cases(self, sync_period, slow_step_size, + correct_result): + """Checks special cases of the lookahed optimizer parameters.""" + # These edge cases are important to check since users might use them as + # simple ways of disabling lookahead in experiments. 
+ optimizer = lookahead.lookahead( + _test_optimizer(-1), sync_period, slow_step_size) + final_params, _ = self.loop( + optimizer, num_steps=2, params=self.synced_initial_params) + chex.assert_tree_all_close(final_params.slow, correct_result) + + +if __name__ == '__main__': + absltest.main() diff --git a/lib/python3.10/site-packages/optax/_src/loss.py b/lib/python3.10/site-packages/optax/_src/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..53667711448bd2018f5d3d26985fb0331f4e2f1d --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/loss.py @@ -0,0 +1,521 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Standard losses used in optimisation. + +We provide implementations of the most canonical losses used in deep +learning. These operate transparently on batches, and do not perform any +reduction over the batch dimensions, leaving it to the user to, for instance, +mean or sum losses across batch dimensions. +""" + +from typing import Optional, Tuple + +import chex +import jax +import jax.numpy as jnp + +from optax._src import utils + + +def l2_loss( + predictions: chex.Array, + targets: Optional[chex.Array] = None, +) -> chex.Array: + """Calculates the L2 loss for a set of predictions. 
+ + Note: the 0.5 term is standard in "Pattern Recognition and Machine Learning" + by Bishop, but not "The Elements of Statistical Learning" by Tibshirani. + + References: + [Chris Bishop, 2006](https://bit.ly/3eeP0ga) + + Args: + predictions: a vector of arbitrary shape `[...]`. + targets: a vector with shape broadcastable to that of `predictions`; + if not provided then it is assumed to be a vector of zeros. + + Returns: + elementwise squared differences, with same shape as `predictions`. + """ + chex.assert_type([predictions], float) + if targets is not None: + # Avoid broadcasting logic for "-" operator. + chex.assert_equal_shape((predictions, targets)) + errors = (predictions - targets) if (targets is not None) else predictions + return 0.5 * (errors)**2 + + +def huber_loss( + predictions: chex.Array, + targets: Optional[chex.Array] = None, + delta: float = 1.) -> chex.Array: + """Huber loss, similar to L2 loss close to zero, L1 loss away from zero. + + If gradient descent is applied to the `huber loss`, it is equivalent to + clipping gradients of an `l2_loss` to `[-delta, delta]` in the backward pass. + + References: + [Huber, 1964](www.projecteuclid.org/download/pdf_1/euclid.aoms/1177703732) + + Args: + predictions: a vector of arbitrary shape `[...]`. + targets: a vector with shape broadcastable to that of `predictions`; + if not provided then it is assumed to be a vector of zeros. + delta: the bounds for the huber loss transformation, defaults at 1. + + Returns: + elementwise huber losses, with the same shape of `predictions`. + """ + chex.assert_type([predictions], float) + errors = (predictions - targets) if (targets is not None) else predictions + # 0.5 * err^2 if |err| <= d + # 0.5 * d^2 + d * (|err| - d) if |err| > d + abs_errors = jnp.abs(errors) + quadratic = jnp.minimum(abs_errors, delta) + # Same as max(abs_x - delta, 0) but avoids potentially doubling gradient. 
+ linear = abs_errors - quadratic + return 0.5 * quadratic ** 2 + delta * linear + + +def smooth_labels( + labels: chex.Array, + alpha: float, +) -> jnp.ndarray: + """Apply label smoothing. + + Label smoothing is often used in combination with a cross-entropy loss. + Smoothed labels favour small logit gaps, and it has been shown that this can + provide better model calibration by preventing overconfident predictions. + + References: + [Müller et al, 2019](https://arxiv.org/pdf/1906.02629.pdf) + + Args: + labels: one hot labels to be smoothed. + alpha: the smoothing factor, the greedy category with be assigned + probability `(1-alpha) + alpha / num_categories` + + Returns: + a smoothed version of the one hot input labels. + + """ + chex.assert_type([labels], float) + num_categories = labels.shape[-1] + return (1.0 - alpha) * labels + alpha / num_categories + + +def sigmoid_binary_cross_entropy(logits, labels): + """Computes element-wise sigmoid cross entropy given logits and labels. + + This can be used to measure the error in discrete classification tasks in + which each class is an independent binary prediction and different classes + are not mutually exclusive. This may be used for multilabel image + classification for instance a model may predict that an image contains both a + cat and a dog. + + References: + [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html) + + Args: + logits: Each element is the unnormalized log probability of a binary + prediction. + labels: The target probabilities, must have a shape broadcastable to that of + `logits`. + + Returns: + cross entropy for each binary prediction, same shape as `logits`. + """ + chex.assert_type([logits], float) + log_p = jax.nn.log_sigmoid(logits) + # log(1 - sigmoid(x)) = log_sigmoid(-x), the latter more numerically stable + log_not_p = jax.nn.log_sigmoid(-logits) + return -labels * log_p - (1. 
- labels) * log_not_p + + +def softmax_cross_entropy( + logits: chex.Array, + labels: chex.Array, +) -> chex.Array: + """Computes the softmax cross entropy between sets of logits and labels. + + Measures the probability error in discrete classification tasks in which + the classes are mutually exclusive (each entry is in exactly one class). + For example, each CIFAR-10 image is labeled with one and only one label: + an image can be a dog or a truck, but not both. + + References: + [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html) + + Args: + logits: Unnormalized log probabilities, with shape `[..., num_classes]`. + labels: Valid probability distributions (non-negative, sum to 1), e.g a + one hot encoding specifying the correct class for each input; + must have a shape broadcastable to `[..., num_classes]`` + + Returns: + cross entropy between each prediction and the corresponding target + distributions, with shape `[...]`. + """ + chex.assert_type([logits], float) + return -jnp.sum(labels * jax.nn.log_softmax(logits, axis=-1), axis=-1) + + +def softmax_cross_entropy_with_integer_labels( + logits: chex.Array, + labels: chex.Array, +) -> chex.Array: + """Computes softmax cross entropy between sets of logits and integer labels. + + Measures the probability error in discrete classification tasks in which + the classes are mutually exclusive (each entry is in exactly one class). + For example, each CIFAR-10 image is labeled with one and only one label: + an image can be a dog or a truck, but not both. + + References: + [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html) + + Args: + logits: Unnormalized log probabilities, with shape `[..., num_classes]`. + labels: Integers specifying the correct class for each input, with shape + `[...]`. + + Returns: + Cross entropy between each prediction and the corresponding target + distributions, with shape `[...]`. 
+ """ + chex.assert_type([logits], float) + chex.assert_type([labels], int) + # This is like jnp.take_along_axis(jax.nn.log_softmax(...), ...) except that + # we avoid subtracting the normalizer from all values, just from the values + # for the correct labels. + logits_max = jnp.max(logits, axis=-1, keepdims=True) + logits -= jax.lax.stop_gradient(logits_max) + label_logits = jnp.take_along_axis(logits, labels[..., None], axis=-1)[..., 0] + log_normalizers = jnp.log(jnp.sum(jnp.exp(logits), axis=-1)) + return log_normalizers - label_logits + + +def cosine_similarity( + predictions: chex.Array, + targets: chex.Array, + epsilon: float = 0., +) -> chex.Array: + r"""Computes the cosine similarity between targets and predictions. + + The cosine **similarity** is a measure of similarity between vectors defined + as the cosine of the angle between them, which is also the inner product of + those vectors normalized to have unit norm. + + References: + [Wikipedia, 2021](https://en.wikipedia.org/wiki/Cosine_similarity) + + Args: + predictions: The predicted vectors, with shape `[..., dim]`. + targets: Ground truth target vectors, with shape `[..., dim]`. + epsilon: minimum norm for terms in the denominator of the cosine similarity. + + Returns: + cosine similarity measures, with shape `[...]`. + """ + chex.assert_type([predictions, targets], float) + # vectorize norm fn, to treat all dimensions except the last as batch dims. + batched_norm_fn = jnp.vectorize( + utils.safe_norm, signature='(k)->()', excluded={1}) + # normalise the last dimension of targets and predictions. + unit_targets = targets / jnp.expand_dims( + batched_norm_fn(targets, epsilon), axis=-1) + unit_predictions = predictions / jnp.expand_dims( + batched_norm_fn(predictions, epsilon), axis=-1) + # return cosine similarity. 
+ return jnp.sum(unit_targets * unit_predictions, axis=-1) + + +def cosine_distance( + predictions: chex.Array, + targets: chex.Array, + epsilon: float = 0., +) -> chex.Array: + r"""Computes the cosine distance between targets and predictions. + + The cosine **distance**, implemented here, measures the **dissimilarity** + of two vectors as the opposite of cosine **similarity**: `1 - cos(\theta)`. + + References: + [Wikipedia, 2021](https://en.wikipedia.org/wiki/Cosine_similarity) + + Args: + predictions: The predicted vectors, with shape `[..., dim]`. + targets: Ground truth target vectors, with shape `[..., dim]`. + epsilon: minimum norm for terms in the denominator of the cosine similarity. + + Returns: + cosine distances, with shape `[...]`. + """ + chex.assert_type([predictions, targets], float) + # cosine distance = 1 - cosine similarity. + return 1. - cosine_similarity(predictions, targets, epsilon) + + +def log_cosh( + predictions: chex.Array, + targets: Optional[chex.Array] = None, +) -> chex.Array: + """Calculates the log-cosh loss for a set of predictions. + + log(cosh(x)) is approximately `(x**2) / 2` for small x and `abs(x) - log(2)` + for large x. It is a twice differentiable alternative to the Huber loss. + + References: + [Chen et al, 2019](https://openreview.net/pdf?id=rkglvsC9Ym) + + Args: + predictions: a vector of arbitrary shape `[...]`. + targets: a vector with shape broadcastable to that of `predictions`; + if not provided then it is assumed to be a vector of zeros. + + Returns: + the log-cosh loss, with same shape as `predictions`. 
+ """ + chex.assert_type([predictions], float) + errors = (predictions - targets) if (targets is not None) else predictions + # log(cosh(x)) = log((exp(x) + exp(-x))/2) = log(exp(x) + exp(-x)) - log(2) + return jnp.logaddexp(errors, -errors) - jnp.log(2.0).astype(errors.dtype) + + +def ctc_loss_with_forward_probs( + logits: chex.Array, + logit_paddings: chex.Array, + labels: chex.Array, + label_paddings: chex.Array, + blank_id: int = 0, + log_epsilon: float = -1e5) -> Tuple[chex.Array, chex.Array, chex.Array]: + r"""Computes CTC loss and CTC forward-probabilities. + + The CTC loss is a loss function based on log-likelihoods of the model that + introduces a special blank symbol :math:`\phi` to represent variable-length + output sequences. + + Forward probabilities returned by this function, as auxiliary results, are + grouped into two part: blank alpha-probability and non-blank alpha + probability. Those are defined as follows: + + .. math:: + \alpha_{\mathrm{BLANK}}(t, n) = + \sum_{\pi_{1:t-1}} p(\pi_t = \phi | \pi_{1:t-1}, y_{1:n-1}, \cdots), \\ + \alpha_{\mathrm{LABEL}}(t, n) = + \sum_{\pi_{1:t-1}} p(\pi_t = y_n | \pi_{1:t-1}, y_{1:n-1}, \cdots). + + Here, :math:`\pi` denotes the alignment sequence in the reference + [Graves et al, 2006] that is blank-inserted representations of ``labels``. + The return values are the logarithms of the above probabilities. + + References: + [Graves et al, 2006](https://dl.acm.org/doi/abs/10.1145/1143844.1143891) + + Args: + logits: (B, T, K)-array containing logits of each class where B denotes + the batch size, T denotes the max time frames in ``logits``, and K + denotes the number of classes including a class for blanks. + logit_paddings: (B, T)-array. Padding indicators for ``logits``. Each + element must be either 1.0 or 0.0, and ``logitpaddings[b, t] == 1.0`` + denotes that ``logits[b, t, :]`` are padded values. 
+ labels: (B, N)-array containing reference integer labels where N denotes + the max time frames in the label sequence. + label_paddings: (B, N)-array. Padding indicators for ``labels``. Each + element must be either 1.0 or 0.0, and ``labelpaddings[b, n] == 1.0`` + denotes that ``labels[b, n]`` is a padded label. In the current + implementation, ``labels`` must be right-padded, i.e. each row + ``labelpaddings[b, :]`` must be repetition of zeroes, followed by + repetition of ones. + blank_id: Id for blank token. ``logits[b, :, blank_id]`` are used as + probabilities of blank symbols. + log_epsilon: Numerically-stable approximation of log(+0). + + Returns: + A tuple ``(loss_value, logalpha_blank, logalpha_nonblank)``. Here, + ``loss_value`` is a (B,)-array containing the loss values for each sequence + in the batch, ``logalpha_blank`` and ``logalpha_nonblank`` are + (T, B, N+1)-arrays where the (t, b, n)-th element denotes + \log \alpha_B(t, n) and \log \alpha_L(t, n), respectively, for ``b``-th + sequence in the batch. + """ + + chex.assert_rank(logits, 3) + chex.assert_rank(labels, 2) + batchsize, unused_maxinputlen, num_classes = logits.shape + batchsize_of_labels, maxlabellen = labels.shape + chex.assert_equal(batchsize, batchsize_of_labels) + chex.assert_equal(labels.shape, label_paddings.shape) + chex.assert_equal(logits.shape[:2], logit_paddings.shape) + + logprobs = jax.nn.log_softmax(logits) + labellens = maxlabellen - jnp.sum(label_paddings, axis=1).astype(jnp.int32) + + # repeat[b, n] == 1.0 when label[b, n] == label[b, n+1]. 
+ repeat = (labels[:, :-1] == labels[:, 1:]).astype(jnp.float32) + repeat = jnp.pad(repeat, ((0, 0), (0, 1))) + + logprobs_phi = logprobs[:, :, blank_id:blank_id + 1] # [B, T, 1] + logprobs_phi = jnp.transpose(logprobs_phi, (1, 0, 2)) # [T, B, 1] + + one_hot = jax.nn.one_hot(labels, num_classes=num_classes) # [B, N, K] + logprobs_emit = jnp.einsum('btk,bnk->btn', logprobs, one_hot) + logprobs_emit = jnp.transpose(logprobs_emit, (1, 0, 2)) # [T, B, N] + + logalpha_phi_init = jnp.ones( + (batchsize, maxlabellen + 1)) * log_epsilon # [B, N] + logalpha_phi_init = logalpha_phi_init.at[:, 0].set(0.0) + logalpha_emit_init = jnp.ones((batchsize, maxlabellen)) * log_epsilon + + def update_phi_score(phi, added_score): + # Update `phi[:, 1:]`` with adding `added_score` in log space. + return jnp.concatenate( + [phi[:, :1], jnp.logaddexp(phi[:, 1:], added_score)], axis=-1) + + def loop_body(prev, x): + prev_phi, prev_emit = prev + # emit-to-phi epsilon transition, except if the next label is repetition + prev_phi_orig = prev_phi + prev_phi = update_phi_score(prev_phi, prev_emit + log_epsilon * repeat) + + logprob_emit, logprob_phi, pad = x + + # phi-to-emit transition + next_emit = jnp.logaddexp(prev_phi[:, :-1] + logprob_emit, + prev_emit + logprob_emit) + # self-loop transition + next_phi = prev_phi + logprob_phi + # emit-to-phi blank transition only when the next label is repetition + next_phi = update_phi_score( + next_phi, prev_emit + logprob_phi + log_epsilon * (1.0 - repeat)) + + pad = pad.reshape((batchsize, 1)) + next_emit = pad * prev_emit + (1.0 - pad) * next_emit + next_phi = pad * prev_phi_orig + (1.0 - pad) * next_phi + + return (next_phi, next_emit), (next_phi, next_emit) + + xs = (logprobs_emit, logprobs_phi, logit_paddings.transpose((1, 0))) + _, (logalpha_phi, + logalpha_emit) = jax.lax.scan(loop_body, + (logalpha_phi_init, logalpha_emit_init), xs) + + # last row needs to be updated with the last epsilon transition + logalpha_phi_last = 
update_phi_score(logalpha_phi[-1], logalpha_emit[-1]) + logalpha_phi = logalpha_phi.at[-1].set(logalpha_phi_last) + + # extract per_seq_loss + one_hot = jax.nn.one_hot(labellens, num_classes=maxlabellen + 1) # [B, N+1] + per_seq_loss = -jnp.einsum('bn,bn->b', logalpha_phi_last, one_hot) + + return per_seq_loss, logalpha_phi, logalpha_emit + + +def ctc_loss(logits: chex.Array, + logit_paddings: chex.Array, + labels: chex.Array, + label_paddings: chex.Array, + blank_id: int = 0, + log_epsilon: float = -1e5) -> chex.Array: + """Computes CTC loss. + + See docstring for ``ctc_loss_with_forward_probs`` for details. + + Args: + logits: (B, T, K)-array containing logits of each class where B denotes + the batch size, T denotes the max time frames in ``logits``, and K + denotes the number of classes including a class for blanks. + logit_paddings: (B, T)-array. Padding indicators for ``logits``. Each + element must be either 1.0 or 0.0, and ``logitpaddings[b, t] == 1.0`` + denotes that ``logits[b, t, :]`` are padded values. + labels: (B, N)-array containing reference integer labels where N denotes + the max time frames in the label sequence. + label_paddings: (B, N)-array. Padding indicators for ``labels``. Each + element must be either 1.0 or 0.0, and ``labelpaddings[b, n] == 1.0`` + denotes that ``labels[b, n]`` is a padded label. In the current + implementation, ``labels`` must be right-padded, i.e. each row + ``labelpaddings[b, :]`` must be repetition of zeroes, followed by + repetition of ones. + blank_id: Id for blank token. ``logits[b, :, blank_id]`` are used as + probabilities of blank symbols. + log_epsilon: Numerically-stable approximation of log(+0). + + Returns: + (B,)-array containing loss values for each sequence in the batch. 
+ """ + per_seq_loss, _, _ = ctc_loss_with_forward_probs( + logits, logit_paddings, labels, label_paddings, + blank_id=blank_id, log_epsilon=log_epsilon) + return per_seq_loss + + +def kl_divergence(log_predictions: chex.Array, + targets: chex.Array) -> chex.Array: + """Computes the Kullback-Leibler divergence (relative entropy) loss. + + Measures the information gain achieved if target probability distribution + would be used instead of predicted probability distribution. + + References: + [Kullback, Leibler, 1951](https://www.jstor.org/stable/2236703) + + Args: + log_predictions: Probabilities of predicted distribution with shape + [..., dim]. Expected to be in the log-space to avoid underflow. + targets: Probabilities of target distribution with shape [..., dim]. + Expected to be strictly positive. + + Returns: + Kullback-Leibler divergence of predicted distribution from target + distribution with shape [...]. + """ + chex.assert_type([log_predictions, targets], float) + loss = targets * (jnp.log(targets) - log_predictions) + return jnp.sum(loss, axis=-1) + + +def kl_divergence_with_log_targets(log_predictions: chex.Array, + log_targets: chex.Array) -> chex.Array: + """Computes the Kullback-Leibler divergence (relative entropy) loss. + + Version of kl_div_loss where targets are given in log-space. + + Args: + log_predictions: Probabilities of predicted distribution with shape + [..., dim]. Expected to be in the log-space to avoid underflow. + log_targets: Probabilities of target distribution with shape [..., dim]. + Expected to be in the log-space. + + Returns: + Kullback-Leibler divergence of predicted distribution from target + distribution with shape [...]. + """ + chex.assert_type([log_predictions, log_targets], float) + loss = jnp.exp(log_targets) * (log_targets - log_predictions) + return jnp.sum(loss, axis=-1) + + +def hinge_loss(predictor_outputs: chex.Array, + targets: chex.Array) -> chex.Array: + """Computes the hinge loss for binary classification. 
+ + Args: + predictor_outputs: Outputs of the decision function. + targets: Target values. Target values should be strictly in the set {-1, 1}. + + Returns: + Binary Hinge Loss. + """ + return jnp.maximum(0, 1 - predictor_outputs * targets) diff --git a/lib/python3.10/site-packages/optax/_src/loss_test.py b/lib/python3.10/site-packages/optax/_src/loss_test.py new file mode 100644 index 0000000000000000000000000000000000000000..eec36e552be4e2cee72a63fdb292da8f8ff55602 --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/loss_test.py @@ -0,0 +1,500 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for optax._src.loss.""" + +from absl.testing import absltest +from absl.testing import parameterized + +import chex +import jax +import jax.numpy as jnp +import numpy as np + +from optax._src import loss + + +class L2LossTest(parameterized.TestCase): + + def setUp(self): + super().setUp() + self.ys = jnp.array([-2., -1., 0.5, 1.]) + self.ts = jnp.array([-1.5, 0., -1, 1.]) + # compute expected outputs in numpy. 
+ self.exp = 0.5 * (self.ts - self.ys) ** 2 + + @chex.all_variants + def test_scalar(self): + np.testing.assert_allclose( + self.variant(loss.l2_loss)(self.ys[0], self.ts[0]), self.exp[0]) + + @chex.all_variants + def test_batched(self): + np.testing.assert_allclose( + self.variant(loss.l2_loss)(self.ys, self.ts), self.exp) + + @chex.all_variants + def test_shape_mismatch(self): + with self.assertRaises(AssertionError): + _ = self.variant(loss.l2_loss)(self.ys, jnp.expand_dims(self.ts, axis=-1)) + + +class HuberLossTest(parameterized.TestCase): + + def setUp(self): + super().setUp() + self.ys = np.array([-2.0, 0.5, 0., 0.5, 2.0, 4.0, 132.]) + self.ts = np.array([0.0, -0.5, 0., 1., 1.0, 2.0, 0.3]) + # computed expected outputs manually. + self.exp = np.array([1.5, 0.5, 0., 0.125, 0.5, 1.5, 131.2]) + + @chex.all_variants + def test_scalar(self): + np.testing.assert_allclose( + self.variant(loss.huber_loss)(self.ys[0], self.ts[0], delta=1.0), + self.exp[0]) + + @chex.all_variants + def test_batched(self): + np.testing.assert_allclose( + self.variant(loss.huber_loss)(self.ys, self.ts, delta=1.0), + self.exp) + + +class SmoothLabelsTest(parameterized.TestCase): + + def setUp(self): + super().setUp() + self.ts = np.array([[0., 1., 0.], [1., 0., 0.]], dtype=np.float32) + # compute expected outputs in numpy. 
+ self.exp_alpha_zero = self.ts + self.exp_alpha_zero_point_one = 0.9 * self.ts + 0.1 / self.ts.shape[-1] + self.exp_alpha_one = jnp.ones_like(self.ts) / self.ts.shape[-1] + + @chex.all_variants + def test_scalar(self): + """Tests for a full batch.""" + np.testing.assert_allclose( + self.variant(loss.smooth_labels)(self.ts[0], 0.), + self.exp_alpha_zero[0], atol=1e-4) + np.testing.assert_allclose( + self.variant(loss.smooth_labels)(self.ts[0], 0.1), + self.exp_alpha_zero_point_one[0], atol=1e-4) + np.testing.assert_allclose( + self.variant(loss.smooth_labels)(self.ts[0], 1.), + self.exp_alpha_one[0], atol=1e-4) + + @chex.all_variants + def test_batched(self): + """Tests for a full batch.""" + np.testing.assert_allclose( + self.variant(loss.smooth_labels)(self.ts, 0.), + self.exp_alpha_zero, atol=1e-4) + np.testing.assert_allclose( + self.variant(loss.smooth_labels)(self.ts, 0.1), + self.exp_alpha_zero_point_one, atol=1e-4) + np.testing.assert_allclose( + self.variant(loss.smooth_labels)(self.ts, 1.), + self.exp_alpha_one, atol=1e-4) + + +class SoftmaxCrossEntropyTest(parameterized.TestCase): + + def setUp(self): + super().setUp() + self.ys = np.array([[10., 1., -2.], [1., 4., 0.2]], dtype=np.float32) + self.ts = np.array([[0., 1., 0.], [1., 0., 0.]], dtype=np.float32) + # taken expected outputs from rlax. 
+ self.exp = np.array([9.00013, 3.0696733], dtype=np.float32) + + @chex.all_variants + def test_scalar(self): + """Tests for a full batch.""" + np.testing.assert_allclose( + self.variant(loss.softmax_cross_entropy)(self.ys[0], self.ts[0]), + self.exp[0], atol=1e-4) + + @chex.all_variants + def test_batched(self): + """Tests for a full batch.""" + np.testing.assert_allclose( + self.variant(loss.softmax_cross_entropy)(self.ys, self.ts), + self.exp, atol=1e-4) + + +class SoftmaxCrossEntropyWithIntegerLabelsTest(parameterized.TestCase): + + def setUp(self): + super().setUp() + self.ys = np.array([[10., 1., -2.], [1., 4., 0.2]], dtype=np.float32) + self.ts = np.array([1, 0], dtype=np.int32) + + @chex.all_variants + def test_consistent_with_softmax_cross_entropy_scalar(self): + """Tests for a scalar.""" + exp = loss.softmax_cross_entropy(self.ys[0], jax.nn.one_hot(self.ts[0], 3)) + np.testing.assert_allclose( + self.variant(loss.softmax_cross_entropy_with_integer_labels)( + self.ys[0], self.ts[0]), + exp, rtol=1e-6) + + @chex.all_variants + def test_consistent_with_softmax_cross_entropy_batched(self): + """Tests for a full batch.""" + exp = loss.softmax_cross_entropy(self.ys, jax.nn.one_hot(self.ts, 3)) + np.testing.assert_allclose( + self.variant(loss.softmax_cross_entropy_with_integer_labels)( + self.ys, self.ts), + exp, rtol=1e-6) + + +class SigmoidCrossEntropyTest(parameterized.TestCase): + + @parameterized.parameters( + dict(preds=np.array([-1e+09, -1e-09]), + labels=np.array([1., 0.]), + expected=5e+08), + dict(preds=np.array([-1e+09, -1e-09]), + labels=np.array([0., 1.]), + expected=0.3465736), + dict(preds=np.array([1e+09, 1e-09]), + labels=np.array([1., 0.]), + expected=0.3465736), + dict(preds=np.array([1e+09, 1e-09]), + labels=np.array([0., 1.]), + expected=5e+08), + dict(preds=np.array([-1e+09, 1e-09]), + labels=np.array([1., 0.]), + expected=5e+08), + dict(preds=np.array([-1e+09, 1e-09]), + labels=np.array([0., 1.]), + expected=0.3465736), + 
dict(preds=np.array([1e+09, -1e-09]), + labels=np.array([1., 0.]), + expected=0.3465736), + dict(preds=np.array([1e+09, -1e-09]), + labels=np.array([0., 1.]), + expected=5e+08), + dict(preds=np.array([0., 0.]), + labels=np.array([1., 0.]), + expected=0.6931472), + dict(preds=np.array([0., 0.]), + labels=np.array([0., 1.]), + expected=0.6931472), + ) + def testSigmoidCrossEntropy(self, preds, labels, expected): + tested = jnp.mean(loss.sigmoid_binary_cross_entropy(preds, labels)) + np.testing.assert_allclose(tested, expected, rtol=1e-6, atol=1e-6) + + +class CosineDistanceTest(parameterized.TestCase): + + def setUp(self): + super().setUp() + self.ys = np.array([[10., 1., -2.], [1., 4., 0.2]], dtype=np.float32) + self.ts = np.array([[0., 1.2, 0.2], [1., -0.3, 0.]], dtype=np.float32) + # distance computed expected output from `scipy 1.20`. + self.exp = np.array([0.9358251989, 1.0464068465], dtype=np.float32) + + @chex.all_variants + def test_scalar_distance(self): + """Tests for a full batch.""" + np.testing.assert_allclose( + self.variant(loss.cosine_distance)(self.ys[0], self.ts[0]), + self.exp[0], atol=1e-4) + + @chex.all_variants + def test_scalar_similarity(self): + """Tests for a full batch.""" + np.testing.assert_allclose( + self.variant(loss.cosine_similarity)(self.ys[0], self.ts[0]), + 1. - self.exp[0], atol=1e-4) + + @chex.all_variants + def test_batched_distance(self): + """Tests for a full batch.""" + np.testing.assert_allclose( + self.variant(loss.cosine_distance)(self.ys, self.ts), + self.exp, atol=1e-4) + + @chex.all_variants + def test_batched_similarity(self): + """Tests for a full batch.""" + np.testing.assert_allclose( + self.variant(loss.cosine_similarity)(self.ys, self.ts), + 1. - self.exp, atol=1e-4) + + +# TODO(b/188419459): add test for grad and second order grad. 
+class LogCoshTest(parameterized.TestCase): + + def setUp(self): + super().setUp() + # Test large values for overflow + self.ys = jnp.array([500, -2., -1., 0.5, 1.]) + self.ts = jnp.array([-200, -1.5, 0., -1, 1.]) + # computed using tensorflow.keras.losses.log_cosh v2.4.1 + self.exp = jnp.array([699.3068, 0.12011445, 0.4337809, 0.85544014, 0.]) + self.exp_ys_only = jnp.array( + [499.30685, 1.3250027, 0.4337809, 0.12011451, 0.43378082]) + + @chex.all_variants + def test_scalar(self): + out = self.variant(loss.log_cosh)(self.ys[0], self.ts[0]) + np.testing.assert_allclose(out, self.exp[0], atol=1e-5) + + @chex.all_variants + def test_batched(self): + out = self.variant(loss.log_cosh)(self.ys, self.ts) + np.testing.assert_allclose(out, self.exp, atol=1e-5) + + @chex.all_variants + def test_scalar_predictions_only(self): + out = self.variant(loss.log_cosh)(self.ys[0]) + np.testing.assert_allclose(out, self.exp_ys_only[0], atol=1e-5) + + @chex.all_variants + def test_batched_predictions_only(self): + out = self.variant(loss.log_cosh)(self.ys) + np.testing.assert_allclose(out, self.exp_ys_only, atol=1e-5) + + +def _lengths_to_paddings(lengths: chex.Array, maxlength: int) -> chex.Array: + indices = jnp.arange(maxlength).reshape((1,) * lengths.ndim + (maxlength,)) + lengths = jnp.expand_dims(lengths, axis=-1) + elem_valid = indices < lengths + return np.logical_not(elem_valid).astype(np.float32) + + +def _average_ctc_loss(logprobs: chex.Array, logprob_paddings: chex.Array, + labels: chex.Array, + label_paddings: chex.Array) -> chex.Array: + return jnp.average( + loss.ctc_loss(logprobs, logprob_paddings, labels, label_paddings)) + + +class CTCTest(parameterized.TestCase): + + def setUp(self): + super().setUp() + np.random.seed(1234) + self._rtol = 5e-3 if jax.default_backend() != 'cpu' else 1e-6 + + @chex.all_variants + def test_with_one_to_one_alignment(self): + # when inputsteps and outputsteps are equal, no blank will be allowed. 
+ batchsize = 8 + steps = 50 + nclasses = 40 + logits = np.random.randn(batchsize, steps, nclasses) + labels = np.random.uniform( + 1, nclasses, size=(batchsize, steps)).astype(np.int32) + + # This function only covers the cases without same-label repetition. + # `test_repeat_with_one_to_one_alignment` below complements those cases. + # So, redraw the samples for satisfying the non-repetition constraint. + for n in range(labels.shape[0]): + for t in range(1, labels.shape[1]): + while labels[n, t] == labels[n, t - 1]: + labels[n, t] = np.random.uniform(1, nclasses) + + results = self.variant(loss.ctc_loss_with_forward_probs)( + logits, np.zeros(logits.shape[:2]), + labels, np.zeros(labels.shape)) + (per_seq_loss, logalpha_blank, logalpha_emit) = results + + logprobs = jax.nn.log_softmax(logits) + for b in range(batchsize): + p = 0.0 + for t in range(steps): + p += logprobs[b, t, labels[b, t]] + np.testing.assert_allclose( + np.array(-p), per_seq_loss[b], rtol=self._rtol) + + # Check forward-probabilities. + # 1. All-phi path: logalpha_blank[-1, b, 0] must be a probability of + # the path that outputs blank symbols for all the frames. + np.testing.assert_allclose(logalpha_blank[-1, b, 0], + np.sum(logprobs[b, :, 0]), + rtol=self._rtol) + + # 2. After emitting all the labels + # the negated loss must be identical with the forward probability of + # paths after consuming all the labels (because one-to-one alignment + # doesn't allow extra blank symbols) + np.testing.assert_allclose(logalpha_emit[-1, b, steps - 1], + -per_seq_loss[b], + rtol=self._rtol) + # and, this forward probability must be copied to the blank forward + # probability of the next step. 
+ np.testing.assert_allclose(logalpha_blank[-1, b, steps], + -per_seq_loss[b], + rtol=self._rtol) + + @chex.all_variants + def test_with_one_to_one_alignment_and_paddings(self): + batch_size = 5 + nclasses = 13 + steps = 7 + logits = np.random.normal(size=[batch_size, steps, nclasses]) + logprobs = jax.nn.log_softmax(logits) + + labels = [] + for n in range(batch_size): + row = list(range(1, nclasses)) + np.random.shuffle(row) + labels.append(row[:steps]) + labels = np.array(labels) + + lengths = np.random.randint(3, 6, size=(batch_size,)) + paddings = _lengths_to_paddings(lengths, steps) + + actual_loss = self.variant(loss.ctc_loss)(logits, paddings, labels, + paddings) + + value_and_grad = self.variant(jax.value_and_grad(_average_ctc_loss)) + unused_avg_loss, actual_gradients = value_and_grad(logits, paddings, labels, + paddings) + + for n in range(batch_size): + expected_loss = -sum(logprobs[n, t, k] + for t, k in enumerate(labels[n, :lengths[n]])) + np.testing.assert_allclose(expected_loss, actual_loss[n], rtol=self._rtol) + + expected_gradients = np.array(jax.nn.softmax(logits[n])) + expected_gradients[lengths[n]:] = 0.0 + for t, k in enumerate(labels[n, :lengths[n]]): + expected_gradients[t, k] -= 1.0 + expected_gradients /= batch_size + np.testing.assert_allclose( + expected_gradients, actual_gradients[n], rtol=self._rtol) + + @chex.all_variants + def test_repeat_with_one_to_one_alignment(self): + # test if it can correctly handle the same-label repetition. + nclasses = 5 + labels = np.array([ + [1, 2, 2, 3], + [2, 3, 4, 4], + [1, 1, 1, 1], + [1, 1, 2, 3], + [1, 1, 1, 2], + ]) + expected_alignment = [ # expected minimal alignment + [1, 2, 0, 2, 3], + [2, 3, 4, 0, 4], + [1, 0, 1, 0, 1, 0, 1], + [1, 0, 1, 2, 3], + [1, 0, 1, 0, 1, 2], + ] + batch_size = len(labels) + label_lens = np.array([4] * batch_size) + label_steps = 6 + # Designed to have two padding elements on the right. 
+ labels = np.pad(labels, [(0, 0), (0, label_steps - labels.shape[1])]) + label_paddings = _lengths_to_paddings(label_lens, label_steps) + + logit_lengths = np.array([len(seq) for seq in expected_alignment]) + logit_steps = max(logit_lengths) + logits = np.random.randn(batch_size, logit_steps, nclasses) + logit_paddings = _lengths_to_paddings(logit_lengths, logit_steps) + + per_seq_loss = self.variant(loss.ctc_loss)(logits, logit_paddings, labels, + label_paddings) + + logprobs = jax.nn.log_softmax(logits) + for n in range(batch_size): + expected_loss = -sum(logprobs[n, t, k] + for t, k in enumerate(expected_alignment[n])) + np.testing.assert_allclose( + jnp.array(expected_loss), per_seq_loss[n], rtol=self._rtol) + + +class KLDivergenceTest(parameterized.TestCase): + + def setUp(self): + super().setUp() + self.log_ps = np.array( + [[-2.9957, -3.5066, -3.9120, -1.2040, -0.6931, -2.3026], + [-1.6094, -1.6094, -1.6094, -2.3026, -1.8971, -1.8971]]) + self.qs = np.array([[0.2, 0.2, 0.2, 0.1, 0.15, 0.15], + [0.05, 0.03, 0.02, 0.3, 0.5, 0.1]]) + # Computed kullback-leibler divergence of P from Q. + self.exp = np.array([0.8875625, 0.7187435584901326]) + + @chex.all_variants + def test_scalar(self): + np.testing.assert_allclose( + self.variant(loss.kl_divergence)(self.log_ps[0], self.qs[0]), + self.exp[0], + atol=1e-4) + + @chex.all_variants + def test_batched(self): + np.testing.assert_allclose( + self.variant(loss.kl_divergence)(self.log_ps, self.qs), + self.exp, + atol=1e-4) + + +class KLDivergenceWithLogTargetsTest(parameterized.TestCase): + + def setUp(self): + super().setUp() + self.log_ps = np.array( + [[-2.9957, -3.5066, -3.9120, -1.2040, -0.6931, -2.3026], + [-1.6094, -1.6094, -1.6094, -2.3026, -1.8971, -1.8971]]) + self.qs = np.array([[-1.6094, -1.6094, -1.6094, -2.3026, -1.8971, -1.8971], + [-2.9957, -3.5066, -3.9120, -1.2040, -0.6931, -2.3026]]) + # Computed kullback-leibler divergence of P from Q. 
+ self.exp = np.array([0.8875625, 0.7187435584901326]) + + @chex.all_variants + def test_scalar(self): + np.testing.assert_allclose( + self.variant(loss.kl_divergence_with_log_targets)(self.log_ps[0], + self.qs[0]), + self.exp[0], + atol=1e-4) + + @chex.all_variants + def test_batched(self): + np.testing.assert_allclose( + self.variant(loss.kl_divergence_with_log_targets)(self.log_ps, self.qs), + self.exp, + atol=1e-4) + + +class HingeLossTest(parameterized.TestCase): + + def setUp(self): + super().setUp() + self.ys = np.array([ + -0.97740268, -1.01812625, -0.81675726, -0.73605974, 2.08235648, + 1.84101354, -1.0581002 + ]) + self.ts = np.array([-1, -1, -1, -1, 1, 1, -1]) + # Computed expected outputs. + self.correct_result = np.array( + [0.02259731, 0., 0.18324274, 0.26394027, 0., 0., 0.]) + + @chex.all_variants + def test_batched(self): + np.testing.assert_allclose( + self.variant(loss.hinge_loss)(self.ys, self.ts), + self.correct_result, + atol=1e-4) + +if __name__ == '__main__': + absltest.main() diff --git a/lib/python3.10/site-packages/optax/_src/numerics.py b/lib/python3.10/site-packages/optax/_src/numerics.py new file mode 100644 index 0000000000000000000000000000000000000000..d58af2cf165d7511e98cbaf3c628d3abacc56a46 --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/numerics.py @@ -0,0 +1,118 @@ +# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utilities to ensure the implementation is safe wrt numerical issues. + +Note that complex numbers are also supported, see +https://gist.github.com/wdphy16/118aef6fb5f82c49790d7678cf87da29 +""" + +from typing import Optional, Tuple, Union + +import chex +import jax.numpy as jnp +import numpy as np + + +# TODO(jscholz) Promote these functions to jax core lib? + + +def abs_sq(x: chex.Array) -> chex.Array: + """Returns the squared norm of a (maybe complex) array. + + For real `x`, JAX generates the same HLO from this, `jnp.square(x)`, `x * x`, + or `x**2`. + + Args: + x: a (maybe complex) array. + + Returns: + The squared norm of `x`. + """ + if not isinstance(x, (np.ndarray, jnp.ndarray)): + raise ValueError(f"`abs_sq` accepts only NDarrays, got: {x}.") + return (x.conj() * x).real + + +def safe_norm(x: chex.Array, + min_norm: chex.Numeric, + ord: Optional[Union[int, float, str]] = None, # pylint: disable=redefined-builtin + axis: Union[None, Tuple[int, ...], int] = None, + keepdims: bool = False) -> chex.Array: + """Returns jnp.maximum(jnp.linalg.norm(x), min_norm) with correct gradients. + + The gradients of `jnp.maximum(jnp.linalg.norm(x), min_norm)` at 0.0 is `NaN`, + because jax will evaluate both branches of the `jnp.maximum`. This function + will instead return the correct gradient of 0.0 also in such setting. + + Args: + x: jax array. + min_norm: lower bound for the returned norm. + ord: {non-zero int, inf, -inf, ‘fro’, ‘nuc’}, optional. Order of the norm. + inf means numpy’s inf object. The default is None. + axis: {None, int, 2-tuple of ints}, optional. If axis is an integer, it + specifies the axis of x along which to compute the vector norms. If axis + is a 2-tuple, it specifies the axes that hold 2-D matrices, and the matrix + norms of these matrices are computed. If axis is None then either a vector + norm (when x is 1-D) or a matrix norm (when x is 2-D) is returned. 
The + default is None. + keepdims: bool, optional. If this is set to True, the axes which are normed + over are left in the result as dimensions with size one. With this option + the result will broadcast correctly against the original x. + + Returns: + The safe norm of the input vector, accounting for correct gradient. + """ + norm = jnp.linalg.norm(x, ord=ord, axis=axis, keepdims=True) + x = jnp.where(norm <= min_norm, jnp.ones_like(x), x) + norm = jnp.squeeze(norm, axis=axis) if not keepdims else norm + masked_norm = jnp.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims) + return jnp.where(norm <= min_norm, min_norm, masked_norm) + + +def safe_root_mean_squares(x: chex.Array, min_rms: chex.Numeric) -> chex.Array: + """Returns `maximum(sqrt(mean(abs_sq(x))), min_norm)` with correct grads. + + The gradients of `maximum(sqrt(mean(abs_sq(x))), min_norm)` at 0.0 + is `NaN`, because jax will evaluate both branches of the `jnp.maximum`. This + function will instead return the correct gradient of 0.0 also in such setting. + + Args: + x: jax array. + min_rms: lower bound for the returned norm. + + Returns: + The safe RMS of the input vector, accounting for correct gradient. + """ + rms = jnp.sqrt(jnp.mean(abs_sq(x))) + x = jnp.where(rms <= min_rms, jnp.ones_like(x), x) + return jnp.where(rms <= min_rms, min_rms, jnp.sqrt(jnp.mean(abs_sq(x)))) + + +def safe_int32_increment(count: chex.Numeric) -> chex.Numeric: + """Increments int32 counter by one. + + Normally `max_int + 1` would overflow to `min_int`. This functions ensures + that when `max_int` is reached the counter stays at `max_int`. + + Args: + count: a counter to be incremented. + + Returns: + A counter incremented by 1, or max_int if the maximum precision is reached. 
+ """ + chex.assert_type(count, jnp.int32) + max_int32_value = jnp.iinfo(jnp.int32).max + one = jnp.array(1, dtype=jnp.int32) + return jnp.where(count < max_int32_value, count + one, max_int32_value) diff --git a/lib/python3.10/site-packages/optax/_src/privacy_test.py b/lib/python3.10/site-packages/optax/_src/privacy_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ac603f42b235561b02776894a5beda19c467ddab --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/privacy_test.py @@ -0,0 +1,112 @@ +# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for `privacy.py`.""" + +from absl.testing import absltest +from absl.testing import parameterized + +import chex +import jax +import jax.numpy as jnp + +from optax._src import privacy + + +class DifferentiallyPrivateAggregateTest(parameterized.TestCase): + + def setUp(self): + super().setUp() + self.batch_size = 8 + self.params = {'key_a': (jnp.zeros((2, 3, 4)), jnp.zeros([])), + 'key_b': jnp.zeros((6, 7))} + # Example `i`'s grads are full of `i`s. Important to include 0 to ensure + # there are no divisons by 0 (e.g. 
in norm clipping) + a = jnp.arange(self.batch_size) + self.per_eg_grads = jax.tree_util.tree_map( + lambda p: jnp.moveaxis(a * jnp.ones(p.shape+(self.batch_size,)), -1, 0), + self.params) + + @chex.all_variants + def test_no_privacy(self): + """l2_norm_clip=MAX_FLOAT32 and noise_multiplier=0 should recover SGD.""" + dp_agg = privacy.differentially_private_aggregate( + l2_norm_clip=jnp.finfo(jnp.float32).max, + noise_multiplier=0., + seed=0) + state = dp_agg.init(self.params) + update_fn = self.variant(dp_agg.update) + mean_grads = jax.tree_util.tree_map(lambda g: g.mean(0), self.per_eg_grads) + + for _ in range(3): + updates, state = update_fn(self.per_eg_grads, state) + chex.assert_tree_all_close(updates, mean_grads) + + @chex.all_variants + @parameterized.parameters(0.5, 10.0, 20.0, 40.0, 80.0) + def test_clipping_norm(self, l2_norm_clip): + dp_agg = privacy.differentially_private_aggregate( + l2_norm_clip=l2_norm_clip, + noise_multiplier=0., + seed=42) + state = dp_agg.init(self.params) + update_fn = self.variant(dp_agg.update) + + # Shape of the three arrays below is (self.batch_size, ) + norms = [jnp.linalg.norm(g.reshape(self.batch_size, -1), axis=1) + for g in jax.tree_util.tree_leaves(self.per_eg_grads)] + global_norms = jnp.linalg.norm(jnp.stack(norms), axis=0) + divisors = jnp.maximum(global_norms / l2_norm_clip, 1.) + # Since the values of all the parameters are the same within each example, + # we can easily compute what the values should be: + expected_val = jnp.mean(jnp.arange(self.batch_size) / divisors) + expected_tree = jax.tree_util.tree_map( + lambda p: jnp.broadcast_to(expected_val, p.shape), self.params) + + for _ in range(3): + updates, state = update_fn(self.per_eg_grads, state, self.params) + chex.assert_tree_all_close(updates, expected_tree, rtol=2e-7) + + @chex.all_variants + @parameterized.parameters((3.0, 2.0), (1.0, 5.0), (100.0, 4.0), (1.0, 90.0)) + def test_noise_multiplier(self, l2_norm_clip, noise_multiplier): + """Standard dev. 
of noise should be l2_norm_clip * noise_multiplier.""" + dp_agg = privacy.differentially_private_aggregate( + l2_norm_clip=l2_norm_clip, + noise_multiplier=noise_multiplier, + seed=1337) + state = dp_agg.init(None) + update_fn = self.variant(dp_agg.update) + expected_std = l2_norm_clip * noise_multiplier + + grads = [jnp.ones((1, 100, 100))] # batch size 1 + for _ in range(3): + updates, state = update_fn(grads, state) + chex.assert_tree_all_close(expected_std, + jnp.std(updates[0]), + atol=0.1 * expected_std) + + def test_aggregated_updates_as_input_fails(self): + """Expect per-example gradients as input to this transform.""" + dp_agg = privacy.differentially_private_aggregate(l2_norm_clip=0.1, + noise_multiplier=1.1, + seed=2021) + state = dp_agg.init(self.params) + mean_grads = jax.tree_util.tree_map(lambda g: g.mean(0), self.per_eg_grads) + with self.assertRaises(ValueError): + dp_agg.update(mean_grads, state, self.params) + + +if __name__ == '__main__': + absltest.main() diff --git a/lib/python3.10/site-packages/optax/_src/schedule.py b/lib/python3.10/site-packages/optax/_src/schedule.py new file mode 100644 index 0000000000000000000000000000000000000000..dd780e6a263d25a87dad9b935838a3e211df5dda --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/schedule.py @@ -0,0 +1,620 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""JAX Schedules. + +Schedules may be used to anneal the value of a hyper-parameter over time; for +instance, they may be used to anneal the learning rate used to update an agent's +parameters or the exploration factor used to select actions. +""" + +import functools +import inspect +from typing import Callable, Dict, Union, NamedTuple, Optional, Iterable, Sequence + +from absl import logging +import chex +import jax +import jax.numpy as jnp + +from optax._src import base +from optax._src import numerics + + +def constant_schedule( + value: Union[float, int] +) -> base.Schedule: + """Constructs a constant schedule. + + Args: + value: value to be held constant throughout. + + Returns: + schedule: A function that maps step counts to values. + """ + return lambda count: value + + +def polynomial_schedule( + init_value: chex.Scalar, + end_value: chex.Scalar, + power: chex.Scalar, + transition_steps: int, + transition_begin: int = 0 +) -> base.Schedule: + """Constructs a schedule with polynomial transition from init to end value. + + Args: + init_value: initial value for the scalar to be annealed. + end_value: end value of the scalar to be annealed. + power: the power of the polynomial used to transition from init to end. + transition_steps: number of steps over which annealing takes place, + the scalar starts changing at `transition_begin` steps and completes + the transition by `transition_begin + transition_steps` steps. + If `transition_steps <= 0`, then the entire annealing process is disabled + and the value is held fixed at `init_value`. + transition_begin: must be positive. After how many steps to start annealing + (before this many steps the scalar value is held fixed at `init_value`). + + Returns: + schedule: A function that maps step counts to values. 
+ """ + if transition_steps <= 0: + logging.info( + 'A polynomial schedule was set with a non-positive `transition_steps` ' + 'value; this results in a constant schedule with value `init_value`.') + return lambda count: init_value + + if transition_begin < 0: + logging.info( + 'An exponential schedule was set with a negative `transition_begin` ' + 'value; this will result in `transition_begin` falling back to `0`.') + transition_begin = 0 + + def schedule(count): + count = jnp.clip(count - transition_begin, 0, transition_steps) + frac = 1 - count / transition_steps + return (init_value - end_value) * (frac**power) + end_value + return schedule + + +# Alias polynomial schedule to linear schedule for convenience. +def linear_schedule( + init_value: chex.Scalar, + end_value: chex.Scalar, + transition_steps: int, + transition_begin: int = 0 +) -> base.Schedule: + return polynomial_schedule( + init_value=init_value, end_value=end_value, power=1, + transition_steps=transition_steps, transition_begin=transition_begin) + + +def piecewise_constant_schedule( + init_value: float, + boundaries_and_scales: Optional[Dict[int, float]] = None +) -> base.Schedule: + """Returns a function which implements a piecewise constant schedule. + + Args: + init_value: An initial value `init_v`. + boundaries_and_scales: A map from boundaries `b_i` to non-negative scaling + factors `f_i`. For any step count `s`, the schedule returns `init_v` + scaled by the product of all factors `f_i` such that `b_i` < `s`. + + Returns: + schedule: A function that maps step counts to values. + """ + if boundaries_and_scales is not None: + all_positive = all(scale >= 0. 
for scale in boundaries_and_scales.values()) + if not all_positive: + raise ValueError( + '`piecewise_constant_schedule` expects non-negative scale factors') + + def schedule(count): + v = init_value + if boundaries_and_scales is not None: + for threshold, scale in sorted(boundaries_and_scales.items()): + indicator = jnp.maximum(0., jnp.sign(threshold - count)) + v = v * indicator + (1 - indicator) * scale * v + return v + + return schedule + + +def exponential_decay( + init_value: float, + transition_steps: int, + decay_rate: float, + transition_begin: int = 0, + staircase: bool = False, + end_value: Optional[float] = None +) -> base.Schedule: + """Constructs a schedule with either continuous or discrete exponential decay. + + This function applies an exponential decay function to a provided initial + value. The function returns the decayed value as follows: + + ``` + decayed_value = init_value * decay_rate ^ (count / transition_steps) + ``` + + If the argument `staircase` is `True`, then `count / transition_steps` is + an integer division and the decayed value follows a staircase function. + + Args: + init_value: the initial learning rate. + transition_steps: must be positive. See the decay computation above. + decay_rate: must not be zero. The decay rate. + transition_begin: must be positive. After how many steps to start annealing + (before this many steps the scalar value is held fixed at `init_value`). + staircase: if `True`, decay the values at discrete intervals. + end_value: the value at which the exponential decay stops. When + `decay_rate` < 1, `end_value` is treated as a lower bound, otherwise as + an upper bound. Has no effect when `decay_rate` = 0. + + Returns: + schedule: A function that maps step counts to values. 
+ """ + + if transition_steps <= 0: + logging.info( + 'An exponential schedule was set with a non-positive `transition_steps`' + ' value; this will result in a constant schedule with value ' + '`init_value`.') + return lambda count: init_value + + if decay_rate == 0: + logging.info( + 'An exponential schedule was set with a zero `decay_rate` value; ' + 'this will result in a constant schedule with value `init_value`.') + return lambda count: init_value + + if transition_begin < 0: + logging.info( + 'An exponential schedule was set with a negative `transition_begin` ' + 'value; this will result in `transition_begin` falling back to `0`.') + transition_begin = 0 + + if end_value is not None: + clip_fn = jnp.maximum if decay_rate < 1.0 else jnp.minimum + + def schedule(count): + decreased_count = count - transition_begin + p = decreased_count / transition_steps + if staircase: + p = jnp.floor(p) + decayed_value = jnp.where( + decreased_count <= 0, init_value, init_value * jnp.power(decay_rate, p)) + if end_value is not None: + decayed_value = clip_fn(decayed_value, end_value) + return decayed_value + + return schedule + + +def cosine_decay_schedule( + init_value: float, + decay_steps: int, + alpha: float = 0.0 +) -> base.Schedule: + """Returns a function which implements cosine learning rate decay. + + The schedule does not restart when ``decay_steps`` has been reached. Instead, + the learning rate remains constant afterwards. For a cosine schedule with + restarts, :func:`optax.join_schedules` can be used to join several cosine + decay schedules. + + For more details see: https://arxiv.org/abs/1608.03983. + + Args: + init_value: An initial value `init_v`. + decay_steps: Positive integer - the number of steps for which to apply + the decay for. + alpha: Float. The minimum value of the multiplier used to adjust the + learning rate. + + Returns: + schedule: A function that maps step counts to values. 
+ """ + if not decay_steps > 0: + raise ValueError('The cosine_decay_schedule requires positive decay_steps!') + + def schedule(count): + count = jnp.minimum(count, decay_steps) + cosine_decay = 0.5 * (1 + jnp.cos(jnp.pi * count / decay_steps)) + decayed = (1 - alpha) * cosine_decay + alpha + return init_value * decayed + + return schedule + + +def _linear_interpolate(start: float, end: float, pct: float): + return (end-start) * pct + start + + +def _cosine_interpolate(start: float, end: float, pct: float): + return end + (start-end) / 2.0 * (jnp.cos(jnp.pi * pct) + 1) + + +def piecewise_interpolate_schedule( + interpolate_type: str, + init_value: float, + boundaries_and_scales: Optional[Dict[int, float]] = None +) -> base.Schedule: + """Returns a function which implements a piecewise interpolated schedule. + + Args: + interpolate_type: 'linear' or 'cosine', specifying the interpolation + strategy. + init_value: An initial value `init_v`. + boundaries_and_scales: A map from boundaries `b_i` to non-negative scaling + factors `f_i`. At boundary step `b_i`, the schedule returns `init_v` + scaled by the product of all factors `f_j` such that `b_j` <= `b_i`. The + values in between each boundary will be interpolated as per `type`. + + Returns: + schedule: A function that maps step counts to values. + """ + if interpolate_type == 'linear': + interpolate_fn = _linear_interpolate + elif interpolate_type == 'cosine': + interpolate_fn = _cosine_interpolate + else: + raise ValueError('`interpolate_type` must be either \'cos\' or \'linear\'') + + if boundaries_and_scales: + boundaries, scales = zip(*sorted(boundaries_and_scales.items())) + if not all(scale >= 0. 
for scale in scales): + raise ValueError( + '`piecewise_interpolate_schedule` expects non-negative scale factors') + else: + boundaries, scales = (), () + + bounds = jnp.stack((0,) + boundaries) + values = jnp.cumprod(jnp.stack((init_value,) + scales)) + interval_sizes = (bounds[1:] - bounds[:-1]) + + def schedule(count): + indicator = (bounds[:-1] <= count) & (count < bounds[1:]) + pct = (count - bounds[:-1]) / interval_sizes + interp_vals = interpolate_fn(values[:-1], values[1:], pct) + return indicator.dot(interp_vals) + (bounds[-1] <= count) * values[-1] + + return schedule + + +def linear_onecycle_schedule( + transition_steps: int, + peak_value: float, + pct_start: float = 0.3, + pct_final: float = 0.85, + div_factor: float = 25.0, + final_div_factor: float = 1e4 +) -> base.Schedule: + """Returns a function which implements the onecycle learning rate schedule. + + This function uses a linear annealing strategy. + For more details see: https://arxiv.org/abs/1708.07120 + + Args: + transition_steps: Number of steps over which annealing takes place. + peak_value: Maximum value attained by schedule at pct_start percent + of the cycle (in number of steps). + pct_start: The percentage of the cycle (in number of steps) spent + increasing the learning rate. + pct_final: The percentage of the cycle (in number of steps) spent + increasing to peak_value then decreasing back to init_value. + div_factor: Determines the initial value via init_value = + peak_value / div_factor + final_div_factor: Determines the final value via final_value = + init_value / final_div_factor + + Returns: + schedule: A function that maps step counts to values. + """ + if transition_steps <= 0: + raise ValueError( + 'A linear onecycle schedule was set with a non-positive ' + '`transition_steps`') + + return piecewise_interpolate_schedule( + 'linear', + peak_value / div_factor, + {int(pct_start * transition_steps): div_factor, + int(pct_final * transition_steps): 1. 
/ div_factor, + transition_steps: 1. / final_div_factor}) + + +def cosine_onecycle_schedule( + transition_steps: int, + peak_value: float, + pct_start: float = 0.3, + div_factor: float = 25.0, + final_div_factor: float = 1e4 +) -> base.Schedule: + """Returns a function which implements the onecycle learning rate schedule. + + This function uses a cosine annealing strategy. + For more details see: https://arxiv.org/abs/1708.07120 + + Args: + transition_steps: Number of steps over which annealing takes place. + peak_value: Maximum value attained by schedule at pct_start percent + of the cycle (in number of steps). + pct_start: The percentage of the cycle (in number of steps) spent + increasing the learning rate. + div_factor: Determines the initial value via init_value = + peak_value / div_factor + final_div_factor: Determines the final value via final_value = + init_value / final_div_factor + + Returns: + schedule: A function that maps step counts to values. + """ + if transition_steps <= 0: + raise ValueError( + 'A linear onecycle schedule was set with a non-positive ' + '`transition_steps`') + + return piecewise_interpolate_schedule( + 'cosine', + peak_value / div_factor, + {int(pct_start * transition_steps): div_factor, + int(transition_steps): 1. / (div_factor * final_div_factor)}) + + +def join_schedules(schedules: Sequence[base.Schedule], + boundaries: Sequence[int]) -> base.Schedule: + """Sequentially apply multiple schedules. + + Args: + schedules: A list of callables (expected to be optax schedules). Each + schedule will receive a step count indicating the number of steps since + the previous boundary transition. + boundaries: A list of integers (of length one less than schedules) that + indicate when to transition between schedules. + Returns: + schedule: A function that maps step counts to values. 
+ """ + def schedule(step: jnp.DeviceArray) -> jnp.DeviceArray: + output = schedules[0](step) + for boundary, schedule in zip(boundaries, schedules[1:]): + output = jnp.where(step < boundary, output, schedule(step - boundary)) + return output + return schedule + + +def warmup_cosine_decay_schedule( + init_value: float, + peak_value: float, + warmup_steps: int, + decay_steps: int, + end_value: float = 0.0 +) -> base.Schedule: + """Linear warmup followed by cosine decay. + + Args: + init_value: Initial value for the scalar to be annealed. + peak_value: Peak value for scalar to be annealed at end of warmup. + warmup_steps: Positive integer, the length of the linear warmup. + decay_steps: Positive integer, the total length of the schedule. Note that + this includes the warmup time, so the number of steps during which cosine + annealing is applied is `decay_steps - warmup_steps`. + end_value: End value of the scalar to be annealed. + Returns: + schedule: A function that maps step counts to values. + """ + schedules = [ + linear_schedule( + init_value=init_value, + end_value=peak_value, + transition_steps=warmup_steps), + cosine_decay_schedule( + init_value=peak_value, + decay_steps=decay_steps - warmup_steps, + alpha=end_value/peak_value)] + return join_schedules(schedules, [warmup_steps]) + + +def warmup_exponential_decay_schedule( + init_value: float, + peak_value: float, + warmup_steps: int, + transition_steps: int, + decay_rate: float, + transition_begin: int = 0, + staircase: bool = False, + end_value: Optional[float] = None +) -> base.Schedule: + """Linear warmup followed by exponential decay. + + Args: + init_value: Initial value for the scalar to be annealed. + peak_value: Peak value for scalar to be annealed at end of warmup. + warmup_steps: Positive integer, the length of the linear warmup. + transition_steps: must be positive. See `exponential_decay` for more + details. + decay_rate: must not be zero. The decay rate. + transition_begin: must be positive. 
After how many steps to start annealing + (before this many steps the scalar value is held fixed at `peak_value`). + staircase: if `True`, decay the values at discrete intervals. + end_value: the value at which the exponential decay stops. When + `decay_rate` < 1, `end_value` is treated as a lower bound, otherwise as + an upper bound. Has no effect when `decay_rate` = 0. + Returns: + schedule: A function that maps step counts to values. + """ + schedules = [ + linear_schedule( + init_value=init_value, + end_value=peak_value, + transition_steps=warmup_steps), + exponential_decay( + init_value=peak_value, + transition_steps=transition_steps, + decay_rate=decay_rate, + transition_begin=transition_begin, + staircase=staircase, + end_value=end_value)] + return join_schedules(schedules, [warmup_steps]) + + +def sgdr_schedule(cosine_kwargs: Iterable[Dict[str, chex.Numeric]] + ) -> base.Schedule: + """SGD with warm restarts, from Loschilov & Hutter (arXiv:1608.03983). + + This learning rate schedule applies multiple joined cosine decay cycles. + For more details see: https://arxiv.org/abs/1608.03983 + + Args: + cosine_kwargs: An Iterable of dicts, where each element specifies the + arguments to pass to each cosine decay cycle. The `decay_steps` kwarg + will specify how long each cycle lasts for, and therefore when to + transition to the next cycle. + Returns: + schedule: A function that maps step counts to values. 
+ """ + boundaries = [] + schedules = [] + step = 0 + for kwargs in cosine_kwargs: + schedules += [warmup_cosine_decay_schedule(**kwargs)] + boundaries += [step + kwargs['decay_steps']] + step += kwargs['decay_steps'] + return join_schedules(schedules, boundaries[:-1]) + + +def _convert_floats(x, dtype): + """Convert float-like inputs to dtype, rest pass through.""" + if jax.dtypes.scalar_type_of(x) == float: + return jnp.asarray(x, dtype=dtype) + return x + + +class InjectHyperparamsState(NamedTuple): + """Maintains inner transform state, hyperparameters, and step count.""" + count: jnp.ndarray # shape=(), dtype=jnp.int32 + hyperparams: Dict[str, chex.Numeric] + inner_state: base.OptState + + +def inject_hyperparams( + inner_factory: Callable[..., base.GradientTransformation], + static_args: Union[str, Iterable[str]] = (), + hyperparam_dtype: Optional[jnp.dtype] = None, +) -> Callable[..., base.GradientTransformation]: + """Wrapper that injects hyperparameters into the inner GradientTransformation. + + This wrapper allows you to pass schedules (i.e. a function that returns a + numeric value given a step count) instead of constants for + hyperparameters. You may only schedule numeric hyperparameters (i.e. boolean + flags cannot be scheduled). + + For example, to use ``scale_by_adam`` with a piecewise linear + schedule for beta_1 and constant for beta_2:: + + scheduled_adam = optax.inject_hyperparams(optax.scale_by_adam)( + b1=optax.piecewise_linear_schedule(...), + b2=0.99) + + You may manually change numeric hyperparameters that were not scheduled + through the ``hyperparams`` dict in the ``InjectHyperparamState``:: + + state = scheduled_adam.init(params) + updates, state = scheduled_adam.update(grads, state) + state.hyperparams['b2'] = 0.95 + updates, state = scheduled_adam.update(updates, state) # uses b2 = 0.95 + + Manually overriding scheduled hyperparameters will have no effect (e.g. + in the code sample above, you cannot manually adjust ``b1``). 
+ + Args: + inner_factory: a function that returns the inner + ``optax.GradientTransformation`` given the hyperparameters. + static_args: a string or iterable of strings specifying which + callable parameters are not schedules. inject_hyperparams treats all + callables as schedules by default, so if a hyperparameter is a + non-schedule callable, you must specify that using this argument. + hyperparam_dtype: Optional datatype override. If specified, all float + hyperparameters will be cast to this type. + + Returns: + A callable that returns a ``optax.GradientTransformation``. This callable + accepts the same arguments as ``inner_factory``, except you may provide + schedules in place of the constant arguments. + """ + static_args = ({static_args} if isinstance(static_args, str) else + set(static_args)) + inner_signature = inspect.signature(inner_factory) + + if not static_args.issubset(inner_signature.parameters): + raise ValueError( + '`static_args` must specify a subset of `inner_factory`\'s parameters. ' + f'Given `static_args`: {static_args}. 
`inner_factory` parameters: ' + f'{set(inner_signature.parameters.keys())}') + + @functools.wraps(inner_factory) + def wrapped_transform(*args, **kwargs) -> base.GradientTransformation: + bound_arguments = inner_signature.bind(*args, **kwargs) + bound_arguments.apply_defaults() + + sched_hps, numeric_hps, other_hps = {}, {}, {} + for name, value in bound_arguments.arguments.items(): + if name in static_args or isinstance(value, bool): + other_hps[name] = value + elif callable(value): + sched_hps[name] = value + elif isinstance(value, (int, float, chex.Array)): + numeric_hps[name] = value + else: + other_hps[name] = value + + def schedule_fn(count, dtype): + return {k: _convert_floats(f(count), dtype) for k, f in sched_hps.items()} + + def init_fn(params): + count = jnp.zeros([], jnp.int32) + if hyperparam_dtype is None: + dtype = getattr(next(iter( + jax.tree_util.tree_leaves(params)), None), 'dtype', None) + else: + dtype = hyperparam_dtype + hparams = { + k: jnp.asarray(_convert_floats(v, dtype)) + for k, v in numeric_hps.items()} + hparams.update(schedule_fn(count, dtype)) + return InjectHyperparamsState( # pylint:disable=too-many-function-args + count, hparams, inner_factory(**other_hps, **hparams).init(params)) + + def update_fn(updates, state, params=None): + if hyperparam_dtype is None: + dtype = getattr(next(iter( + jax.tree_util.tree_leaves(updates)), None), 'dtype', None) + else: + dtype = hyperparam_dtype + hparams = {k: _convert_floats(v, dtype) + for k, v in state.hyperparams.items()} + hparams.update(schedule_fn(state.count, dtype)) + updates, inner_state = inner_factory(**other_hps, **hparams).update( + updates, state.inner_state, params) + count_inc = numerics.safe_int32_increment(state.count) + + # pylint:disable=too-many-function-args + return updates, InjectHyperparamsState(count_inc, hparams, inner_state) + # pylint:enable=too-many-function-args + + return base.GradientTransformation(init_fn, update_fn) + + return wrapped_transform diff --git 
a/lib/python3.10/site-packages/optax/_src/schedule_test.py b/lib/python3.10/site-packages/optax/_src/schedule_test.py new file mode 100644 index 0000000000000000000000000000000000000000..89c0d51b5988ac1fcb59dadf253d9f38e4bc16b3 --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/schedule_test.py @@ -0,0 +1,649 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for `schedule.py`.""" + +import functools + +from absl.testing import absltest +from absl.testing import parameterized + +import chex +import jax +import jax.numpy as jnp +import numpy as np + +from optax._src import clipping +from optax._src import schedule +from optax._src import transform +from optax._src import wrappers + + +class ConstantTest(chex.TestCase): + + @chex.all_variants + def test_constant(self): + """Check constant schedule.""" + # Get schedule function. + const_value = 10 + num_steps = 15 + schedule_fn = self.variant(schedule.constant_schedule(const_value)) + # Test that generated values equal the expected schedule values. + generated_vals = [] + for count in range(num_steps): + # Compute next value. + generated_vals.append(schedule_fn(count)) + # Test output. 
+ expected_vals = np.array([const_value] * num_steps, dtype=np.float32) + np.testing.assert_allclose( + expected_vals, np.array(generated_vals), atol=1e-3) + + +class PolynomialTest(chex.TestCase): + + @chex.all_variants + def test_linear(self): + """Check linear schedule.""" + # Get schedule function. + schedule_fn = self.variant( + schedule.polynomial_schedule( + init_value=10., end_value=20., power=1, transition_steps=10)) + # Test that generated values equal the expected schedule values. + generated_vals = [] + for count in range(15): + # Compute next value. + generated_vals.append(schedule_fn(count)) + # Test output. + expected_vals = np.array(list(range(10, 20)) + [20] * 5, dtype=np.float32) + np.testing.assert_allclose( + expected_vals, np.array(generated_vals), atol=1e-3) + + @chex.all_variants + def test_zero_steps_schedule(self): + # Get schedule function. + initial_value = 10. + end_value = 20. + + for num_steps in [-1, 0]: + schedule_fn = self.variant( + schedule.polynomial_schedule( + init_value=initial_value, end_value=end_value, + power=1, transition_steps=num_steps)) + for count in range(15): + np.testing.assert_allclose(schedule_fn(count), initial_value) + + @chex.all_variants + def test_nonlinear(self): + """Check non-linear (quadratic) schedule.""" + # Get schedule function. + schedule_fn = self.variant( + schedule.polynomial_schedule( + init_value=25., end_value=10., power=2, transition_steps=10)) + # Test that generated values equal the expected schedule values. + generated_vals = [] + for count in range(15): + # Compute next value. + generated_vals.append(schedule_fn(count)) + # Test output. + expected_vals = np.array( + [10. + 15. * (1. - n / 10)**2 for n in range(10)] + [10] * 5, + dtype=np.float32) + np.testing.assert_allclose( + expected_vals, np.array(generated_vals), atol=1e-3) + + @chex.all_variants + def test_with_decay_begin(self): + """Check quadratic schedule with non-zero schedule begin.""" + # Get schedule function. 
+ schedule_fn = self.variant( + schedule.polynomial_schedule( + init_value=30., end_value=10., power=2, + transition_steps=10, transition_begin=4)) + # Test that generated values equal the expected schedule values. + generated_vals = [] + for count in range(20): + # Compute next value. + generated_vals.append(schedule_fn(count)) + # Test output. + expected_vals = np.array( + [30.] * 4 + [10. + 20. * (1. - n / 10)**2 for n in range(10)] + + [10] * 6, + dtype=np.float32) + np.testing.assert_allclose( + expected_vals, np.array(generated_vals), atol=1e-3) + + +class PiecewiseConstantTest(chex.TestCase): + + @chex.all_variants + def test_positive(self): + """Check piecewise constant schedule of positive values.""" + # Get schedule function. + schedule_fn = self.variant( + schedule.piecewise_constant_schedule(0.1, {3: 2., 6: 0.5})) + # Test that generated values equal the expected schedule values. + generated_vals = [] + for count in range(10): + # Compute next value. + generated_vals.append(schedule_fn(count)) + # Test output. + expected_vals = np.array([0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1]) + np.testing.assert_allclose( + expected_vals, np.array(generated_vals), atol=1e-3) + + @chex.all_variants + def test_negative(self): + """Check piecewise constant schedule of negative values.""" + # Get schedule function. + schedule_fn = self.variant( + schedule.piecewise_constant_schedule(-0.1, {3: 2., 6: 0.5})) + # Test that generated values equal the expected schedule values. + generated_vals = [] + for count in range(10): + # Compute next value. + generated_vals.append(schedule_fn(count)) + # Test output. 
+ expected_vals = -1 * np.array( + [0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1]) + np.testing.assert_allclose( + expected_vals, np.array(generated_vals), atol=1e-3) + + +class ExponentialTest(chex.TestCase): + + @chex.all_variants + @parameterized.parameters(False, True) + def test_constant_schedule(self, staircase): + """Checks constant schedule for exponential decay schedule.""" + num_steps = 15 + # Get schedule function. + init_value = 1. + schedule_fn = self.variant( + schedule.exponential_decay( + init_value=init_value, transition_steps=num_steps, + decay_rate=1., staircase=staircase)) + # Test that generated values equal the expected schedule values. + generated_vals = [] + for count in range(num_steps): + generated_vals.append(schedule_fn(count)) + expected_vals = np.array([init_value] * num_steps, dtype=np.float32) + np.testing.assert_allclose( + expected_vals, np.array(generated_vals), atol=1e-3) + + @chex.all_variants + @parameterized.parameters(False, True) + def test_nonvalid_transition_steps(self, staircase): + """Checks nonvalid decay steps results in a constant schedule.""" + init_value = 1. + for transition_steps in [-1, 0]: + schedule_fn = self.variant( + schedule.exponential_decay( + init_value=init_value, transition_steps=transition_steps, + decay_rate=1., staircase=staircase)) + for count in range(15): + np.testing.assert_allclose(schedule_fn(count), init_value) + + @chex.all_variants + @parameterized.parameters(False, True) + def test_nonvalid_decay_rate(self, staircase): + """Checks nonvalid decay steps results in a constant schedule.""" + init_value = 1. 
+ schedule_fn = self.variant( + schedule.exponential_decay( + init_value=init_value, transition_steps=2, + decay_rate=0., staircase=staircase)) + for count in range(15): + np.testing.assert_allclose(schedule_fn(count), init_value) + + @chex.all_variants + @parameterized.parameters((False, 0), (True, 0), (False, 5), (True, 5)) + def test_exponential(self, staircase, transition_begin): + """Checks non-linear (quadratic) schedule.""" + # Get schedule function. + init_value = 1. + num_steps = 15 + transition_steps = 2 + decay_rate = 2. + schedule_fn = self.variant( + schedule.exponential_decay( + init_value=init_value, transition_steps=transition_steps, + decay_rate=decay_rate, transition_begin=transition_begin, + staircase=staircase)) + + # Test that generated values equal the expected schedule values. + def _staircased(count): + p = count / transition_steps + if staircase: + p = np.floor(p) + return p + + generated_vals = [] + for count in range(num_steps + transition_begin): + generated_vals.append(schedule_fn(count)) + expected_vals = np.array( + [init_value] * transition_begin + [ + init_value * np.power(decay_rate, _staircased(count)) + for count in range(num_steps) + ], + dtype=np.float32) + np.testing.assert_allclose( + expected_vals, np.array(generated_vals), atol=1e-3) + + @chex.all_variants + @parameterized.parameters( + (0.2, 0.1, False), (1.0, 0.1, False), (2.0, 3.0, False), + (0.2, 0.1, True), (1.0, 0.1, True), (2.0, 3.0, True)) + def test_end_value_with_staircase(self, decay_rate, end_value, staircase): + # Get schedule function. + init_value = 1. + num_steps = 11 + transition_steps = 2 + transition_begin = 3 + schedule_fn = self.variant( + schedule.exponential_decay( + init_value=init_value, transition_steps=transition_steps, + decay_rate=decay_rate, transition_begin=transition_begin, + staircase=staircase, end_value=end_value)) + + # Test that generated values equal the expected schedule values. 
+ def _staircased(count): + p = count / transition_steps + if staircase: + p = np.floor(p) + return p + + generated_vals = [] + for count in range(num_steps + transition_begin): + generated_vals.append(schedule_fn(count)) + expected_vals = np.array( + [init_value] * transition_begin + [ + init_value * np.power(decay_rate, _staircased(count)) + for count in range(num_steps) + ], + dtype=np.float32) + + if decay_rate < 1.0: + expected_vals = np.maximum(expected_vals, end_value) + else: + expected_vals = np.minimum(expected_vals, end_value) + + np.testing.assert_allclose( + expected_vals, np.array(generated_vals), atol=1e-3) + + @chex.all_variants + def test_immutable_count(self): + """Checks constant schedule for exponential decay schedule.""" + num_steps = 5 + # Get schedule function. + init_value = 32. + schedule_fn = self.variant( + schedule.exponential_decay( + init_value=init_value, transition_steps=1, + decay_rate=0.5)) + # Test that generated values equal the expected schedule values. + generated_vals = [] + for count in range(num_steps): + # Jax arrays are read-only in ChexVariantType.WITHOUT_DEVICE. + immutable_count = jnp.array(count, dtype=jnp.float32) + generated_vals.append(schedule_fn(immutable_count)) + expected_vals = np.array([32, 16, 8, 4, 2], dtype=np.float32) + np.testing.assert_allclose( + expected_vals, np.array(generated_vals), atol=1e-3) + + +class CosineDecayTest(chex.TestCase): + + @chex.all_variants + def test_decay_count_smaller_count(self): + """Check cosine schedule decay for the entire training schedule.""" + initial_value = 0.1 + schedule_fn = self.variant( + schedule.cosine_decay_schedule(initial_value, 10, 0.0)) + # Test that generated values equal the expected schedule values. + generated_vals = [] + for count in range(10): + # Compute next value. + generated_vals.append(schedule_fn(count)) + # Test output. 
+ expected_multipliers = np.array( + 0.5 + 0.5 * np.cos( + np.pi * np.array( + [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]))) + np.testing.assert_allclose( + initial_value * expected_multipliers, + np.array(generated_vals), atol=1e-3) + + @chex.all_variants + def test_decay_count_greater_count(self): + """Check cosine schedule decay for a part of the training schedule.""" + initial_value = 0.1 + schedule_fn = self.variant( + schedule.cosine_decay_schedule(initial_value, 5, 0.0)) + # Test that generated values equal the expected schedule values. + generated_vals = [] + for count in range(12): + # Compute next value. + generated_vals.append(schedule_fn(count)) + + # Test output. + expected_multipliers = np.array( + 0.5 + 0.5 * np.cos( + np.pi * np.array( + [0.0, 0.2, 0.4, 0.6, 0.8, 1., 1., 1., 1., 1., 1., 1.]))) + np.testing.assert_allclose( + initial_value * expected_multipliers, + np.array(generated_vals), atol=1e-3) + + @chex.all_variants + def test_decay_count_greater_count_with_alpha(self): + """Check cosine schedule decay for a part of the training schedule.""" + # Get schedule function. + initial_value = 0.1 + schedule_fn = self.variant( + schedule.cosine_decay_schedule(initial_value, 5, 0.1)) + # Test that generated values equal the expected schedule values. + generated_vals = [] + for count in range(12): + # Compute next value. + generated_vals.append(schedule_fn(count)) + + # Test output. 
+ expected_multipliers = np.array( + 0.5 + 0.5 * np.cos( + np.pi * np.array( + [0.0, 0.2, 0.4, 0.6, 0.8, 1., 1., 1., 1., 1., 1., 1.]))) + expected_multipliers = 0.9 * expected_multipliers + 0.1 + np.testing.assert_allclose( + initial_value * expected_multipliers, + np.array(generated_vals), atol=1e-3) + + +class WarmupCosineDecayTest(chex.TestCase): + + @chex.all_variants + @parameterized.named_parameters( + ('with end value', 10, 0.5, 1e-4), + ('without end value', 5, 3, 0.),) + def test_limits(self, init_value, peak_value, end_value): + """Check cosine schedule decay for the entire training schedule.""" + schedule_fn = self.variant(schedule.warmup_cosine_decay_schedule( + init_value=init_value, + peak_value=peak_value, + warmup_steps=100, + decay_steps=1000, + end_value=end_value, + )) + + np.testing.assert_allclose(init_value, schedule_fn(0)) + np.testing.assert_allclose(peak_value, schedule_fn(100)) + np.testing.assert_allclose(end_value, schedule_fn(1000), rtol=1e-3) + + +class SGDRTest(chex.TestCase): + + @chex.all_variants + @parameterized.named_parameters( + ('with step decay', 1.6, 0.8, 0.4), + ('without step_decay', 1.6, 1.6, 1.6),) + def test_limits(self, lr0, lr1, lr2): + """Check cosine schedule decay for the entire training schedule.""" + lr_kwargs = [] + for step, lr in zip([2e3, 3e3, 5e3], [lr0, lr1, lr2]): + lr_kwargs += [dict(decay_steps=int(step), peak_value=lr, + init_value=0, end_value=0.0, warmup_steps=500)] + schedule_fn = self.variant(schedule.sgdr_schedule(lr_kwargs)) + np.testing.assert_allclose(lr0, schedule_fn(500)) + np.testing.assert_allclose(lr1, schedule_fn(2500)) + np.testing.assert_allclose(lr2, schedule_fn(5500)) + + +class PiecewiseInterpolateTest(chex.TestCase): + + @chex.all_variants + def test_linear_piecewise(self): + schedule_fn = self.variant(schedule.piecewise_interpolate_schedule( + 'linear', 200., {5: 1.5, 10: 0.25})) + generated_vals = [schedule_fn(step) for step in range(13)] + expected_vals = [200., 220., 240., 260., 
280., 300., 255., 210., 165., + 120., 75., 75., 75.] + np.testing.assert_allclose(generated_vals, expected_vals, atol=1e-3) + + @chex.all_variants + def test_cos_piecewise(self): + schedule_fn = self.variant(schedule.piecewise_interpolate_schedule( + 'cosine', 400., {5: 1.2, 3: 0.6, 7: 1.})) + generated_vals = [schedule_fn(step) for step in range(9)] + expected_vals = [400., 360., 280., 240., 264., 288., 288., 288., 288.] + np.testing.assert_allclose(generated_vals, expected_vals, atol=1e-3) + + @chex.all_variants + def test_empty_dict(self): + schedule_fn = self.variant(schedule.piecewise_interpolate_schedule( + 'linear', 13., {})) + generated_vals = [schedule_fn(step) for step in range(5)] + expected_vals = [13., 13., 13., 13., 13.] + np.testing.assert_allclose(generated_vals, expected_vals, atol=1e-3) + + @chex.all_variants + def test_no_dict(self): + schedule_fn = self.variant(schedule.piecewise_interpolate_schedule( + 'cosine', 17.)) + generated_vals = [schedule_fn(step) for step in range(3)] + expected_vals = [17., 17., 17.] + np.testing.assert_allclose(generated_vals, expected_vals, atol=1e-3) + + def test_invalid_type(self): + # pytype: disable=wrong-arg-types + with self.assertRaises(ValueError): + schedule.piecewise_interpolate_schedule('linar', 13.) 
+ with self.assertRaises(ValueError): + schedule.piecewise_interpolate_schedule('', 13., {5: 3.}) + with self.assertRaises(ValueError): + schedule.piecewise_interpolate_schedule(None, 13., {}) + # pytype: enable=wrong-arg-types + + def test_invalid_scale(self): + with self.assertRaises(ValueError): + schedule.piecewise_interpolate_schedule('linear', 13., {5: -3}) + + +class OneCycleTest(chex.TestCase): + + @chex.all_variants + def test_linear(self): + schedule_fn = self.variant(schedule.linear_onecycle_schedule( + transition_steps=10, + peak_value=1000, + pct_start=0.3, + pct_final=0.7, + div_factor=10., + final_div_factor=100.)) + + generated_vals = [schedule_fn(step) for step in range(12)] + expected_vals = [100., 400., 700., 1000., 775., 550., 325., 100., 67., + 34., 1., 1.] + np.testing.assert_allclose(generated_vals, expected_vals, atol=1e-3) + + @chex.all_variants + def test_cosine(self): + schedule_fn = self.variant(schedule.cosine_onecycle_schedule( + transition_steps=5, + peak_value=1000., + pct_start=0.4, + div_factor=10., + final_div_factor=100.)) + + generated_vals = [schedule_fn(step) for step in range(7)] + expected_vals = [100., 550., 1000., 750.25, 250.75, 1., 1.] + np.testing.assert_allclose(generated_vals, expected_vals, atol=1e-3) + + def test_nonpositive_transition_steps(self): + with self.assertRaises(ValueError): + schedule.cosine_onecycle_schedule(transition_steps=0, peak_value=5.) + with self.assertRaises(ValueError): + schedule.linear_onecycle_schedule(transition_steps=0, peak_value=5.) 
+ + +class InjectHyperparamsTest(chex.TestCase): + """Tests for the inject_hyperparams wrapper.""" + + @chex.all_variants + def test_updates(self): + optim = schedule.inject_hyperparams(transform.scale)( # stateless + step_size=schedule.piecewise_constant_schedule( + 3.0, {1: 5, 7: 2, 12: 1.5})) + + params = [jnp.zeros([], dtype=jnp.float32)] + state = self.variant(optim.init)(params) + update_fn = self.variant(optim.update) + expected_step_size = [3.0]*2 + [15.0]*6 + [30.0]*5 + [45.0]*3 + + grads = [jnp.ones([], dtype=jnp.float32)] + for i in range(15): + updates, state = update_fn(grads, state, params=params) + np.testing.assert_almost_equal(updates[0], expected_step_size[i+1]) + + @chex.all_variants + def test_hyperparams_state(self): + optim = schedule.inject_hyperparams(transform.trace)( # stateful + decay=schedule.piecewise_constant_schedule( + 0.8, {3: 0.5, 9: 1.25}), + nesterov=True) + + params = [jnp.zeros([2, 3]) for _ in range(3)] + state = self.variant(optim.init)(params) + update_fn = self.variant(optim.update) + + expected_mom = [0.8]*4 + [0.4]*6 + [0.5]*2 + grads = jax.tree_util.tree_map(jnp.ones_like, params) + for i in range(12): + np.testing.assert_almost_equal(state.hyperparams['decay'], + expected_mom[i]) + _, state = update_fn(grads, state) + + np.testing.assert_almost_equal(state.hyperparams['decay'], + expected_mom[-1]) + + @chex.all_variants + def test_constant_hyperparams(self): + optim = schedule.inject_hyperparams(transform.scale_by_adam)(b1=0., b2=0.) 
+ + params = [jnp.zeros([2, 3]) for _ in range(3)] + state = self.variant(optim.init)(params) + update_fn = self.variant(optim.update) + + grads = jax.tree_util.tree_map(jnp.ones_like, params) + for _ in range(5): + updates, state = update_fn(grads, state, params) + np.testing.assert_almost_equal(state.hyperparams['b1'], 0.0) + np.testing.assert_almost_equal(state.hyperparams['b2'], 0.0) + np.testing.assert_almost_equal(state.hyperparams['eps'], 1e-8) + np.testing.assert_almost_equal(state.hyperparams['eps_root'], 0.0) + assert 'eps' in state.hyperparams + chex.assert_tree_all_close(updates, grads) + + @chex.all_variants + def test_overriding_hyperparam(self): + optim = schedule.inject_hyperparams(clipping.clip_by_global_norm)(0.1) + params = jnp.zeros((3, 5, 7)) + state = self.variant(optim.init)(params) + update_fn = self.variant(optim.update) + + grads = jnp.ones_like(params) + for i in range(5): + state.hyperparams['max_norm'] = i + updates, state = update_fn(grads, state) + assert np.isclose(jnp.linalg.norm(updates.ravel()), i) + + @chex.all_variants + @parameterized.named_parameters(('string', 'mask'), ('list', ['mask'])) + def test_static_args(self, static_args): + @functools.partial(schedule.inject_hyperparams, static_args=static_args) + def custom_optim(learning_rate, mask): + return wrappers.masked(transform.scale(-learning_rate), mask) + + optim = custom_optim( + 0.1, functools.partial(jax.tree_util.tree_map, lambda x: x.ndim > 1)) + params = [jnp.ones((1, 2)), jnp.ones(2), jnp.ones((1, 1, 1))] + grads = params + state = self.variant(optim.init)(params) + updates, state = self.variant(optim.update)(grads, state) + expected_updates = jax.tree_util.tree_map( + lambda x: -0.1 * x if x.ndim > 1 else x, grads) + + assert set(state.hyperparams.keys()) == {'learning_rate'}, state.hyperparams + chex.assert_tree_all_close(updates, expected_updates) + + @chex.all_variants + @parameterized.named_parameters(('one_arg', 'b1'), ('two_arg', ['b1', 'b2'])) + def 
test_numeric_static_args(self, static_args): + optim = schedule.inject_hyperparams( + transform.scale_by_adam, static_args=static_args)(b1=0.9, b2=0.95) + + params = [jnp.ones((1, 2)), jnp.ones(2), jnp.ones((1, 1, 1))] + grads = params + state = self.variant(optim.init)(params) + _, state = self.variant(optim.update)(grads, state) + + assert not set(state.hyperparams.keys()).intersection(set(static_args)) + + @chex.all_variants + @parameterized.named_parameters( + ('bf16hyp f32param bf16grad', jnp.bfloat16, jnp.float32, jnp.bfloat16), + ('bf16hyp f32param f32_grads', jnp.bfloat16, jnp.float32, jnp.float32), + ('f32hyp bf16param bf16grad', jnp.float32, jnp.bfloat16, jnp.bfloat16), + ('f32hyp f32param bf16grad', jnp.float32, jnp.float32, jnp.bfloat16), + ('f32hyp bf16param f32grad', jnp.float32, jnp.bfloat16, jnp.float32), + ) + def test_hyperparam_dtypes(self, + hyperparam_dtype, + param_dtype, + grad_dtype): + """Tests that hyperparam dtype override works as desired.""" + optim = schedule.inject_hyperparams( + transform.scale_by_adam, + hyperparam_dtype=hyperparam_dtype)(b1=0.9, b2=0.95) + + params = [jnp.ones((1, 2), dtype=param_dtype), + jnp.ones(2, dtype=param_dtype), + jnp.ones((1, 1, 1), dtype=param_dtype)] + grads = jax.tree_map(lambda x: x.astype(grad_dtype), params) + state = self.variant(optim.init)(params) + # Check that the hyperparams are overriden + self.assertEqual(state.hyperparams['b1'].dtype, hyperparam_dtype) + self.assertEqual(state.hyperparams['b2'].dtype, hyperparam_dtype) + + _, state = self.variant(optim.update)(grads, state) + + self.assertEqual(state.hyperparams['b1'].dtype, hyperparam_dtype) + self.assertEqual(state.hyperparams['b2'].dtype, hyperparam_dtype) + + @parameterized.named_parameters(('string', 'lr'), ('list', ['lr'])) + def test_static_args_error(self, static_args): + with self.assertRaises(ValueError): + schedule.inject_hyperparams(transform.scale, static_args=static_args) + + @chex.all_variants + def 
test_inject_hyperparams_starts_with_step_count_zero(self): + """Checks that inject_hyperparams uses step count 0 in the first update.""" + # See also: https://github.com/deepmind/optax/issues/415. + opt = schedule.inject_hyperparams(transform.scale)(lambda count: count) + params = jnp.zeros(3) + grads = jnp.array([-1, 0, 1]) + updates, _ = self.variant(opt.update)(grads, opt.init(params)) + np.testing.assert_array_equal(updates, np.zeros(3)) + + +if __name__ == '__main__': + absltest.main() diff --git a/lib/python3.10/site-packages/optax/_src/second_order.py b/lib/python3.10/site-packages/optax/_src/second_order.py new file mode 100644 index 0000000000000000000000000000000000000000..6793dbc97af98094cf98613714669a317f733d97 --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/second_order.py @@ -0,0 +1,111 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Functions for computing diagonals of Hessians & Fisher info of parameters. + +Computing the Hessian or Fisher information matrices for neural networks is +typically intractible due to the quadratic memory requirements. Solving for the +diagonals of these matrices is often a better solution. + +This module provides two functions for computing these diagonals, `hessian_diag` +and `fisher_diag`., each with sub-quadratic memory requirements. 
+ +""" + +from typing import Any, Callable + +import jax +from jax.flatten_util import ravel_pytree +import jax.numpy as jnp + + +# This covers both Jax and Numpy arrays. +# TODO(b/160876114): use the pytypes defined in Chex. +Array = jnp.ndarray +# LossFun of type f(params, inputs, targets). +LossFun = Callable[[Any, Array, Array], Array] + + +def ravel(p: Any) -> Array: + return ravel_pytree(p)[0] + + +def hvp( + loss: LossFun, + v: jnp.DeviceArray, + params: Any, + inputs: jnp.DeviceArray, + targets: jnp.DeviceArray, +) -> jnp.DeviceArray: + """Performs an efficient vector-Hessian (of `loss`) product. + + Args: + loss: the loss function. + v: a vector of size `ravel(params)`. + params: model parameters. + inputs: inputs at which `loss` is evaluated. + targets: targets at which `loss` is evaluated. + + Returns: + An Array corresponding to the product of `v` and the Hessian of `loss` + evaluated at `(params, inputs, targets)`. + """ + _, unravel_fn = ravel_pytree(params) + loss_fn = lambda p: loss(p, inputs, targets) + return jax.jvp(jax.grad(loss_fn), [params], [unravel_fn(v)])[1] + + +def hessian_diag( + loss: LossFun, + params: Any, + inputs: jnp.DeviceArray, + targets: jnp.DeviceArray, +) -> jnp.DeviceArray: + """Computes the diagonal hessian of `loss` at (`inputs`, `targets`). + + Args: + loss: the loss function. + params: model parameters. + inputs: inputs at which `loss` is evaluated. + targets: targets at which `loss` is evaluated. + + Returns: + A DeviceArray corresponding to the product to the Hessian of `loss` + evaluated at `(params, inputs, targets)`. + """ + vs = jnp.eye(ravel(params).size) + comp = lambda v: jnp.vdot(v, ravel(hvp(loss, v, params, inputs, targets))) + return jax.vmap(comp)(vs) + + +def fisher_diag( + negative_log_likelihood: LossFun, + params: Any, + inputs: jnp.ndarray, + targets: jnp.ndarray, +) -> jnp.DeviceArray: + """Computes the diagonal of the (observed) Fisher information matrix. 
+ + Args: + negative_log_likelihood: the negative log likelihood function. + params: model parameters. + inputs: inputs at which `negative_log_likelihood` is evaluated. + targets: targets at which `negative_log_likelihood` is evaluated. + + Returns: + An Array corresponding to the product to the Hessian of + `negative_log_likelihood` evaluated at `(params, inputs, targets)`. + """ + return jnp.square( + ravel(jax.grad(negative_log_likelihood)(params, inputs, targets))) diff --git a/lib/python3.10/site-packages/optax/_src/stochastic_gradient_estimators.py b/lib/python3.10/site-packages/optax/_src/stochastic_gradient_estimators.py new file mode 100644 index 0000000000000000000000000000000000000000..fd2ba605db0e1c3e704ee07cda31c1c8086085c9 --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/stochastic_gradient_estimators.py @@ -0,0 +1,317 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Stochastic Monte Carlo gradient estimators. 
+ +Utility functions to approximate gradients of the form using Monte Carlo +estimation: + \nabla_{\theta} E_{p(x; \theta)} f(x) + +Here f is assumed to have no dependence on the parameters theta - if f has +dependence on theta, the functions below need to be called with `stop_grad(f)` +and the chain rule needs to be applied outside these functions in order +to obtain unbiased gradient. + +For more details, see: +S. Mohamed, M. Rosca, M. Figurnov, A Mnih. + Monte Carlo Gradient Estimation in Machine Learning. JMLR, 2020. +""" + +import math +from typing import Any, Callable, Sequence + +import chex +import jax +import jax.numpy as jnp +import numpy as np +from optax._src import base +from optax._src import utils + + +def score_function_jacobians( + function: Callable[[chex.Array], float], + params: base.Params, + dist_builder: Callable[..., Any], + rng: chex.PRNGKey, + num_samples: int) -> Sequence[chex.Array]: + r"""Score function gradient estimation. + + Approximates: + \nabla_{\theta} E_{p(x; \theta)} f(x) + With: + E_{p(x; \theta)} f(x) \nabla_{\theta} \log p(x; \theta) + + Requires: p to be differentiable wrt to theta. Applicable to both continuous + and discrete random variables. No requirements on f. + + Args: + function: Function f(x) for which to estimate grads_{params} E_dist f(x). + The function takes in one argument (a sample from the distribution) and + returns a floating point value. + params: A tuple of jnp arrays. + The parameters for which to construct the distribution. + dist_builder: a constructor which builds a distribution given the input + parameters specified by params. `dist_builder(params)` should return a + valid distribution. + rng: a PRNGKey key. + num_samples: Int, the number of samples used to compute the grads. + + Returns: + A tuple of size `params`, each element is `num_samples x param.shape` + jacobian vector containing the estimates of the gradients obtained for + each sample. 
+ The mean of this vector is the gradient wrt to parameters that can be used + for learning. The entire jacobian vector can be used to assess estimator + variance. + """ + def surrogate(params): + dist = dist_builder(*params) + one_sample_surrogate_fn = lambda x: function(x) * dist.log_prob(x) + samples = jax.lax.stop_gradient(dist.sample((num_samples,), seed=rng)) + # We vmap the function application over samples - this ensures that the + # function we use does not have to be vectorized itself. + return jax.vmap(one_sample_surrogate_fn)(samples) + + return jax.jacfwd(surrogate)(params) + + +def pathwise_jacobians( + function: Callable[[chex.Array], float], + params: base.Params, + dist_builder: Callable[..., Any], + rng: chex.PRNGKey, + num_samples: int) -> Sequence[chex.Array]: + r"""Pathwise gradient estimation. + + Approximates: + \nabla_{\theta} E_{p(x; \theta)} f(x) + With: + E_{p(\epsilon)} \nabla_{\theta} f(g(\epsilon, \theta)) + where x = g(\epsilon, \theta). g depends on the distribution p. + + Requires: p to be reparametrizable and the reparametrization to be implemented + in tensorflow_probability. Applicable to continuous random variables. + f needs to be differentiable. + + Args: + function: Function f(x) for which to estimate grads_{params} E_dist f(x). + The function takes in one argument (a sample from the distribution) and + returns a floating point value. + params: A tuple of jnp arrays. + The parameters for which to construct the distribution. + dist_builder: a constructor which builds a distribution given the input + parameters specified by params. `dist_builder(params)` should return a + valid distribution. + rng: a PRNGKey key. + num_samples: Int, the number of samples used to compute the grads. + + Returns: + A tuple of size `params`, each element is `num_samples x param.shape` + jacobian vector containing the estimates of the gradients obtained for + each sample. 
+ The mean of this vector is the gradient wrt to parameters that can be used + for learning. The entire jacobian vector can be used to assess estimator + variance. + """ + def surrogate(params): + # We vmap the function application over samples - this ensures that the + # function we use does not have to be vectorized itself. + dist = dist_builder(*params) + return jax.vmap(function)(dist.sample((num_samples,), seed=rng)) + + return jax.jacfwd(surrogate)(params) + + +def measure_valued_jacobians( + function: Callable[[chex.Array], float], + params: base.Params, + dist_builder: Callable[..., Any], + rng: chex.PRNGKey, + num_samples: int, + coupling: bool = True) -> Sequence[chex.Array]: + r"""Measure valued gradient estimation. + + Approximates: + \nabla_{\theta} E_{p(x; \theta)} f(x) + With: + 1./ c (E_{p1(x; \theta)} f(x) - E_{p2(x; \theta)} f(x)) where p1 and p2 are + measures which depend on p. + + Currently only supports computing gradients of expectations of Gaussian RVs. + + Args: + function: Function f(x) for which to estimate grads_{params} E_dist f(x). + The function takes in one argument (a sample from the distribution) and + returns a floating point value. + params: A tuple of jnp arrays. + The parameters for which to construct the distribution. + dist_builder: a constructor which builds a distribution given the input + parameters specified by params. `dist_builder(params)` should return a + valid distribution. + rng: a PRNGKey key. + num_samples: Int, the number of samples used to compute the grads. + coupling: A boolean. Whether or not to use coupling for the positive and + negative samples. Recommended: True, as this reduces variance. + + Returns: + A tuple of size `params`, each element is `num_samples x param.shape` + jacobian vector containing the estimates of the gradients obtained for + each sample. + The mean of this vector is the gradient wrt to parameters that can be used + for learning. 
The entire jacobian vector can be used to assess estimator + variance. + """ + if dist_builder is not utils.multi_normal: + raise ValueError( + 'Unsupported distribution builder for measure_valued_jacobians!') + dist = dist_builder(*params) + # Need to apply chain rule for log scale grad (instead of scale grad). + return [ + measure_valued_estimation_mean( + function, dist, rng, num_samples, coupling=coupling), + jnp.exp(dist.log_scale) * measure_valued_estimation_std( + function, dist, rng, num_samples, coupling=coupling)] + + +def measure_valued_estimation_mean( + function: Callable[[chex.Array], float], + dist: Any, + rng: chex.PRNGKey, + num_samples: int, + coupling: bool = True) -> chex.Array: + """Measure valued grads of a Gaussian expectation of `function` wrt the mean. + + Args: + function: Function f(x) for which to estimate grads_{mean} E_dist f(x). + The function takes in one argument (a sample from the distribution) and + returns a floating point value. + dist: a distribution on which we can call `sample`. + rng: a PRNGKey key. + num_samples: Int, the number of samples used to compute the grads. + coupling: A boolean. Whether or not to use coupling for the positive and + negative samples. Recommended: True, as this reduces variance. + + Returns: + A `num_samples x D` vector containing the estimates of the gradients + obtained for each sample. The mean of this vector can be used to update + the mean parameter. The entire vector can be used to assess estimator + variance. 
+ """ + mean, log_std = dist.params + std = jnp.exp(log_std) + + dist_samples = dist.sample((num_samples,), seed=rng) + + pos_rng, neg_rng = jax.random.split(rng) + pos_sample = jax.random.weibull_min( + pos_rng, scale=math.sqrt(2.), concentration=2., shape=dist_samples.shape) + + if coupling: + neg_sample = pos_sample + else: + neg_sample = jax.random.weibull_min( + neg_rng, + scale=math.sqrt(2.), + concentration=2., + shape=dist_samples.shape) + + # N x D + positive_diag = mean + std * pos_sample + # N x D + negative_diag = mean - std * neg_sample + + # NOTE: you can sample base samples here if you use the same rng + # Duplicate the D dimension - N x D x D. + base_dist_samples = utils.tile_second_to_last_dim(dist_samples) + positive = utils.set_diags(base_dist_samples, positive_diag) + negative = utils.set_diags(base_dist_samples, negative_diag) + + c = np.sqrt(2 * np.pi) * std # D + # Apply function. We apply the function to each element of N x D x D. + # We apply a function that takes a sample and returns one number, so the + # output will be N x D (which is what we want, batch by dimension). + # We apply a function in parallel to the batch. + # Broadcast the division. + vmaped_function = jax.vmap(jax.vmap(function, 1, 0)) + grads = (vmaped_function(positive) - vmaped_function(negative)) / c + + chex.assert_shape(grads, (num_samples,) + std.shape) + return grads + + +def measure_valued_estimation_std( + function: Callable[[chex.Array], float], + dist: Any, + rng: chex.PRNGKey, + num_samples: int, + coupling: bool = True) -> chex.Array: + """Measure valued grads of a Gaussian expectation of `function` wrt the std. + + Args: + function: Function f(x) for which to estimate grads_{std} E_dist f(x). + The function takes in one argument (a sample from the distribution) and + returns a floating point value. + dist: a distribution on which we can call `sample`. + rng: a PRNGKey key. + num_samples: Int, the number of samples used to compute the grads. 
+ coupling: A boolean. Whether or not to use coupling for the positive and + negative samples. Recommended: True, as this reduces variance. + + Returns: + A `num_samples x D` vector containing the estimates of the gradients + obtained for each sample. The mean of this vector can be used to update + the scale parameter. The entire vector can be used to assess estimator + variance. + """ + mean, log_std = dist.params + std = jnp.exp(log_std) + + dist_samples = dist.sample((num_samples,), seed=rng) + + pos_rng, neg_rng = jax.random.split(rng) + + # The only difference between mean and std gradients is what we sample. + pos_sample = jax.random.double_sided_maxwell( + pos_rng, loc=0.0, scale=1.0, shape=dist_samples.shape) + if coupling: + unif_rvs = jax.random.uniform(neg_rng, dist_samples.shape) + neg_sample = unif_rvs * pos_sample + else: + neg_sample = jax.random.normal(neg_rng, dist_samples.shape) + + # Both need to be positive in the case of the scale. + # N x D + positive_diag = mean + std * pos_sample + # N x D + negative_diag = mean + std * neg_sample + + # NOTE: you can sample base samples here if you use the same rng + # Duplicate the D dimension - N x D x D. + base_dist_samples = utils.tile_second_to_last_dim(dist_samples) + positive = utils.set_diags(base_dist_samples, positive_diag) + negative = utils.set_diags(base_dist_samples, negative_diag) + + # Different C for the scale + c = std # D + # Apply function. We apply the function to each element of N x D x D. + # We apply a function that takes a sample and returns one number, so the + # output will be N x D (which is what we want, batch by dimension). + # We apply a function in parallel to the batch. + # Broadcast the division. 
+ vmaped_function = jax.vmap(jax.vmap(function, 1, 0)) + grads = (vmaped_function(positive) - vmaped_function(negative)) / c + + chex.assert_shape(grads, (num_samples,) + std.shape) + return grads + diff --git a/lib/python3.10/site-packages/optax/_src/stochastic_gradient_estimators_test.py b/lib/python3.10/site-packages/optax/_src/stochastic_gradient_estimators_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b3a4e36340f5048d3cfdd68699a8015171879319 --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/stochastic_gradient_estimators_test.py @@ -0,0 +1,371 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for `stochastic_gradient_estimators.py`.""" + +from absl.testing import absltest +from absl.testing import parameterized + +import chex +import jax +import jax.numpy as jnp +import numpy as np + +from optax._src import stochastic_gradient_estimators as sge +from optax._src import utils + + +# Set seed for deterministic sampling. 
+np.random.seed(42) + + +_estimator_to_num_samples = { + sge.score_function_jacobians: 5 * 10**5, + sge.measure_valued_jacobians: 10**5, + sge.pathwise_jacobians: 5 * 10**4, +} + +_weighted_estimator_to_num_samples = { + sge.score_function_jacobians: 5 * 10**6, + sge.measure_valued_jacobians: 5 * 10**5, + sge.pathwise_jacobians: 5 * 10**4, +} + + +def _ones(dims): + return jnp.ones(shape=(dims), dtype=jnp.float32) + + +def _assert_equal(actual, expected, rtol=1e-2, atol=1e-2): + """Asserts that arrays are equal.""" + # Note: assert_allclose does not check shapes + chex.assert_equal_shape((actual, expected)) + + # We get around the bug https://github.com/numpy/numpy/issues/13801 + zero_indices = np.argwhere(expected == 0) + if not np.all(np.abs(actual[zero_indices]) <= atol): + raise AssertionError(f'Larger than {atol} diff in {actual[zero_indices]}') + + non_zero_indices = np.argwhere(expected != 0) + np.testing.assert_allclose( + np.asarray(actual)[non_zero_indices], + expected[non_zero_indices], rtol, atol) + + +def _estimator_variant(variant, estimator): + return variant(estimator, static_argnums=(0, 2, 4)) + + +def _measure_valued_variant(variant): + return variant( + sge.measure_valued_jacobians, + static_argnums=(0, 2, 4, 5)) + + +class GradientEstimatorsTest(chex.TestCase): + + @chex.all_variants + @parameterized.named_parameters( + chex.params_product([ + ('_score_function_jacobians', sge.score_function_jacobians), + ('_pathwise_jacobians', sge.pathwise_jacobians), + ('_measure_valued_jacobians', sge.measure_valued_jacobians), + ], [ + ('0.1', 0.1), + ('0.5', 0.5), + ('0.9', 0.9), + ], + named=True)) + def testConstantFunction(self, estimator, constant): + data_dims = 3 + num_samples = _estimator_to_num_samples[estimator] + + effective_mean = 1.5 + mean = effective_mean * _ones(data_dims) + + effective_log_scale = 0.0 + log_scale = effective_log_scale * _ones(data_dims) + rng = jax.random.PRNGKey(1) + + jacobians = _estimator_variant(self.variant, 
estimator)( + lambda x: jnp.array(constant), [mean, log_scale], + utils.multi_normal, rng, num_samples) + + # Average over the number of samples. + mean_jacobians = jacobians[0] + chex.assert_shape(mean_jacobians, (num_samples, data_dims)) + mean_grads = np.mean(mean_jacobians, axis=0) + expected_mean_grads = np.zeros(data_dims, dtype=np.float32) + + log_scale_jacobians = jacobians[1] + chex.assert_shape(log_scale_jacobians, (num_samples, data_dims)) + log_scale_grads = np.mean(log_scale_jacobians, axis=0) + expected_log_scale_grads = np.zeros(data_dims, dtype=np.float32) + + _assert_equal(mean_grads, expected_mean_grads, atol=5e-3) + _assert_equal(log_scale_grads, expected_log_scale_grads, atol=5e-3) + + @chex.all_variants + @parameterized.named_parameters( + chex.params_product([ + ('_score_function_jacobians', sge.score_function_jacobians), + ('_pathwise_jacobians', sge.pathwise_jacobians), + ('_measure_valued_jacobians', sge.measure_valued_jacobians), + ], [ + ('0.5_-1.', 0.5, -1.), + ('0.7_0.0)', 0.7, 0.0), + ('0.8_0.1', 0.8, 0.1), + ], + named=True)) + def testLinearFunction(self, estimator, effective_mean, effective_log_scale): + data_dims = 3 + num_samples = _estimator_to_num_samples[estimator] + rng = jax.random.PRNGKey(1) + + mean = effective_mean * _ones(data_dims) + log_scale = effective_log_scale * _ones(data_dims) + + jacobians = _estimator_variant(self.variant, estimator)( + np.sum, [mean, log_scale], + utils.multi_normal, rng, num_samples) + + mean_jacobians = jacobians[0] + chex.assert_shape(mean_jacobians, (num_samples, data_dims)) + mean_grads = np.mean(mean_jacobians, axis=0) + expected_mean_grads = np.ones(data_dims, dtype=np.float32) + + log_scale_jacobians = jacobians[1] + chex.assert_shape(log_scale_jacobians, (num_samples, data_dims)) + log_scale_grads = np.mean(log_scale_jacobians, axis=0) + expected_log_scale_grads = np.zeros(data_dims, dtype=np.float32) + + _assert_equal(mean_grads, expected_mean_grads) + _assert_equal(log_scale_grads, 
expected_log_scale_grads) + + @chex.all_variants + @parameterized.named_parameters( + chex.params_product([ + ('_score_function_jacobians', sge.score_function_jacobians), + ('_pathwise_jacobians', sge.pathwise_jacobians), + ('_measure_valued_jacobians', sge.measure_valued_jacobians), + ], [ + ('1.0_0.3', 1.0, 0.3), + ], + named=True)) + def testQuadraticFunction( + self, estimator, effective_mean, effective_log_scale): + data_dims = 3 + num_samples = _estimator_to_num_samples[estimator] + rng = jax.random.PRNGKey(1) + + mean = effective_mean * _ones(data_dims) + log_scale = effective_log_scale * _ones(data_dims) + + jacobians = _estimator_variant(self.variant, estimator)( + lambda x: np.sum(x**2) / 2, [mean, log_scale], + utils.multi_normal, rng, num_samples) + + mean_jacobians = jacobians[0] + chex.assert_shape(mean_jacobians, (num_samples, data_dims)) + mean_grads = np.mean(mean_jacobians, axis=0) + expected_mean_grads = effective_mean * np.ones( + data_dims, dtype=np.float32) + + log_scale_jacobians = jacobians[1] + chex.assert_shape(log_scale_jacobians, (num_samples, data_dims)) + log_scale_grads = np.mean(log_scale_jacobians, axis=0) + expected_log_scale_grads = np.exp(2 * effective_log_scale) * np.ones( + data_dims, dtype=np.float32) + + _assert_equal(mean_grads, expected_mean_grads, atol=5e-2) + _assert_equal(log_scale_grads, expected_log_scale_grads, atol=5e-2) + + @chex.all_variants + @parameterized.named_parameters( + chex.params_product([ + ('_score_function_jacobians', sge.score_function_jacobians), + ('_pathwise_jacobians', sge.pathwise_jacobians), + ('_measure_valued_jacobians', sge.measure_valued_jacobians), + ], [ + ('case_1', [1.0, 2.0, 3.], [-1., 0.3, -2.], [1., 1., 1.]), + ('case_2', [1.0, 2.0, 3.], [-1., 0.3, -2.], [4., 2., 3.]), + ('case_3', [1.0, 2.0, 3.], [0.1, 0.2, 0.1], [10., 5., 1.]), + ], + named=True)) + def testWeightedLinear( + self, estimator, effective_mean, effective_log_scale, weights): + num_samples = 
_weighted_estimator_to_num_samples[estimator] + rng = jax.random.PRNGKey(1) + + mean = jnp.array(effective_mean) + log_scale = jnp.array(effective_log_scale) + weights = jnp.array(weights) + + data_dims = len(effective_mean) + + function = lambda x: jnp.sum(x * weights) + jacobians = _estimator_variant(self.variant, estimator)( + function, [mean, log_scale], + utils.multi_normal, rng, num_samples) + + mean_jacobians = jacobians[0] + chex.assert_shape(mean_jacobians, (num_samples, data_dims)) + mean_grads = np.mean(mean_jacobians, axis=0) + + log_scale_jacobians = jacobians[1] + chex.assert_shape(log_scale_jacobians, (num_samples, data_dims)) + log_scale_grads = np.mean(log_scale_jacobians, axis=0) + + expected_mean_grads = weights + expected_log_scale_grads = np.zeros(data_dims, dtype=np.float32) + + _assert_equal(mean_grads, expected_mean_grads, atol=5e-2) + _assert_equal(log_scale_grads, expected_log_scale_grads, atol=5e-2) + + @chex.all_variants + @parameterized.named_parameters( + chex.params_product([ + ('_score_function_jacobians', sge.score_function_jacobians), + ('_pathwise_jacobians', sge.pathwise_jacobians), + ('_measure_valued_jacobians', sge.measure_valued_jacobians), + ], [ + ('case_1', [1.0, 2.0, 3.], [-1., 0.3, -2.], [1., 1., 1.]), + ('case_2', [1.0, 2.0, 3.], [-1., 0.3, -2.], [4., 2., 3.]), + ('case_3', [1.0, 2.0, 3.], [0.1, 0.2, 0.1], [3., 5., 1.]), + ], + named=True)) + def testWeightedQuadratic( + self, estimator, effective_mean, effective_log_scale, weights): + num_samples = _weighted_estimator_to_num_samples[estimator] + rng = jax.random.PRNGKey(1) + + mean = jnp.array(effective_mean, dtype=jnp.float32) + log_scale = jnp.array(effective_log_scale, dtype=jnp.float32) + weights = jnp.array(weights, dtype=jnp.float32) + + data_dims = len(effective_mean) + + function = lambda x: jnp.sum(x * weights) ** 2 + jacobians = _estimator_variant(self.variant, estimator)( + function, [mean, log_scale], + utils.multi_normal, rng, num_samples) + + 
mean_jacobians = jacobians[0] + chex.assert_shape(mean_jacobians, (num_samples, data_dims)) + mean_grads = np.mean(mean_jacobians, axis=0) + + log_scale_jacobians = jacobians[1] + chex.assert_shape(log_scale_jacobians, (num_samples, data_dims)) + log_scale_grads = np.mean(log_scale_jacobians, axis=0) + + expected_mean_grads = 2 * weights * np.sum(weights * mean) + effective_scale = np.exp(log_scale) + expected_scale_grads = 2 * weights ** 2 * effective_scale + expected_log_scale_grads = expected_scale_grads * effective_scale + + _assert_equal(mean_grads, expected_mean_grads, atol=1e-1, rtol=1e-1) + _assert_equal( + log_scale_grads, expected_log_scale_grads, atol=1e-1, rtol=1e-1) + + @chex.all_variants + @parameterized.named_parameters( + chex.params_product( + [ + ('_sum_cos_x', [1.0], [1.0], lambda x: jnp.sum(jnp.cos(x))), + # Need to ensure that the mean is not too close to 0. + ('_sum_log_x', [10.0], [0.0], lambda x: jnp.sum(jnp.log(x))), + ('_sum_cos_2x', [1.0, 2.0], [1.0, -2 + ], lambda x: jnp.sum(jnp.cos(2 * x))), + ('_cos_sum_2x', [1.0, 2.0], [1.0, -2 + ], lambda x: jnp.cos(jnp.sum(2 * x))), + ], + [ + ('coupling', True), + ('nocoupling', False), + ], + named=True)) + def testNonPolynomialFunctionConsistencyWithPathwise(self, effective_mean, + effective_log_scale, + function, coupling): + num_samples = 10**5 + rng = jax.random.PRNGKey(1) + measure_rng, pathwise_rng = jax.random.split(rng) + + mean = jnp.array(effective_mean, dtype=jnp.float32) + log_scale = jnp.array(effective_log_scale, dtype=jnp.float32) + data_dims = len(effective_mean) + + measure_valued_jacobians = _measure_valued_variant(self.variant)( + function, [mean, log_scale], + utils.multi_normal, measure_rng, num_samples, coupling) + + measure_valued_mean_jacobians = measure_valued_jacobians[0] + chex.assert_shape(measure_valued_mean_jacobians, (num_samples, data_dims)) + measure_valued_mean_grads = np.mean(measure_valued_mean_jacobians, axis=0) + + measure_valued_log_scale_jacobians = 
measure_valued_jacobians[1] + chex.assert_shape( + measure_valued_log_scale_jacobians, (num_samples, data_dims)) + measure_valued_log_scale_grads = np.mean( + measure_valued_log_scale_jacobians, axis=0) + + pathwise_jacobians = _estimator_variant( + self.variant, sge.pathwise_jacobians)(function, [mean, log_scale], + utils.multi_normal, pathwise_rng, + num_samples) + + pathwise_mean_jacobians = pathwise_jacobians[0] + chex.assert_shape(pathwise_mean_jacobians, (num_samples, data_dims)) + pathwise_mean_grads = np.mean(pathwise_mean_jacobians, axis=0) + + pathwise_log_scale_jacobians = pathwise_jacobians[1] + chex.assert_shape(pathwise_log_scale_jacobians, (num_samples, data_dims)) + pathwise_log_scale_grads = np.mean(pathwise_log_scale_jacobians, axis=0) + + _assert_equal( + pathwise_mean_grads, measure_valued_mean_grads, rtol=5e-1, atol=1e-1) + _assert_equal( + pathwise_log_scale_grads, measure_valued_log_scale_grads, + rtol=5e-1, atol=1e-1) + + +class MeasuredValuedEstimatorsTest(chex.TestCase): + + @chex.all_variants + @parameterized.parameters([True, False]) + def testRaisesErrorForNonGaussian(self, coupling): + num_samples = 10**5 + rng = jax.random.PRNGKey(1) + + function = lambda x: jnp.sum(x) ** 2 + + mean = jnp.array(0, dtype=jnp.float32) + log_scale = jnp.array(0., dtype=jnp.float32) + + class TestDist(): + + def __init__(self, params): + self._params = params + + def sample(self, n): + return np.zeros(n) + + with self.assertRaises(ValueError): + _measure_valued_variant(self.variant)( + function, [mean, log_scale], + TestDist, rng, num_samples, coupling) + + +if __name__ == '__main__': + absltest.main() diff --git a/lib/python3.10/site-packages/optax/_src/test_utils.py b/lib/python3.10/site-packages/optax/_src/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5a18c78c80cc42ddd6a2476957053dc043cc96a8 --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/test_utils.py @@ -0,0 +1,42 @@ +# Copyright 2021 DeepMind 
Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Testing utilities for Optax.""" + +import inspect +import types +from typing import Sequence, Tuple + + +def find_internal_python_modules( + root_module: types.ModuleType, +) -> Sequence[Tuple[str, types.ModuleType]]: + """Returns `(name, module)` for all Optax submodules under `root_module`.""" + modules = set([(root_module.__name__, root_module)]) + visited = set() + to_visit = [root_module] + + while to_visit: + mod = to_visit.pop() + visited.add(mod) + + for name in dir(mod): + obj = getattr(mod, name) + if inspect.ismodule(obj) and obj not in visited: + if obj.__name__.startswith('optax'): + if '_src' not in obj.__name__: + to_visit.append(obj) + modules.add((obj.__name__, obj)) + + return sorted(modules) diff --git a/lib/python3.10/site-packages/optax/_src/transform.py b/lib/python3.10/site-packages/optax/_src/transform.py new file mode 100644 index 0000000000000000000000000000000000000000..1b2b210524a6de3825a0759e91f45c6bd2bc9a0b --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/transform.py @@ -0,0 +1,1143 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Gradient transformations.""" + +import functools +from typing import Any, Callable, NamedTuple, Optional, Union + +import chex +import jax +import jax.numpy as jnp + +from optax._src import base +from optax._src import clipping +from optax._src import numerics +from optax._src import utils +from optax._src import wrappers + +# pylint:disable=no-value-for-parameter + +_abs_sq = numerics.abs_sq + + +class TraceState(NamedTuple): + """Holds an aggregation of past updates.""" + trace: base.Params + + +def trace( + decay: float, + nesterov: bool = False, + accumulator_dtype: Optional[Any] = None, +) -> base.GradientTransformation: + """Compute a trace of past updates. + + Note: `trace` and `ema` have very similar but distinct updates; + `trace = decay * trace + t`, while `ema = decay * ema + (1-decay) * t`. + Both are frequently found in the optimization literature. + + Args: + decay: Decay rate for the trace of past updates. + nesterov: Whether to use Nesterov momentum. + accumulator_dtype: Optional `dtype` to be used for the accumulator; if + `None` then the `dtype` is inferred from `params` and `updates`. + + Returns: + A `GradientTransformation` object. 
+ """ + + accumulator_dtype = utils.canonicalize_dtype(accumulator_dtype) + + def init_fn(params): + return TraceState( + trace=jax.tree_util.tree_map( + lambda t: jnp.zeros_like(t, dtype=accumulator_dtype), params)) + + def update_fn(updates, state, params=None): + del params + f = lambda g, t: g + decay * t + new_trace = jax.tree_util.tree_map(f, updates, state.trace) + updates = ( + jax.tree_util.tree_map(f, updates, new_trace) if nesterov + else new_trace) + new_trace = utils.cast_tree(new_trace, accumulator_dtype) + return updates, TraceState(trace=new_trace) + + return base.GradientTransformation(init_fn, update_fn) + + +def update_moment(updates, moments, decay, order): + """Compute the exponential moving average of the `order`-th moment.""" + return jax.tree_util.tree_map( + lambda g, t: (1 - decay) * (g ** order) + decay * t, updates, moments) + + +def update_infinity_moment(updates, moments, decay, eps): + """Compute the exponential moving average of the infinity norm.""" + return jax.tree_util.tree_map( + lambda g, t: jnp.maximum(jnp.abs(g) + eps, decay * t), updates, moments) + + +def update_moment_per_elem_norm(updates, moments, decay, order): + """Compute the EMA of the `order`-th moment of the element-wise norm.""" + + def orderth_norm(g): + if jnp.isrealobj(g): + return g ** order + else: + half_order = order / 2 + # JAX generates different HLO for int and float `order` + if half_order.is_integer(): + half_order = int(half_order) + return _abs_sq(g) ** half_order + + return jax.tree_util.tree_map( + lambda g, t: (1 - decay) * orderth_norm(g) + decay * t, updates, moments) + + +@functools.partial(jax.jit, inline=True) +def bias_correction(moment, decay, count): + """Performs bias correction. It becomes a no-op as count goes to infinity.""" + # The conversion to the data type of the moment ensures that bfloat16 remains + # bfloat16 in the optimizer state. 
This conversion has to be done after + # `bias_correction_` is calculated as calculating `decay**count` in low + # precision can result in it being rounded to 1 and subsequently a + # "division by zero" error. + bias_correction_ = 1 - decay**count + + # Perform division in the original precision. + return jax.tree_util.tree_map( + lambda t: t / bias_correction_.astype(t.dtype), moment) + + +def _reject_complex(params): + if any(jnp.iscomplexobj(x) for x in jax.tree_util.tree_leaves(params)): + raise ValueError('This transformation does not support complex parameters.') + + +class EmaState(NamedTuple): + """Holds an exponential moving average of past updates.""" + count: chex.Array # shape=(), dtype=jnp.int32. + ema: base.Params + + +def ema( + decay: float, + debias: bool = True, + accumulator_dtype: Optional[Any] = None +) -> base.GradientTransformation: + """Compute an exponential moving average of past updates. + + Note: `trace` and `ema` have very similar but distinct updates; + `ema = decay * ema + (1-decay) * t`, while `trace = decay * trace + t`. + Both are frequently found in the optimization literature. + + Args: + decay: Decay rate for the exponential moving average. + debias: Whether to debias the transformed gradient. + accumulator_dtype: Optional `dtype` to used for the accumulator; if `None` + then the `dtype` is inferred from `params` and `updates`. + + Returns: + A `GradientTransformation` object. 
+ """ + + accumulator_dtype = utils.canonicalize_dtype(accumulator_dtype) + + def init_fn(params): + return EmaState( + count=jnp.zeros([], jnp.int32), + ema=jax.tree_util.tree_map( + lambda t: jnp.zeros_like(t, dtype=accumulator_dtype), params)) + + def update_fn(updates, state, params=None): + del params + updates = new_ema = update_moment(updates, state.ema, decay, order=1) + count_inc = utils.safe_int32_increment(state.count) + if debias: + updates = bias_correction(new_ema, decay, count_inc) + state_ema = utils.cast_tree(new_ema, accumulator_dtype) + return updates, EmaState(count=count_inc, ema=state_ema) + + return base.GradientTransformation(init_fn, update_fn) + + +class ScaleByRssState(NamedTuple): + """State holding the sum of gradient squares to date.""" + sum_of_squares: base.Updates + + +def scale_by_rss( + initial_accumulator_value: float = 0.1, + eps: float = 1e-7 +) -> base.GradientTransformation: + """Rescale updates by the root of the sum of all squared gradients to date. + + References: + [Duchi et al, 2011](https://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf) + [McMahan et al., 2010](https://arxiv.org/abs/1002.4908) + + Args: + initial_accumulator_value: Starting value for accumulators, must be >= 0. + eps: A small floating point value to avoid zero denominator. + + Returns: + A `GradientTransformation` object. 
+ """ + + def init_fn(params): + sum_of_squares = jax.tree_util.tree_map( + lambda t: jnp.full_like(t, initial_accumulator_value), params) + return ScaleByRssState(sum_of_squares=sum_of_squares) + + def update_fn(updates, state, params=None): + del params + sum_of_squares = jax.tree_util.tree_map( + lambda g, t: _abs_sq(g) + t, updates, state.sum_of_squares) + inv_sqrt_g_square = jax.tree_util.tree_map( + lambda t: jnp.where(t > 0, jax.lax.rsqrt(t + eps), 0.0), sum_of_squares) + updates = jax.tree_util.tree_map( + lambda scale, g: scale * g, inv_sqrt_g_square, updates) + return updates, ScaleByRssState(sum_of_squares=sum_of_squares) + + return base.GradientTransformation(init_fn, update_fn) + + +class ScaleByRmsState(NamedTuple): + """State for exponential root mean-squared (RMS)-normalized updates.""" + nu: base.Updates + + +def scale_by_rms( + decay: float = 0.9, + eps: float = 1e-8, + initial_scale: float = 0. +) -> base.GradientTransformation: + """Rescale updates by the root of the exp. moving avg of the square. + + References: + [Hinton](www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf) + + Args: + decay: Decay rate for the exponentially weighted average of squared grads. + eps: Term added to the denominator to improve numerical stability. + initial_scale: Initial value for second moment. + + Returns: + A `GradientTransformation` object. 
+ """ + + def init_fn(params): + nu = jax.tree_util.tree_map( + lambda n: jnp.full_like(n, initial_scale), params) # second moment + return ScaleByRmsState(nu=nu) + + def update_fn(updates, state, params=None): + del params + nu = update_moment_per_elem_norm(updates, state.nu, decay, 2) + updates = jax.tree_util.tree_map( + lambda g, n: g * jax.lax.rsqrt(n + eps), updates, nu) + return updates, ScaleByRmsState(nu=nu) + + return base.GradientTransformation(init_fn, update_fn) + + +class ScaleByRStdDevState(NamedTuple): + """State for centered exponential moving average of squares of updates.""" + mu: base.Updates + nu: base.Updates + + +def scale_by_stddev( + decay: float = 0.9, + eps: float = 1e-8, + initial_scale: float = 0. +) -> base.GradientTransformation: + """Rescale updates by the root of the centered exp. moving average of squares. + + References: + [Hinton](www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf) + + Args: + decay: Decay rate for the exponentially weighted average of squared grads. + eps: Term added to the denominator to improve numerical stability. + initial_scale: Initial value for second moment. + + Returns: + A `GradientTransformation` object. + """ + + def init_fn(params): + mu = jax.tree_util.tree_map(jnp.zeros_like, params) # First moment + nu = jax.tree_util.tree_map( + lambda n: jnp.full_like(n, initial_scale), params) # second moment + return ScaleByRStdDevState(mu=mu, nu=nu) + + def update_fn(updates, state, params=None): + del params + mu = update_moment(updates, state.mu, decay, 1) + nu = update_moment_per_elem_norm(updates, state.nu, decay, 2) + updates = jax.tree_util.tree_map( + lambda g, m, n: g * jax.lax.rsqrt(n - _abs_sq(m) + eps), + updates, mu, nu) + return updates, ScaleByRStdDevState(mu=mu, nu=nu) + + return base.GradientTransformation(init_fn, update_fn) + + +class ScaleByAdamState(NamedTuple): + """State for the Adam algorithm.""" + count: chex.Array # shape=(), dtype=jnp.int32. 
+ mu: base.Updates + nu: base.Updates + + +def scale_by_adam( + b1: float = 0.9, + b2: float = 0.999, + eps: float = 1e-8, + eps_root: float = 0.0, + mu_dtype: Optional[Any] = None, +) -> base.GradientTransformation: + """Rescale updates according to the Adam algorithm. + + References: + [Kingma et al, 2014](https://arxiv.org/abs/1412.6980) + + Args: + b1: Decay rate for the exponentially weighted average of grads. + b2: Decay rate for the exponentially weighted average of squared grads. + eps: Term added to the denominator to improve numerical stability. + eps_root: Term added to the denominator inside the square-root to improve + numerical stability when backpropagating gradients through the rescaling. + mu_dtype: Optional `dtype` to be used for the first order accumulator; if + `None` then the `dtype is inferred from `params` and `updates`. + + Returns: + A `GradientTransformation` object. + """ + + mu_dtype = utils.canonicalize_dtype(mu_dtype) + + def init_fn(params): + mu = jax.tree_util.tree_map( # First moment + lambda t: jnp.zeros_like(t, dtype=mu_dtype), params) + nu = jax.tree_util.tree_map(jnp.zeros_like, params) # Second moment + return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu) + + def update_fn(updates, state, params=None): + del params + mu = update_moment(updates, state.mu, b1, 1) + nu = update_moment_per_elem_norm(updates, state.nu, b2, 2) + count_inc = numerics.safe_int32_increment(state.count) + mu_hat = bias_correction(mu, b1, count_inc) + nu_hat = bias_correction(nu, b2, count_inc) + updates = jax.tree_util.tree_map( + lambda m, v: m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_hat) + mu = utils.cast_tree(mu, mu_dtype) + return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu) + + return base.GradientTransformation(init_fn, update_fn) + + +class ScaleByAmsgradState(NamedTuple): + """State for the AMSGrad algorithm.""" + count: chex.Array # shape=(), dtype=jnp.int32. 
+ mu: base.Updates + nu: base.Updates + nu_max: base.Updates + + +def scale_by_amsgrad( + b1: float = 0.9, + b2: float = 0.999, + eps: float = 1e-8, + eps_root: float = 0.0, + mu_dtype: Optional[Any] = None, +) -> base.GradientTransformation: + """Rescale updates according to the AMSGrad algorithm. + + References: + [Reddi et al, 2018](https://openreview.net/forum?id=ryQu7f-RZ) + + Args: + b1: Decay rate for the exponentially weighted average of grads. + b2: Decay rate for the exponentially weighted average of squared grads. + eps: Term added to the denominator to improve numerical stability. + eps_root: Term added to the denominator inside the square-root to improve + numerical stability when backpropagating gradients through the rescaling. + mu_dtype: Optional `dtype` to be used for the first order accumulator; if + `None` then the `dtype is inferred from `params` and `updates`. + + Returns: + A `GradientTransformation` object. + """ + + mu_dtype = utils.canonicalize_dtype(mu_dtype) + + def init_fn(params): + mu = jax.tree_util.tree_map( # First moment + lambda t: jnp.zeros_like(t, dtype=mu_dtype), params) + nu = jax.tree_util.tree_map(jnp.zeros_like, params) # Second moment + nu_max = jax.tree_util.tree_map(jnp.zeros_like, params) + return ScaleByAmsgradState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu, + nu_max=nu_max) + + def update_fn(updates, state, params=None): + del params + mu = update_moment(updates, state.mu, b1, 1) + nu = update_moment_per_elem_norm(updates, state.nu, b2, 2) + count_inc = numerics.safe_int32_increment(state.count) + mu_hat = bias_correction(mu, b1, count_inc) + nu_hat = bias_correction(nu, b2, count_inc) + nu_max = jax.tree_util.tree_map(jnp.maximum, state.nu_max, nu_hat) + updates = jax.tree_util.tree_map( + lambda m, v: m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_max) + mu = utils.cast_tree(mu, mu_dtype) + return updates, ScaleByAmsgradState(count=count_inc, mu=mu, nu=nu, + nu_max=nu_max) + + return 
base.GradientTransformation(init_fn, update_fn) + + +def scale_by_adamax( + b1: float = 0.9, + b2: float = 0.999, + eps: float = 1e-8 +) -> base.GradientTransformation: + """Rescale updates according to the Adamax algorithm. + + References: + [Kingma et al, 2014](https://arxiv.org/abs/1412.6980) + + Args: + b1: Decay rate for the exponentially weighted average of grads. + b2: Decay rate for the exponentially weighted maximum of grads. + eps: Term added to the denominator to improve numerical stability. + + Returns: + A `GradientTransformation` object. + """ + + def init_fn(params): + mu = jax.tree_util.tree_map(jnp.zeros_like, params) # First moment + nu = jax.tree_util.tree_map(jnp.zeros_like, params) # Infinite moment + return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu) + + def update_fn(updates, state, params=None): + del params + count_inc = numerics.safe_int32_increment(state.count) + mu = update_moment(updates, state.mu, b1, 1) + nu = update_infinity_moment(updates, state.nu, b2, eps) + # Bias correction for mean. No bias correction needed for infinity moment. + mu_hat = bias_correction(mu, b1, count_inc) + updates = jax.tree_util.tree_map(lambda m, v: m / v, mu_hat, nu) + return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu) + + return base.GradientTransformation(init_fn, update_fn) + + +ScaleState = base.EmptyState + + +def scale( + step_size: float +) -> base.GradientTransformation: + """Scale updates by some fixed scalar `step_size`. + + Args: + step_size: A scalar corresponding to a fixed scaling factor for updates. + + Returns: + A `GradientTransformation` object. 
+ """ + + def init_fn(params): + del params + return ScaleState() + + def update_fn(updates, state, params=None): + del params + updates = jax.tree_util.tree_map(lambda g: step_size * g, updates) + return updates, state + + return base.GradientTransformation(init_fn, update_fn) + + +def scale_by_param_block_norm( + min_scale: float = 1e-3 +) -> base.GradientTransformation: + """Scale updates for each param block by the norm of that block's parameters. + + A `block` is here a weight vector (e.g. in a Linear layer) or a weight matrix + (e.g. in a convolutional layer) appearing as a leaf in the grads/param pytree. + + Args: + min_scale: Minimum scaling factor. + + Returns: + A `GradientTransformation` object. + """ + + def init_fn(params): + del params + return base.EmptyState() + + def update_fn(updates, state, params): + if params is None: + raise ValueError(base.NO_PARAMS_MSG) + updates = jax.tree_util.tree_map( + lambda u, p: u * numerics.safe_norm(p, min_scale), + updates, params) + return updates, state + + return base.GradientTransformation(init_fn, update_fn) + + +def scale_by_param_block_rms( + min_scale: float = 1e-3 +) -> base.GradientTransformation: + """Scale updates by rms of the gradient for each param vector or matrix. + + A `block` is here a weight vector (e.g. in a Linear layer) or a weight matrix + (e.g. in a convolutional layer) appearing as a leaf in the grads/param pytree. + + Args: + min_scale: Minimum scaling factor. + + Returns: + A `GradientTransformation` object. 
+ """ + + def init_fn(params): + del params + return base.EmptyState() + + def update_fn(updates, state, params): + if params is None: + raise ValueError(base.NO_PARAMS_MSG) + updates = jax.tree_util.tree_map( + lambda u, p: u * numerics.safe_root_mean_squares(p, min_scale), + updates, params) + return updates, state + + return base.GradientTransformation(init_fn, update_fn) + + +class ScaleByBeliefState(NamedTuple): + """State for the rescaling by AdaBelief algorithm.""" + count: chex.Array # shape=(), dtype=jnp.int32. + mu: base.Updates + nu: base.Updates + + +def scale_by_belief( + b1: float = 0.9, + b2: float = 0.999, + eps: float = 1e-16, + eps_root: float = 1e-16 +) -> base.GradientTransformation: + """Rescale updates according to the AdaBelief algorithm. + + References: + [Zhuang et al, 2020](https://arxiv.org/abs/2010.07468) + + Args: + b1: Decay rate for the exponentially weighted average of grads. + b2: Decay rate for the exponentially weighted average of variance of grads. + eps: Term added to the denominator to improve numerical stability. + eps_root: Term added to the second moment of the prediction error to + improve numerical stability. If backpropagating gradients through the + gradient transformation (e.g. for meta-learning), this must be non-zero. + + Returns: + A `GradientTransformation` object. 
+ """ + + def init_fn(params): + mu = jax.tree_util.tree_map(jnp.zeros_like, params) # First moment + s = jax.tree_util.tree_map(jnp.zeros_like, params) # Second Central moment + return ScaleByBeliefState(count=jnp.zeros([], jnp.int32), mu=mu, nu=s) + + def update_fn(updates, state, params=None): + del params + mu = update_moment(updates, state.mu, b1, 1) + prediction_error = jax.tree_util.tree_map( + lambda g, m: g-m, updates, state.mu) + nu = update_moment_per_elem_norm(prediction_error, state.nu, b2, 2) + nu = jax.tree_util.tree_map(lambda v: v + eps_root, nu) + count_inc = numerics.safe_int32_increment(state.count) + mu_hat = bias_correction(mu, b1, count_inc) + nu_hat = bias_correction(nu, b2, count_inc) + updates = jax.tree_util.tree_map( + lambda m, v: m / (jnp.sqrt(v) + eps), mu_hat, nu_hat) + return updates, ScaleByBeliefState(count=count_inc, mu=mu, nu=nu) + + return base.GradientTransformation(init_fn, update_fn) + + +def scale_by_yogi( + b1: float = 0.9, + b2: float = 0.999, + eps: float = 1e-3, + eps_root: float = 0.0, + initial_accumulator_value: float = 1e-6 +) -> base.GradientTransformation: + """Rescale updates according to the Yogi algorithm. + + Supports complex numbers, see + https://gist.github.com/wdphy16/118aef6fb5f82c49790d7678cf87da29 + + References: + [Zaheer et al, 2018](https://papers.nips.cc/paper/2018/hash/90365351ccc7437a1309dc64e4db32a3-Abstract.html) #pylint:disable=line-too-long + + Args: + b1: Decay rate for the exponentially weighted average of grads. + b2: Decay rate for the exponentially weighted average of variance of grads. + eps: Term added to the denominator to improve numerical stability. + eps_root: Term added to the denominator inside the square-root to improve + numerical stability when backpropagating gradients through the rescaling. + initial_accumulator_value: The starting value for accumulators. + Only positive values are allowed. + + Returns: + A `GradientTransformation` object. 
+ """ + + def init_fn(params): + value_like = lambda p: jnp.full_like(p, initial_accumulator_value) + mu = jax.tree_util.tree_map(value_like, params) # First moment + nu = jax.tree_util.tree_map(value_like, params) # Second Central moment + return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu) + + def update_fn(updates, state, params=None): + del params + mu = update_moment(updates, state.mu, b1, 1) + nu = jax.tree_util.tree_map( + lambda g, v: v - (1 - b2) * jnp.sign(v - _abs_sq(g)) * _abs_sq(g), + updates, state.nu) + count_inc = numerics.safe_int32_increment(state.count) + mu_hat = bias_correction(mu, b1, count_inc) + nu_hat = bias_correction(nu, b2, count_inc) + updates = jax.tree_util.tree_map( + lambda m, v: m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_hat) + return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu) + + return base.GradientTransformation(init_fn, update_fn) + + +def scale_by_radam( + b1: float = 0.9, + b2: float = 0.999, + eps: float = 1e-8, + eps_root: float = 0.0, + threshold: float = 5.0 +) -> base.GradientTransformation: + """Rescale updates according to the Rectified Adam algorithm. + + References: + [Liu et al, 2020](https://arxiv.org/abs/1908.03265) + + Args: + b1: Decay rate for the exponentially weighted average of grads. + b2: Decay rate for the exponentially weighted average of squared grads. + eps: Term added to the denominator to improve numerical stability. + eps_root: Term added to the denominator inside the square-root to improve + numerical stability when backpropagating gradients through the rescaling. + threshold: Threshold for variance tractability. + + Returns: + A `GradientTransformation` object. 
+ """ + + ro_inf = 2./(1 - b2) - 1 + def _radam_update(params): + ro = params[0] + mu_hat = params[1] + nu_hat = params[2] + r = jnp.sqrt((ro - 4)*(ro - 2)*ro_inf/((ro_inf - 4)*(ro_inf - 2)*ro)) + updates = jax.tree_util.tree_map( + lambda m, v: r*m / (jnp.sqrt(v + eps_root) + eps), mu_hat, nu_hat) + return updates + + def init_fn(params): + mu = jax.tree_util.tree_map(jnp.zeros_like, params) # First moment + nu = jax.tree_util.tree_map(jnp.zeros_like, params) # Second moment + return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu) + + def update_fn(updates, state, params=None): + del params + mu = update_moment(updates, state.mu, b1, 1) + nu = update_moment_per_elem_norm(updates, state.nu, b2, 2) + count_inc = numerics.safe_int32_increment(state.count) + b2t = b2**count_inc + ro = ro_inf - 2 * count_inc * b2t / (1 - b2t) + mu_hat = bias_correction(mu, b1, count_inc) + nu_hat = bias_correction(nu, b2, count_inc) + updates = jax.lax.cond( + ro >= threshold, _radam_update, lambda _: mu_hat, + (ro, mu_hat, nu_hat)) + return updates, ScaleByAdamState(count=count_inc, mu=mu, nu=nu) + + return base.GradientTransformation(init_fn, update_fn) + + +AddDecayedWeightsState = base.EmptyState + + +def add_decayed_weights( + weight_decay: float = 0.0, + mask: Optional[Union[Any, Callable[[base.Params], Any]]] = None +) -> base.GradientTransformation: + """Add parameter scaled by `weight_decay`. + + Args: + weight_decay: A scalar weight decay rate. + mask: A tree with same structure as (or a prefix of) the params PyTree, + or a Callable that returns such a pytree given the params/updates. + The leaves should be booleans, `True` for leaves/subtrees you want to + apply the transformation to, and `False` for those you want to skip. + + Returns: + A `GradientTransformation` object. 
+ """ + + def init_fn(params): + del params + return AddDecayedWeightsState() + + def update_fn(updates, state, params): + if params is None: + raise ValueError(base.NO_PARAMS_MSG) + updates = jax.tree_util.tree_map( + lambda g, p: g + weight_decay * p, updates, params) + return updates, state + + # If mask is not `None`, apply mask to the gradient transformation. + # E.g. it is common to skip weight decay on bias units and batch stats. + if mask is not None: + return wrappers.masked( + base.GradientTransformation(init_fn, update_fn), mask) + return base.GradientTransformation(init_fn, update_fn) + + +class ScaleByScheduleState(NamedTuple): + """Maintains count for scale scheduling.""" + count: chex.Array # shape=(), dtype=jnp.int32 + + +def scale_by_schedule( + step_size_fn: base.Schedule +) -> base.GradientTransformation: + """Scale updates using a custom schedule for the `step_size`. + + Args: + step_size_fn: A function that takes an update count as input and proposes + the step_size to multiply the updates by. + + Returns: + A `GradientTransformation` object. + """ + + def init_fn(params): + del params + return ScaleByScheduleState(count=jnp.zeros([], jnp.int32)) + + def update_fn(updates, state, params=None): + del params + step_size = step_size_fn(state.count) + updates = jax.tree_util.tree_map( + lambda g: jnp.array(step_size, dtype=g.dtype) * g, updates) + return updates, ScaleByScheduleState( + count=numerics.safe_int32_increment(state.count)) + + return base.GradientTransformation(init_fn, update_fn) + + +class ScaleByFromageState(NamedTuple): + """Maintains count for step-size scheduling.""" + count: chex.Array # shape=(), dtype=jnp.int32 + + +class ScaleByTrustRatioState(NamedTuple): + """The scale and decay trust ratio transformation is stateless.""" + + +def scale_by_trust_ratio( + min_norm: float = 0.0, + trust_coefficient: float = 1., + eps: float = 0., +) -> base.GradientTransformation: + """Scale updates by trust ratio`. + + References: + [You et. 
al 2020](https://arxiv.org/abs/1904.00962) + + Args: + min_norm: Minimum norm for params and gradient norms; by default is zero. + trust_coefficient: A multiplier for the trust ratio. + eps: Additive constant added to the denominator for numerical stability. + + Returns: + A `GradientTransformation` object. + """ + + def init_fn(params): + del params + return ScaleByTrustRatioState() + + def update_fn(updates, state, params): + if params is None: + raise ValueError(base.NO_PARAMS_MSG) + + def _scale_update(update, param): + + # Clip norms to minimum value, by default no clipping. + param_norm = numerics.safe_norm(param, min_norm) + update_norm = numerics.safe_norm(update, min_norm) + trust_ratio = trust_coefficient * param_norm / (update_norm + eps) + + # If no minimum norm clipping is used + # Set trust_ratio to 1 in case where parameters would never be updated. + zero_norm = jnp.logical_or(param_norm == 0., update_norm == 0.) + safe_trust_ratio = jnp.where( + zero_norm, jnp.array(1.0, dtype=param.dtype), trust_ratio) + + return update * safe_trust_ratio + + updates = jax.tree_util.tree_map(_scale_update, updates, params) + return updates, state + + return base.GradientTransformation(init_fn, update_fn) + + +class AddNoiseState(NamedTuple): + """State for adding gradient noise. Contains a count for annealing.""" + count: chex.Array + rng_key: chex.PRNGKey + + +def add_noise( + eta: float, + gamma: float, + seed: int +) -> base.GradientTransformation: + """Add gradient noise. + + References: + [Neelakantan et al, 2014](https://arxiv.org/abs/1511.06807) + + Args: + eta: Base variance of the gaussian noise added to the gradient. + gamma: Decay exponent for annealing of the variance. + seed: Seed for random number generation. + + Returns: + A `GradientTransformation` object. 
+ """ + + def init_fn(params): + del params + return AddNoiseState( + count=jnp.zeros([], jnp.int32), rng_key=jax.random.PRNGKey(seed)) + + def update_fn(updates, state, params=None): # pylint: disable=missing-docstring + del params + num_vars = len(jax.tree_util.tree_leaves(updates)) + treedef = jax.tree_util.tree_structure(updates) + count_inc = numerics.safe_int32_increment(state.count) + variance = eta / count_inc**gamma + standard_deviation = jnp.sqrt(variance) + all_keys = jax.random.split(state.rng_key, num=num_vars + 1) + noise = jax.tree_util.tree_map( + lambda g, k: jax.random.normal(k, shape=g.shape, dtype=g.dtype), + updates, jax.tree_util.tree_unflatten(treedef, all_keys[1:])) + updates = jax.tree_util.tree_map( + lambda g, n: g + standard_deviation.astype(g.dtype) * n, + updates, noise) + return updates, AddNoiseState(count=count_inc, rng_key=all_keys[0]) + + return base.GradientTransformation(init_fn, update_fn) + + +class ApplyEvery(NamedTuple): + """Contains a counter and a gradient accumulator.""" + count: chex.Array + grad_acc: base.Updates + + +def apply_every( + k: int = 1 +) -> base.GradientTransformation: + """Accumulate gradients and apply them every k steps. + + Note that if this transformation is part of a chain, the states of the other + transformations will still be updated at every step. In particular, using + `apply_every` with a batch size of N/2 and k=2 is not necessarily equivalent + to not using `apply_every` with a batch size of N. If this equivalence is + important for you, consider using the `optax.MultiSteps`. + + Args: + k: Emit non-zero gradients every k steps, otherwise accumulate them. + + Returns: + A `GradientTransformation` object. 
+ """ + + def init_fn(params): + grad_acc = jax.tree_util.tree_map(jnp.zeros_like, params) + return ApplyEvery(count=jnp.zeros([], jnp.int32), grad_acc=grad_acc) + + def update_fn(updates, state, params=None): + del params + c = state.count % k + acc = c != 0 + grad_acc = jax.tree_util.tree_map( + lambda g, ga: acc * ga + g, updates, state.grad_acc) + emit = c == (k - 1) + updates = jax.tree_util.tree_map(lambda ga: emit * ga, grad_acc) + count_inc = numerics.safe_int32_increment(state.count) + return updates, ApplyEvery(count=count_inc % k, grad_acc=grad_acc) + + return base.GradientTransformation(init_fn, update_fn) + + +def _subtract_mean(g): + if len(g.shape) > 1: + return g - g.mean(tuple(range(1, len(g.shape))), keepdims=True) + else: + return g + + +CentralState = base.EmptyState + + +def centralize() -> base.GradientTransformation: + """Centralize gradients. + + References: + [Yong et al, 2020](https://arxiv.org/abs/2004.01461) + + Returns: + A `GradientTransformation` object. + """ + + def init_fn(params): + del params + return CentralState() + + def update_fn(updates, state, params=None): + del params + updates = jax.tree_util.tree_map(_subtract_mean, updates) + return updates, state + + return base.GradientTransformation(init_fn, update_fn) + + +class ScaleBySM3State(NamedTuple): + """State for the SM3 algorithm.""" + mu: base.Updates + nu: base.Updates + + +def scale_by_sm3( + b1: float = 0.9, + b2: float = 1.0, + eps: float = 1e-8 +) -> base.GradientTransformation: + """Scale updates by sm3`. + + References: + [Anil et. al 2019](https://arxiv.org/abs/1901.11150) + + Args: + b1: Decay rate for the exponentially weighted average of grads. + b2: Decay rate for the exponentially weighted average of squared grads. + eps: Term added to the denominator to improve numerical stability. + + Returns: + A `GradientTransformation` object. 
+ """ + + def zeros_for_dim(p): + return [jnp.zeros([s]) for s in p.shape] + + def init_fn(params): + _reject_complex(params) + mu = jax.tree_util.tree_map(zeros_for_dim, params) + nu = jax.tree_util.tree_map(jnp.zeros_like, params) + return ScaleBySM3State(mu, nu) + + def _expanded_shape(shape, axis): + # Replaces a `shape` of [M, N, K] with 1 in all dimensions except for i. + # For eg: i = 1 returns [1, N, 1]. + rank = len(shape) + return [1] * axis + [shape[axis]] + [1] * (rank - axis - 1) + + def _new_accum(g, v): + coeffs = ((1.0 - b2) if b2 != 1.0 else 1.0, b2) + if g.ndim < 2: + return coeffs[0]*g**2 + coeffs[1]*v[0] + else: + return coeffs[0]*g**2 + coeffs[1]*functools.reduce(jnp.minimum, v) + + def _new_mu(g, i): + if g.ndim < 2: + return g + else: + return jnp.max(g, axis=other_axes(i, g.ndim)) + + def other_axes(idx, ndim): + return list(range(idx)) + list(range(idx+1, ndim)) + + def update_fn(updates, state, params=None): + del params + mu = jax.tree_util.tree_map( + lambda g, v: # pylint:disable=g-long-lambda + [jnp.reshape(v[i], _expanded_shape(g.shape, i)) for i in range(g.ndim)], + updates, state.mu) + accum = jax.tree_util.tree_map(_new_accum, updates, mu) + accum_inv_sqrt = jax.tree_util.tree_map( + lambda t: jnp.where(t > 0, jax.lax.rsqrt(t + eps), 0.0), accum) + up = jax.tree_util.tree_map(lambda g, a: g*a, updates, accum_inv_sqrt) + nu = update_moment(up, state.nu, b1, 1) + mu = jax.tree_util.tree_map( + lambda g: [_new_mu(g, i) for i in range(g.ndim)], accum) + + return nu, ScaleBySM3State(mu=mu, nu=nu) + + return base.GradientTransformation(init_fn, update_fn) + + +class ScaleByNovogradState(NamedTuple): + """State for Novograd.""" + count: chex.Array + mu: base.Updates + nu: base.Updates + + +def scale_by_novograd( + b1: float = 0.9, + b2: float = 0.25, + eps: float = 1e-8, + eps_root: float = 0.0, + weight_decay: float = 0.0, + mu_dtype: Optional[Any] = None, +) -> base.GradientTransformation: + """Computes NovoGrad updates. 
+ + References: + [Ginsburg et al, 2019](https://arxiv.org/abs/1905.11286) + + Args: + b1: A decay rate for the exponentially weighted average of grads. + b2: A decay rate for the exponentially weighted average of squared grads. + eps: A term added to the denominator to improve numerical stability. + eps_root: A term added to the denominator inside the square-root to improve + numerical stability when backpropagating gradients through the rescaling. + weight_decay: A scalar weight decay rate. + mu_dtype: An optional `dtype` to be used for the first order accumulator; if + `None` then the `dtype is inferred from `params` and `updates`. + + Returns: + The corresponding `GradientTransformation`. + """ + + mu_dtype = utils.canonicalize_dtype(mu_dtype) + + def init_fn(params): + mu = jax.tree_util.tree_map( # First moment + lambda t: jnp.zeros_like(t, dtype=mu_dtype), params) + nu = jax.tree_util.tree_map(lambda _: 0.0, params) # Second moment + return ScaleByNovogradState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu) + + def nu_addition(grads): + return jnp.linalg.norm(grads)**2 + + def mu_addition(grads, params, nu): + return grads / (jnp.sqrt(nu + eps_root) + eps) + weight_decay * params + + def init_nu(grads, nu): + del nu + return jax.tree_util.tree_map(nu_addition, grads) + + def update_nu(grads, nu): + updates = jax.tree_util.tree_map(nu_addition, grads) + return update_moment(updates, nu, b2, 1) + + def init_mu(grads, params, mu, nu): + del mu + return jax.tree_util.tree_map(mu_addition, grads, params, nu) + + def update_mu(grads, params, mu, nu): + updates = jax.tree_util.tree_map(mu_addition, grads, params, nu) + return jax.tree_util.tree_map(lambda m, u: b1 * m + u, mu, updates) + + # Second moment + def update_fn(updates, state, params): + count_inc = numerics.safe_int32_increment(state.count) + + nu = jax.lax.cond(count_inc == 1, init_nu, update_nu, updates, state.nu) + + mu = jax.lax.cond(count_inc == 1, init_mu, update_mu, updates, params, + state.mu, nu) 
+ + mu = utils.cast_tree(mu, mu_dtype) + updates = mu + return updates, ScaleByNovogradState(count=count_inc, mu=mu, nu=nu) + + return base.GradientTransformation(init_fn, update_fn) + + +def scale_by_optimistic_gradient(alpha: float = 1.0, + beta: float = 1.0 + ) -> base.GradientTransformation: + """Compute generalized optimistic gradients. + + References: + [Mokhtari et al, 2019](https://arxiv.org/abs/1901.08511v2) + + Args: + alpha: Coefficient for generalized optimistic gradient descent. + beta: Coefficient for negative momentum. + + Returns: + A `GradientTransformation` object. + """ + + def init_fn(params): + prev_grads = jax.tree_util.tree_map(jnp.zeros_like, params) + return TraceState(trace=prev_grads) + + def update_fn(updates, state, params=None): + del params + + new_updates = jax.tree_util.tree_map( + lambda grad_t, grad_tm1: (alpha + beta) * grad_t - beta * grad_tm1, + updates, state.trace) + return new_updates, TraceState(trace=updates) + + return base.GradientTransformation(init_fn, update_fn) + + +# TODO(b/183800387): remove legacy aliases. +# These legacy aliases are here for checkpoint compatibility +# To be removed once checkpoints have updated. +_safe_int32_increment = numerics.safe_int32_increment +safe_int32_increment = numerics.safe_int32_increment +AdditiveWeightDecayState = AddDecayedWeightsState +additive_weight_decay = add_decayed_weights +ClipState = clipping.ClipState +ClipByGlobalNormState = clipping.ClipByGlobalNormState diff --git a/lib/python3.10/site-packages/optax/_src/transform_test.py b/lib/python3.10/site-packages/optax/_src/transform_test.py new file mode 100644 index 0000000000000000000000000000000000000000..db029b3d3a77d9f07a5ab1c42e17bf66a3257ad0 --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/transform_test.py @@ -0,0 +1,305 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + + +"""Tests for `transform.py`.""" + +from absl.testing import absltest +from absl.testing import parameterized + +import chex +import jax +import jax.numpy as jnp +import numpy as np + +from optax._src import alias +from optax._src import combine +from optax._src import transform +from optax._src import update + +STEPS = 50 +LR = 1e-2 + + +class TransformTest(parameterized.TestCase): + + def setUp(self): + super().setUp() + self.init_params = (jnp.array([1., 2.]), jnp.array([3., 4.])) + self.per_step_updates = (jnp.array([500., 5.]), jnp.array([300., 3.])) + + @chex.all_variants + @parameterized.named_parameters([ + ('adam', transform.scale_by_adam), + ('adamax', transform.scale_by_adamax), + ('rmsprop', transform.scale_by_rms), + ('stddev', transform.scale_by_stddev), + ('trust_ratio', transform.scale_by_trust_ratio), + ('param_block_norm', transform.scale_by_param_block_norm), + ('param_block_rms', transform.scale_by_param_block_rms), + ]) + def test_scalers(self, scaler_constr): + params = self.init_params + + scaler = scaler_constr() + init_fn = self.variant(scaler.init) + transform_fn = self.variant(scaler.update) + + state = init_fn(params) + chex.assert_tree_all_finite(state) + + updates, state = transform_fn(self.per_step_updates, state, params) + chex.assert_tree_all_finite((params, updates, state)) + jax.tree_util.tree_map( + 
lambda *args: chex.assert_equal_shape(args), params, updates) + + @chex.all_variants + def test_add_decayed_weights(self): + # Define a transform that add decayed weights. + # We can define a mask either as a pytree, or as a function that + # returns the pytree. Below we define the pytree directly. + mask = (True, dict(a=True, b=False)) + tx = transform.add_decayed_weights(0.1, mask=mask) + # Define input updates and weights. + updates = ( + jnp.zeros((2,), dtype=jnp.float32), + dict( + a=jnp.zeros((2,), dtype=jnp.float32), + b=jnp.zeros((2,), dtype=jnp.float32),)) + weights = ( + jnp.ones((2,), dtype=jnp.float32), + dict( + a=jnp.ones((2,), dtype=jnp.float32), + b=jnp.ones((2,), dtype=jnp.float32),)) + # This mask means that we will add decayed weights to the first two + # terms in the input updates, but not to the last element. + expected_tx_updates = ( + 0.1*jnp.ones((2,), dtype=jnp.float32), + dict( + a=0.1*jnp.ones((2,), dtype=jnp.float32), + b=jnp.zeros((2,), dtype=jnp.float32),)) + # Apply transform + state = tx.init(weights) + transform_fn = self.variant(tx.update) + new_updates, _ = transform_fn(updates, state, weights) + # Assert output as expected. 
+ chex.assert_tree_all_close(new_updates, expected_tx_updates) + + @chex.all_variants + def test_ema(self): + values = jnp.array([5.0, 7.0]) + decay = 0.9 + d = decay + + ema = transform.ema(decay=decay, debias=False) + state = ema.init(values[0]) # init to zeroes + + transform_fn = self.variant(ema.update) + mean, state = transform_fn(values[0], state) + np.testing.assert_allclose(mean, (1-d) * values[0], atol=1e-4) + + mean, state = transform_fn(values[1], state) + np.testing.assert_allclose( + mean, + (1 - d) * (values[1] + d * values[0]), atol=1e-2) + + @chex.all_variants + def test_ema_debias(self): + values = jnp.array([5.0, 7.0]) + decay = 0.9 + d = decay + + ema = transform.ema(decay=decay) + state = ema.init(values[0]) + + transform_fn = self.variant(ema.update) + mean, state = transform_fn(values[0], state) + np.testing.assert_allclose(mean, values[0], atol=1e-4) + + mean, state = transform_fn(values[1], state) + np.testing.assert_allclose( + mean, + ((1 - d) * values[1] + d * (1 - d) * values[0]) / (1 - d**2), + atol=1e-2) + # The state must not be debiased. 
+ np.testing.assert_allclose( + state.ema, + (1 - d) * values[1] + d * (1 - d) * values[0], + atol=1e-2) + + @chex.all_variants + def test_update_infinity_moment(self): + values = jnp.array([5.0, 7.0]) + decay = 0.9 + d = decay + + transform_fn = self.variant(transform.update_infinity_moment) + + # identity if updating with itself (and positive decay) + np.testing.assert_allclose( + transform_fn(values, values, decay=d, eps=0.), + values, + atol=1e-4 + ) + # return (decayed) max when updating with zeros + np.testing.assert_allclose( + transform_fn(jnp.zeros_like(values), values, decay=d, eps=0.), + d * values, + atol=1e-4 + ) + # infinity norm takes absolute values + np.testing.assert_allclose( + transform_fn(-values, jnp.zeros_like(values), decay=d, eps=0.), + values, + atol=1e-4 + ) + # return at least `eps` + np.testing.assert_allclose( + transform_fn(jnp.zeros_like(values), jnp.zeros_like(values), + decay=d, eps=1e-2), + jnp.ones_like(values) * 1e-2, + atol=1e-4 + ) + + @chex.all_variants + def test_apply_every(self): + # The frequency of the application of sgd + k = 4 + zero_update = (jnp.array([0., 0.]), jnp.array([0., 0.])) + + # optax sgd + optax_sgd_params = self.init_params + sgd = alias.sgd(LR, 0.0) + state_sgd = sgd.init(optax_sgd_params) + + # optax sgd plus apply every + optax_sgd_apply_every_params = self.init_params + sgd_apply_every = combine.chain( + transform.apply_every(k=k), + transform.trace(decay=0, nesterov=False), + transform.scale(-LR)) + state_sgd_apply_every = sgd_apply_every.init(optax_sgd_apply_every_params) + transform_fn = self.variant(sgd_apply_every.update) + + for i in range(STEPS): + # Apply a step of sgd + updates_sgd, state_sgd = sgd.update(self.per_step_updates, state_sgd) + optax_sgd_params = update.apply_updates(optax_sgd_params, updates_sgd) + + # Apply a step of sgd_apply_every + updates_sgd_apply_every, state_sgd_apply_every = transform_fn( + self.per_step_updates, state_sgd_apply_every) + optax_sgd_apply_every_params = 
update.apply_updates( + optax_sgd_apply_every_params, updates_sgd_apply_every) + + # Every k steps, check equivalence. + if i % k == k-1: + chex.assert_tree_all_close( + optax_sgd_apply_every_params, optax_sgd_params, + atol=1e-6, rtol=1e-5) + # Otherwise, check update is zero. + else: + chex.assert_tree_all_close( + updates_sgd_apply_every, zero_update, atol=0.0, rtol=0.0) + + def test_scale(self): + updates = self.per_step_updates + for i in range(1, STEPS + 1): + factor = 0.1 ** i + rescaler = transform.scale(factor) + # Apply rescaling. + scaled_updates, _ = rescaler.update(updates, None) + # Manually scale updates. + def rescale(t): + return t * factor # pylint:disable=cell-var-from-loop + manual_updates = jax.tree_util.tree_map(rescale, updates) + # Check the rescaled updates match. + chex.assert_tree_all_close(scaled_updates, manual_updates) + + @parameterized.named_parameters([ + ('1d', [1.0, 2.0], [1.0, 2.0]), + ('2d', [[1.0, 2.0], [3.0, 4.0]], [[-0.5, 0.5], [-0.5, 0.5]]), + ('3d', [[[1., 2.], [3., 4.]], + [[5., 6.], [7., 8.]]], [[[-1.5, -0.5], [0.5, 1.5]], + [[-1.5, -0.5], [0.5, 1.5]]]), + ]) + def test_centralize(self, inputs, outputs): + inputs = jnp.asarray(inputs) + outputs = jnp.asarray(outputs) + centralizer = transform.centralize() + centralized_inputs, _ = centralizer.update(inputs, None) + chex.assert_tree_all_close(centralized_inputs, outputs) + + @chex.all_variants + def test_add_noise_has_correct_variance_scaling(self): + # Prepare to compare noise with a rescaled unit-variance substitute. + eta = 0.3 + gamma = 0.55 + seed = 314 + noise = transform.add_noise(eta, gamma, seed) + noise_unit = transform.add_noise(1.0, 0.0, seed) + + params = self.init_params + state = noise.init(params) + state_unit = noise_unit.init(params) + + # Check the noise itself by adding it to zeros. 
+ updates = jax.tree_util.tree_map(jnp.zeros_like, params) + + for i in range(1, STEPS + 1): + updates_i, state = self.variant(noise.update)(updates, state) + updates_i_unit, state_unit = noise_unit.update(updates, state_unit) + + scale = jnp.sqrt(eta / i**gamma) + + updates_i_rescaled = jax.tree_util.tree_map( + lambda g, s=scale: g * s, updates_i_unit) + + chex.assert_tree_all_close(updates_i, updates_i_rescaled, rtol=1e-4) + + def test_scale_by_optimistic_gradient(self): + + def f(params: jnp.ndarray) -> jnp.ndarray: + return params['x'] ** 2 + + initial_params = { + 'x': jnp.array(2.0) + } + + og = transform.scale_by_optimistic_gradient() + og_state = og.init(initial_params) + # Provide some arbitrary previous gradient. + og_state.trace['x'] = 1.5 + + g = jax.grad(f)(initial_params) + og_true = 2 * g['x'] - og_state.trace['x'] + og, og_state = og.update(g, og_state) + + # Compare transformation output with manually computed optimistic gradient. + chex.assert_tree_all_close(og_true, og['x']) + + @chex.all_variants + def test_bias_correction_bf16(self): + bias_correction_fn = self.variant(transform.bias_correction) + m = jnp.logspace(-10, 10, num=21, dtype=jnp.bfloat16) # 1e-10 ... 1e10 + for decay in (0.9, 0.99, 0.999, 0.9995): + for count in (1, 10, 100, 1000): + chex.assert_tree_all_finite( + bias_correction_fn(m, decay, count), + custom_message=f'failed with decay={decay}, count={count}') + + +if __name__ == '__main__': + absltest.main() diff --git a/lib/python3.10/site-packages/optax/_src/utils.py b/lib/python3.10/site-packages/optax/_src/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bb1d68264ef07b2ea384d85ff3859663ee17ae89 --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/utils.py @@ -0,0 +1,152 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utility functions for testing.""" + +from typing import Optional, Tuple, Sequence + +import chex +import jax +import jax.numpy as jnp +import jax.scipy.stats.norm as multivariate_normal + +from optax._src import linear_algebra +from optax._src import numerics + + +def tile_second_to_last_dim(a: chex.Array) -> chex.Array: + ones = jnp.ones_like(a) + a = jnp.expand_dims(a, axis=-1) + return jnp.expand_dims(ones, axis=-2) * a + + +def canonicalize_dtype( + dtype: Optional[chex.ArrayDType]) -> Optional[chex.ArrayDType]: + """Canonicalise a dtype, skip if None.""" + if dtype is not None: + return jax.dtypes.canonicalize_dtype(dtype) + return dtype + + +def cast_tree(tree: chex.ArrayTree, + dtype: Optional[chex.ArrayDType]) -> chex.ArrayTree: + """Cast tree to given dtype, skip if None.""" + if dtype is not None: + return jax.tree_util.tree_map(lambda t: t.astype(dtype), tree) + else: + return tree + + +def set_diags(a: chex.Array, new_diags: chex.Array) -> chex.Array: + """Set the diagonals of every DxD matrix in an input of shape NxDxD. + + Args: + a: rank 3, tensor NxDxD. + new_diags: NxD matrix, the new diagonals of each DxD matrix. + + Returns: + NxDxD tensor, with the same contents as `a` but with the diagonal + changed to `new_diags`. 
+ """ + n, d, d1 = a.shape + assert d == d1 + + indices1 = jnp.repeat(jnp.arange(n), d) + indices2 = jnp.tile(jnp.arange(d), n) + indices3 = indices2 + + # Use numpy array setting + a = a.at[indices1, indices2, indices3].set(new_diags.flatten()) + return a + + +class MultiNormalDiagFromLogScale(): + """MultiNormalDiag which directly exposes its input parameters.""" + + def __init__(self, loc: chex.Array, log_scale: chex.Array): + self._log_scale = log_scale + self._scale = jnp.exp(log_scale) + self._mean = loc + self._param_shape = jax.lax.broadcast_shapes( + self._mean.shape, self._scale.shape) + + def sample(self, shape: Sequence[int], + seed: chex.PRNGKey) -> chex.Array: + sample_shape = tuple(shape) + self._param_shape + return jax.random.normal( + seed, shape=sample_shape) * self._scale + self._mean + + def log_prob(self, x: chex.Array) -> chex.Array: + log_prob = multivariate_normal.logpdf(x, loc=self._mean, scale=self._scale) + # Sum over parameter axes. + sum_axis = [-(i + 1) for i in range(len(self._param_shape))] + return jnp.sum(log_prob, axis=sum_axis) + + @property + def log_scale(self) -> chex.Array: + return self._log_scale + + @property + def params(self) -> Sequence[chex.Array]: + return [self._mean, self._log_scale] + + +def multi_normal(loc: chex.Array, + log_scale: chex.Array) -> MultiNormalDiagFromLogScale: + return MultiNormalDiagFromLogScale(loc=loc, log_scale=log_scale) + + +@jax.custom_vjp +def _scale_gradient(inputs: chex.ArrayTree, scale: float) -> chex.ArrayTree: + """Internal gradient scaling implementation.""" + del scale # Only used for the backward pass defined in _scale_gradient_bwd. 
+ return inputs + + +def _scale_gradient_fwd(inputs: chex.ArrayTree, + scale: float) -> Tuple[chex.ArrayTree, float]: + return _scale_gradient(inputs, scale), scale + + +def _scale_gradient_bwd(scale: float, + g: chex.ArrayTree) -> Tuple[chex.ArrayTree, None]: + return (jax.tree_util.tree_map(lambda g_: g_ * scale, g), None) + + +_scale_gradient.defvjp(_scale_gradient_fwd, _scale_gradient_bwd) + + +def scale_gradient(inputs: chex.ArrayTree, scale: float) -> chex.ArrayTree: + """Scales gradients for the backwards pass. + + Args: + inputs: A nested array. + scale: The scale factor for the gradient on the backwards pass. + + Returns: + An array of the same structure as `inputs`, with scaled backward gradient. + """ + # Special case scales of 1. and 0. for more efficiency. + if scale == 1.: + return inputs + elif scale == 0.: + return jax.lax.stop_gradient(inputs) + else: + return _scale_gradient(inputs, scale) + + +# TODO(b/183800387): remove legacy aliases. +safe_norm = numerics.safe_norm +safe_int32_increment = numerics.safe_int32_increment +global_norm = linear_algebra.global_norm diff --git a/lib/python3.10/site-packages/optax/_src/utils_test.py b/lib/python3.10/site-packages/optax/_src/utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b377c534aadcaab1b2f7dfcb71e05070d7a98a5c --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/utils_test.py @@ -0,0 +1,65 @@ +# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for `utils.py`.""" + +from unittest import mock + +from absl.testing import absltest +from absl.testing import parameterized + +import jax + +from optax._src import utils + + +class ScaleGradientTest(parameterized.TestCase): + + @parameterized.product(inputs=[-1., 0., 1.], scale=[-0.5, 0., 0.5, 1., 2.]) + @mock.patch.object(jax.lax, 'stop_gradient', wraps=jax.lax.stop_gradient) + def test_scale_gradient(self, mock_sg, inputs, scale): + + def fn(inputs): + outputs = utils.scale_gradient(inputs, scale) + return outputs ** 2 + + grad = jax.grad(fn) + self.assertEqual(grad(inputs), 2 * inputs * scale) + if scale == 0.: + mock_sg.assert_called_once_with(inputs) + else: + self.assertFalse(mock_sg.called) + self.assertEqual(fn(inputs), inputs ** 2) + + @parameterized.product(scale=[-0.5, 0., 0.5, 1., 2.]) + def test_scale_gradient_pytree(self, scale): + + def fn(inputs): + outputs = utils.scale_gradient(inputs, scale) + outputs = jax.tree_util.tree_map(lambda x: x ** 2, outputs) + return sum(jax.tree_util.tree_leaves(outputs)) + + inputs = dict(a=-1., b=dict(c=(2.,), d=0.)) + + grad = jax.grad(fn) + grads = grad(inputs) + jax.tree_util.tree_map( + lambda i, g: self.assertEqual(g, 2 * i * scale), inputs, grads) + self.assertEqual( + fn(inputs), + sum(jax.tree_util.tree_leaves( + jax.tree_util.tree_map(lambda x: x**2, inputs)))) + +if __name__ == '__main__': + absltest.main() diff --git a/lib/python3.10/site-packages/optax/_src/wrappers.py b/lib/python3.10/site-packages/optax/_src/wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..79fb4f595986bc4e8f5d52a5783cd5fcc87c96a0 --- /dev/null +++ b/lib/python3.10/site-packages/optax/_src/wrappers.py @@ -0,0 +1,547 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Transformation wrappers.""" + +import functools +from typing import Any, Callable, NamedTuple, Optional, Tuple, Union + +import chex +import jax +from jax import lax +import jax.numpy as jnp +from jax.tree_util import tree_flatten +from jax.tree_util import tree_map +from jax.tree_util import tree_unflatten +import numpy as np +from optax._src import base +from optax._src import numerics +import typing_extensions + +Array = jnp.ndarray + + +def flatten( + inner: base.GradientTransformation +) -> base.GradientTransformation: + """Flattens parameters and gradients for init and update of inner transform. + + This can reduce the overhead of performing many calculations on lots of small + variables, at the cost of slightly increased memory usage. + + Args: + inner: Inner transformation to flatten inputs for. + + Returns: + New GradientTransformation. 
+ """ + + def _flatten(params): + """Flattens and concatenates all tensors in params to a single vector.""" + params, _ = tree_flatten(params) + return jnp.concatenate([jnp.reshape(param, [-1]) for param in params]) + + def _unflatten(updates, flat): + """Extracts tensors from flat, using the structure and shapes of params.""" + updates_flat, treedef = tree_flatten(updates) + offsets = [] + for update in updates_flat: + size = np.prod(update.shape) + if offsets: + offsets.append(size + offsets[-1]) + else: + offsets.append(size) + del offsets[-1] + flat_split = jnp.split(flat, offsets) + reshaped = [ + jnp.reshape(flat_update, update.shape) + for flat_update, update in zip(flat_split, updates_flat) + ] + return tree_unflatten(treedef, reshaped) + + def init_fn(params): + flat = _flatten(params) + return inner.init(flat) + + def update_fn(updates, state, params=None): + if params is not None: + params = _flatten(params) + updates_flat, state = inner.update(_flatten(updates), state, params) + updates = _unflatten(updates, updates_flat) + return updates, state + + return base.GradientTransformation(init_fn, update_fn) + + +class ApplyIfFiniteState(NamedTuple): + """State of the `GradientTransformation` returned by `apply_if_finite`. + + Fields: + notfinite_count: Number of consecutive gradient updates containing an Inf or + a NaN. This number is reset to 0 whenever a gradient update without an Inf + or a NaN is done. + last_finite: Whether or not the last gradient update contained an Inf of a + NaN. + total_notfinite: Total number of gradient updates containing an Inf or + a NaN since this optimizer was initialised. This number is never reset. + inner_state: The state of the inner `GradientTransformation`. 
+ """ + notfinite_count: jnp.array + last_finite: jnp.array + total_notfinite: jnp.array + inner_state: Any + + +def apply_if_finite( + inner: base.GradientTransformation, + max_consecutive_errors: int +) -> base.GradientTransformation: + """A function that wraps an optimizer to make it robust to a few NaNs or Infs. + + The purpose of this function is to prevent any optimization to happen if the + gradients contain NaNs or Infs. That is, when a NaN of Inf is detected in the + gradients, the wrapped optimizer ignores that gradient update. If the NaNs or + Infs persist after a given number of updates, the wrapped optimizer gives up + and accepts the update. + + Args: + inner: Inner transformation to be wrapped. + max_consecutive_errors: Maximum number of consecutive gradient updates + containing NaNs of Infs that the wrapped optimizer will ignore. After + that many ignored updates, the optimizer will give up and accept. + + Returns: + New GradientTransformation. + """ + + def init(params): + return ApplyIfFiniteState( + notfinite_count=jnp.zeros([], jnp.int32), + last_finite=jnp.array(True, jnp.bool_), + total_notfinite=jnp.zeros([], jnp.int32), + inner_state=inner.init(params)) + + def update(updates, state, params=None): + inner_state = state.inner_state + flat_updates = tree_flatten(updates)[0] + isfinite = jnp.all( + jnp.array([jnp.all(jnp.isfinite(p)) for p in flat_updates])) + notfinite_count = jnp.where( + isfinite, jnp.zeros([], jnp.int32), + numerics.safe_int32_increment(state.notfinite_count)) + + def do_update(_): + return inner.update(updates, inner_state, params) + def reject_update(_): + return (tree_map(jnp.zeros_like, updates), inner_state) + + updates, new_inner_state = lax.cond( + jnp.logical_or(isfinite, notfinite_count > max_consecutive_errors), + do_update, reject_update, operand=None) + + return updates, ApplyIfFiniteState( + notfinite_count=notfinite_count, + last_finite=isfinite, + total_notfinite=jnp.where( + isfinite, state.total_notfinite, 
+ numerics.safe_int32_increment(state.total_notfinite)), + inner_state=new_inner_state) + + return base.GradientTransformation(init=init, update=update) + + +def _zeros_tree_like(inp_tree): + return jax.tree_util.tree_map(jnp.zeros_like, inp_tree) + + +class MultiStepsState(NamedTuple): + """State of the `GradientTransformation` returned by `MultiSteps`. + + Fields: + mini_step: current mini-step counter. At an update, this either increases by + 1 or is reset to 0. + gradient_step: gradient step counter. This only increases after enough + mini-steps have been accumulated. + inner_opt_state: the state of the wrapped otpimiser. + acc_grads: accumulated gradients over multiple mini-steps. + skip_state: an arbitrarily nested tree of arrays. This is only + relevant when passing a `should_skip_update_fn` to `MultiSteps`. This + structure will then contain values for debugging and or monitoring. The + actual structure will vary depending on the choice of + `ShouldSkipUpdateFunction`. + """ + mini_step: Array + gradient_step: Array + inner_opt_state: Any + acc_grads: Any + skip_state: chex.ArrayTree = () + + +class ShouldSkipUpdateFunction(typing_extensions.Protocol): + + def __call__(self, updates: base.Updates, gradient_step: Array, + params: Optional[base.Params]) -> Tuple[Array, chex.ArrayTree]: + """Returns true to indicate that updates should be skipped in a multi-step. + + Args: + updates: The updates that the gradient transformation has proposed + to apply + gradient_step: The current gradient step (see + `MultiStepsState.gradient_step`). This can be used for example to reject + large gradients with an annealed maximum allowed gradient norm. + params: If known, the current parameter tree of the function being + transformed. + Returns: + A tuple: + * First element is an array with a single bool indicating whether or not + the updates should be applied. 
+ * Second element is an arbitrarily nested structure of arrays that will be + stored in `MultiStepsState.skip_state`. The structure will vary from + function to function. Debugging info, or values to monitor, can be put + in this structure. + """ + + +def skip_not_finite( + updates: base.Updates, gradient_step: Array, + params: Optional[base.Params]) -> Tuple[Array, chex.ArrayTree]: + """Returns True iff any of the `updates` contains an inf or a NaN. + + Args: + updates: see `ShouldSkipUpdateFunction`. + gradient_step: see `ShouldSkipUpdateFunction`. + params: see `ShouldSkipUpdateFunction`. + + Returns: + A tuple: + * First element is a scalar array of type bool. + * Second element is a dictionary with keys: + - `should_skip`: True iff `updates` contains an inf or a NaN. + - `num_not_finite`: total number of inf and NaN found in `updates`. + """ + del gradient_step, params + all_is_finite = [jnp.sum(jnp.logical_not(jnp.isfinite(p))) + for p in jax.tree_util.tree_leaves(updates)] + num_not_finite = jnp.sum(jnp.array(all_is_finite)) + should_skip = num_not_finite > 0 + return should_skip, dict(should_skip=should_skip, + num_not_finite=num_not_finite) + + +def skip_large_updates(updates: base.Updates, + gradient_step: Array, + params: Optional[base.Params], + max_squared_norm: float) -> Tuple[Array, chex.ArrayTree]: + """Returns True if the global norm square of `updates` is small enough. + + Args: + updates: see `ShouldSkipUpdateFunction`. + gradient_step: see `ShouldSkipUpdateFunction`. + params: see `ShouldSkipUpdateFunction`. + max_squared_norm: only updates with a norm square strictly less than this + value will be accepted. + + Returns: + A tuple: + * First element is a scalar array of type bool. + * Second element is a dictionary with keys: + - `should_skip`: True iff square norm of `updates` is larger or equal than + `max_squared_norm`. + - `norm_squared`: overall norm square of the `updates`. 
+ """ + del gradient_step, params + norm_sq = jnp.sum( + jnp.array([jnp.sum(p**2) for p in jax.tree_util.tree_leaves(updates)])) + # This will also return True if `norm_sq` is NaN. + should_skip = jnp.logical_not(norm_sq < max_squared_norm) + return should_skip, dict(should_skip=should_skip, norm_squared=norm_sq) + + +class MultiSteps: + """An optimizer wrapper to accumulate gradients over multiple steps. + + This wrapper collects together the updates passed to its `update` function + over consecutive steps until a given number of scheduled steps is reached. + In each of these intermediate steps, the returned value from the optimizer is + a tree of zeros of the same shape of the updates passed as input. + + Once the scheduled number of intermediate 'mini-steps' has been reached, the + gradients accumulated to the current time will be passed to the wrapped + optimizer's update function, (with the inner optimizer's state being updated + appropriately) and then returned to the caller. The wrapper's accumulated + gradients are then set back to zero and the process starts again. + + The number of mini-steps per gradient update is controlled by a function, and + it can vary over training. This offers a means of varying batch size over + training. + """ + + def __init__( + self, + opt: base.GradientTransformation, + every_k_schedule: Union[int, Callable[[Array], Array]], + use_grad_mean: bool = True, + should_skip_update_fn: Optional[ShouldSkipUpdateFunction] = None): + """Initialiser. + + Args: + opt: the wrapped optimizer. + every_k_schedule: an int or f a function. + * As a function, it returns how many mini-steps should be accumulated + in a single gradient step. Its only argument is the current + gradient step count. By varying the returned value, users can vary the + overall training batch size. + * If an `int`, this is the constant number of mini-steps per gradient + update. 
+ use_grad_mean: if `True` (the default), gradients accumulated over + multiple mini-steps are averaged. Otherwise, they are summed. + should_skip_update_fn: if provided, this function is used to decide when + to accept or reject the updates from a mini-step. When a mini-step is + rejected, the inner state of `MultiSteps` is not updated. In other + words, it is as if this mini-step never happened. For example: + * to ignore updates containing inf or NaN, do + `should_skip_update_fn=skip_not_finite`; + * to ignore updates with a norm square larger then 42, do + `should_skip_update_fn=functools.partial(skip_large_updates, + max_norm_sq=42.)`. + Note that the optimizer's state `MultiStepsState` contains a field + `skip_state` in which debugging and monitoring information returned + by `should_skip_update_fn` is written. + """ + self._opt = opt + if isinstance(every_k_schedule, int): + self._every_k_schedule = lambda step: every_k_schedule + else: + self._every_k_schedule = every_k_schedule + self._use_grad_mean = use_grad_mean + + if self._use_grad_mean: + # Use Welford algorithm for numerically stable aggregation of mean. 
+ self._acc_update = ( + lambda grad, acc, *, n_acc: acc + (grad - acc) / (n_acc + 1)) + else: + self._acc_update = lambda grad, acc, *, n_acc: grad + acc + + if should_skip_update_fn is None: + + def should_skip_update_fn(*unused_args, **unused_kwargs): + return jnp.array(False, dtype=jnp.bool_), () + + self._should_skip_update_fn = should_skip_update_fn + + @property + def inner_opt(self): + return self._opt + + def init(self, params: Any) -> MultiStepsState: + """Builds and returns initial `MultiStepsState`.""" + updates = _zeros_tree_like(params) + gradient_step = jnp.zeros([], dtype=jnp.int32) + _, skip_state = self._should_skip_update_fn(updates, gradient_step, params) + init_state = MultiStepsState( + mini_step=jnp.zeros([], dtype=jnp.int32), + gradient_step=gradient_step, + inner_opt_state=self._opt.init(params), + acc_grads=updates, + skip_state=skip_state) + return init_state + + def update(self, + updates: base.Updates, + state: MultiStepsState, + params: Optional[base.Params] = None + ) -> Tuple[base.Updates, MultiStepsState]: + """Accumulates gradients and proposes non-zero updates every `k_steps`.""" + k_steps = self._every_k_schedule(state.gradient_step) + acc_grads = jax.tree_util.tree_map( + functools.partial(self._acc_update, n_acc=state.mini_step), + updates, state.acc_grads) + + should_skip_update, skip_state = self._should_skip_update_fn( + updates, state.gradient_step, params) + + def final_step(args): + del args + final_updates, new_inner_state = self._opt.update( + acc_grads, state.inner_opt_state, params=params) + new_state = MultiStepsState( + mini_step=jnp.zeros([], dtype=jnp.int32), + gradient_step=numerics.safe_int32_increment(state.gradient_step), + inner_opt_state=new_inner_state, + acc_grads=_zeros_tree_like(acc_grads), + skip_state=skip_state) + return final_updates, new_state + + def mid_step(args): + del args + updates_shape_dtype, _ = jax.eval_shape( + self._opt.update, acc_grads, state.inner_opt_state, params=params) + 
mid_updates = jax.tree_util.tree_map( + lambda sd: jnp.zeros(sd.shape, sd.dtype), updates_shape_dtype) + new_state = MultiStepsState( + mini_step=numerics.safe_int32_increment(state.mini_step), + gradient_step=state.gradient_step, + inner_opt_state=state.inner_opt_state, + acc_grads=acc_grads, + skip_state=skip_state) + return mid_updates, new_state + + new_updates, new_state = jax.lax.cond( + state.mini_step < k_steps - 1, (), mid_step, (), final_step) + + if (should_skip_update.dtype, should_skip_update.shape) != (jnp.bool_, ()): + raise ValueError( + 'The `should_skip_update_fn` function should return a boolean scalar ' + f'array, but it returned an array of dtype {should_skip_update.dtype}' + f' and shape {should_skip_update.shape}') + + multi_state_when_skip = MultiStepsState( + mini_step=state.mini_step, + gradient_step=state.gradient_step, + inner_opt_state=state.inner_opt_state, + acc_grads=state.acc_grads, + skip_state=skip_state) + zero_updates = jax.tree_util.tree_map(jnp.zeros_like, updates) + new_updates, new_state = jax.lax.cond( + should_skip_update, + (), lambda args: (zero_updates, multi_state_when_skip), + (), lambda args: (new_updates, new_state)) + + return new_updates, new_state + + def has_updated(self, state: MultiStepsState) -> Array: + return jnp.logical_and(state.mini_step == 0, state.gradient_step > 0) + + def gradient_transformation(self) -> base.GradientTransformation: + return base.GradientTransformation(init=self.init, update=self.update) + + +class MaskedState(NamedTuple): + """Maintains inner transform state for masked transformations.""" + inner_state: Any + + +class MaskedNode(NamedTuple): + """A node used to mask out unspecified parts of a tree. + + This node is ignored when mapping functions across the tree e.g. using + `jax.tree_util.tree_map` since it is a container without children. It can + therefore be used to mask out parts of a tree. 
+ """ + + +def masked( + inner: base.GradientTransformation, + mask: Union[base.PyTree, Callable[[base.Params], base.PyTree]] +) -> base.GradientTransformation: + """Mask updates so only some are transformed, the rest are passed through. + + For example, it is common to skip weight decay for BatchNorm scale and all + bias parameters. In many networks, these are the only parameters with only + one dimension. So, you may create a mask function to mask these out as + follows:: + + mask_fn = lambda p: jax.tree_util.tree_map(lambda x: x.ndim != 1, p) + weight_decay = optax.masked(optax.add_decayed_weights(0.001), mask_fn) + + You may alternatively create the mask pytree upfront:: + + mask = jax.tree_util.tree_map(lambda x: x.ndim != 1, params) + weight_decay = optax.masked(optax.add_decayed_weights(0.001), mask) + + For the ``inner`` transform, state will only be stored for the parameters that + have a mask value of ``True``. + + Args: + inner: Inner transformation to mask. + mask: a PyTree with same structure as (or a prefix of) the params PyTree, or + a Callable that returns such a pytree given the params/updates. The leaves + should be booleans, ``True`` for leaves/subtrees you want to apply the + transformation to, and ``False`` for those you want to skip. The mask must + be static for the gradient transformation to be jit-compilable. + + Returns: + New GradientTransformation wrapping ``inner``. 
+ """ + def mask_pytree(pytree, mask_tree): + return tree_map(lambda m, p: p if m else MaskedNode(), mask_tree, pytree) + + def init_fn(params): + mask_tree = mask(params) if callable(mask) else mask + masked_params = mask_pytree(params, mask_tree) + return MaskedState(inner_state=inner.init(masked_params)) + + def update_fn(updates, state, params=None): + mask_tree = mask(updates) if callable(mask) else mask + masked_updates = mask_pytree(updates, mask_tree) + masked_params = None if params is None else mask_pytree(params, mask_tree) + + new_masked_updates, new_inner_state = inner.update( + masked_updates, state.inner_state, masked_params) + + new_updates = tree_map( + lambda m, new_u, old_u: new_u if m else old_u, + mask_tree, new_masked_updates, updates) + return new_updates, MaskedState(inner_state=new_inner_state) + + return base.GradientTransformation(init_fn, update_fn) + + +class MaybeUpdateState(NamedTuple): + """Maintains inner transform state and adds a step counter.""" + inner_state: Any + step: Array + + +def maybe_update( + inner: base.GradientTransformation, + should_update_fn: Callable[[Array], Array] +) -> base.GradientTransformation: + """Calls the inner update function only at certain steps. + + Creates a transformation wrapper which counts the number of times the `update` + function has been called. This counter is passed to the `should_update_fn` to + decide when to call the inner update function. + + When not calling the inner update function, the `updates` and the inner state + are left untouched and just passed through. The step counter is increased + regardless. + + Args: + inner: the inner transformation. + should_update_fn: this function takes in a step counter (array of shape [] + and dtype int32), and returns a boolean array of shape []. + + Returns: + An `optax.GradientTransformation`. 
+ """ + + def init_fn(params): + return MaybeUpdateState( + inner_state=inner.init(params), step=jnp.zeros([], dtype=jnp.int32)) + + def update_fn(updates, state, params=None): + + def do_update(_): + return inner.update(updates, state.inner_state, params) + + def reject_update(_): + return updates, state.inner_state + + updates, new_inner_state = lax.cond( + should_update_fn(state.step), do_update, reject_update, operand=None) + return updates, MaybeUpdateState(new_inner_state, + numerics.safe_int32_increment(state.step)) + + return base.GradientTransformation(init_fn, update_fn) diff --git a/lib/python3.10/site-packages/optax/experimental/__init__.py b/lib/python3.10/site-packages/optax/experimental/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f2c41abf651418d84ea77ee99aeab56710a5a747 --- /dev/null +++ b/lib/python3.10/site-packages/optax/experimental/__init__.py @@ -0,0 +1,23 @@ +# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Experimental features in Optax. + +Features may be removed or modified at any time. 
+""" + +from optax._src.experimental.complex_valued import split_real_and_imaginary +from optax._src.experimental.complex_valued import SplitRealAndImaginaryState +from optax._src.experimental.extra_args import GradientTransformationWithExtraArgs +from optax._src.experimental.extra_args import named_chain diff --git a/lib/python3.10/site-packages/optax/optax_test.py b/lib/python3.10/site-packages/optax/optax_test.py new file mode 100644 index 0000000000000000000000000000000000000000..1627a6af99724ef77e955cc6cad8d59d1f2669bd --- /dev/null +++ b/lib/python3.10/site-packages/optax/optax_test.py @@ -0,0 +1,29 @@ +# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for optax.""" + +from absl.testing import absltest +import optax + + +class OptaxTest(absltest.TestCase): + """Test optax can be imported correctly.""" + + def test_import(self): + self.assertTrue(hasattr(optax, 'GradientTransformation')) + + +if __name__ == '__main__': + absltest.main() diff --git a/lib/python3.10/site-packages/pasta/__init__.py b/lib/python3.10/site-packages/pasta/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2b07805a439bab8c51aa78f093962e7b80071de1 --- /dev/null +++ b/lib/python3.10/site-packages/pasta/__init__.py @@ -0,0 +1,30 @@ +# coding=utf-8 +"""Pasta enables AST-based transformations on python source code.""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from pasta.base import annotate +from pasta.base import ast_utils +from pasta.base import codegen + + +def parse(src): + t = ast_utils.parse(src) + annotator = annotate.AstAnnotator(src) + annotator.visit(t) + return t + + +def dump(tree): + return codegen.to_str(tree) diff --git a/lib/python3.10/site-packages/pasta/augment/__init__.py b/lib/python3.10/site-packages/pasta/augment/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/pasta/augment/errors.py b/lib/python3.10/site-packages/pasta/augment/errors.py new file mode 100644 index 0000000000000000000000000000000000000000..bb2fb265069bfb9b5bf922601b4b4d4affacec46 --- /dev/null +++ b/lib/python3.10/site-packages/pasta/augment/errors.py @@ -0,0 +1,23 @@ +# coding=utf-8 +"""Errors that can occur during augmentation.""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +class InvalidAstError(Exception): + """Occurs when the syntax tree does not meet some expected condition.""" diff --git a/lib/python3.10/site-packages/pasta/augment/import_utils.py b/lib/python3.10/site-packages/pasta/augment/import_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2cda69cadc0c2f9afa140282aff3e3506c6ef2eb --- /dev/null +++ b/lib/python3.10/site-packages/pasta/augment/import_utils.py @@ -0,0 +1,217 @@ +# coding=utf-8 +"""Functions for dealing with import statements.""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast +import copy +import logging + +from pasta.augment import errors +from pasta.base import ast_utils +from pasta.base import scope + + +def add_import(tree, name_to_import, asname=None, from_import=True, merge_from_imports=True): + """Adds an import to the module. + + This function will try to ensure not to create duplicate imports. If name_to_import is + already imported, it will return the existing import. This is true even if asname is set + (asname will be ignored, and the existing name will be returned). 
+ + If the import would create a name that already exists in the scope given by tree, this + function will "import as", and append "_x" to the asname where x is the smallest positive + integer generating a unique name. + + Arguments: + tree: (ast.Module) Module AST to modify. + name_to_import: (string) The absolute name to import. + asname: (string) The alias for the import ("import name_to_import as asname") + from_import: (boolean) If True, import the name using an ImportFrom node. + merge_from_imports: (boolean) If True, merge a newly inserted ImportFrom + node into an existing ImportFrom node, if applicable. + + Returns: + The name (as a string) that can be used to reference the imported name. This + can be the fully-qualified name, the basename, or an alias name. + """ + sc = scope.analyze(tree) + + # Don't add anything if it's already imported + if name_to_import in sc.external_references: + existing_ref = next((ref for ref in sc.external_references[name_to_import] + if ref.name_ref is not None), None) + if existing_ref: + return existing_ref.name_ref.id + + import_node = None + added_name = None + + def make_safe_alias_node(alias_name, asname): + # Try to avoid name conflicts + new_alias = ast.alias(name=alias_name, asname=asname) + imported_name = asname or alias_name + counter = 0 + while imported_name in sc.names: + counter += 1 + imported_name = new_alias.asname = '%s_%d' % (asname or alias_name, + counter) + return new_alias + + # Add an ImportFrom node if requested and possible + if from_import and '.' 
in name_to_import: + from_module, alias_name = name_to_import.rsplit('.', 1) + + new_alias = make_safe_alias_node(alias_name, asname) + + if merge_from_imports: + # Try to add to an existing ImportFrom from the same module + existing_from_import = next( + (node for node in tree.body if isinstance(node, ast.ImportFrom) + and node.module == from_module and node.level == 0), None) + if existing_from_import: + existing_from_import.names.append(new_alias) + return new_alias.asname or new_alias.name + + # Create a new node for this import + import_node = ast.ImportFrom(module=from_module, names=[new_alias], level=0) + + # If not already created as an ImportFrom, create a normal Import node + if not import_node: + new_alias = make_safe_alias_node(alias_name=name_to_import, asname=asname) + import_node = ast.Import( + names=[new_alias]) + + # Insert the node at the top of the module and return the name in scope + tree.body.insert(1 if ast_utils.has_docstring(tree) else 0, import_node) + return new_alias.asname or new_alias.name + + +def split_import(sc, node, alias_to_remove): + """Split an import node by moving the given imported alias into a new import. + + Arguments: + sc: (scope.Scope) Scope computed on whole tree of the code being modified. + node: (ast.Import|ast.ImportFrom) An import node to split. + alias_to_remove: (ast.alias) The import alias node to remove. This must be a + child of the given `node` argument. + + Raises: + errors.InvalidAstError: if `node` is not appropriately contained in the tree + represented by the scope `sc`. 
+ """ + parent = sc.parent(node) + parent_list = None + for a in ('body', 'orelse', 'finalbody'): + if hasattr(parent, a) and node in getattr(parent, a): + parent_list = getattr(parent, a) + break + else: + raise errors.InvalidAstError('Unable to find list containing import %r on ' + 'parent node %r' % (node, parent)) + + idx = parent_list.index(node) + new_import = copy.deepcopy(node) + new_import.names = [alias_to_remove] + node.names.remove(alias_to_remove) + + parent_list.insert(idx + 1, new_import) + return new_import + + +def get_unused_import_aliases(tree, sc=None): + """Get the import aliases that aren't used. + + Arguments: + tree: (ast.AST) An ast to find imports in. + sc: A scope.Scope representing tree (generated from scratch if not + provided). + + Returns: + A list of ast.alias representing imported aliases that aren't referenced in + the given tree. + """ + if sc is None: + sc = scope.analyze(tree) + unused_aliases = set() + for node in ast.walk(tree): + if isinstance(node, ast.alias): + str_name = node.asname if node.asname is not None else node.name + if str_name in sc.names: + name = sc.names[str_name] + if not name.reads: + unused_aliases.add(node) + else: + # This happens because of https://github.com/google/pasta/issues/32 + logging.warning('Imported name %s not found in scope (perhaps it\'s ' + 'imported dynamically)', str_name) + + return unused_aliases + + +def remove_import_alias_node(sc, node): + """Remove an alias and if applicable remove their entire import. + + Arguments: + sc: (scope.Scope) Scope computed on whole tree of the code being modified. + node: (ast.Import|ast.ImportFrom|ast.alias) The node to remove. + """ + import_node = sc.parent(node) + if len(import_node.names) == 1: + import_parent = sc.parent(import_node) + ast_utils.remove_child(import_parent, import_node) + else: + ast_utils.remove_child(import_node, node) + + +def remove_duplicates(tree, sc=None): + """Remove duplicate imports, where it is safe to do so. 
+ + This does NOT remove imports that create new aliases + + Arguments: + tree: (ast.AST) An ast to modify imports in. + sc: A scope.Scope representing tree (generated from scratch if not + provided). + + Returns: + Whether any changes were made. + """ + if sc is None: + sc = scope.analyze(tree) + + modified = False + seen_names = set() + for node in tree.body: + if isinstance(node, (ast.Import, ast.ImportFrom)): + for alias in list(node.names): + import_node = sc.parent(alias) + if isinstance(import_node, ast.Import): + full_name = alias.name + elif import_node.module: + full_name = '%s%s.%s' % ('.' * import_node.level, + import_node.module, alias.name) + else: + full_name = '%s%s' % ('.' * import_node.level, alias.name) + full_name += ':' + (alias.asname or alias.name) + if full_name in seen_names: + remove_import_alias_node(sc, alias) + modified = True + else: + seen_names.add(full_name) + return modified diff --git a/lib/python3.10/site-packages/pasta/augment/import_utils_test.py b/lib/python3.10/site-packages/pasta/augment/import_utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a1e626e72a68cff26b81123c4c873c872aabdae2 --- /dev/null +++ b/lib/python3.10/site-packages/pasta/augment/import_utils_test.py @@ -0,0 +1,428 @@ +# coding=utf-8 +"""Tests for import_utils.""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast +import traceback +import unittest + +import pasta +from pasta.augment import import_utils +from pasta.base import ast_utils +from pasta.base import test_utils +from pasta.base import scope + + +class SplitImportTest(test_utils.TestCase): + + def test_split_normal_import(self): + src = 'import aaa, bbb, ccc\n' + t = ast.parse(src) + import_node = t.body[0] + sc = scope.analyze(t) + import_utils.split_import(sc, import_node, import_node.names[1]) + + self.assertEqual(2, len(t.body)) + self.assertEqual(ast.Import, type(t.body[1])) + self.assertEqual([alias.name for alias in t.body[0].names], ['aaa', 'ccc']) + self.assertEqual([alias.name for alias in t.body[1].names], ['bbb']) + + def test_split_from_import(self): + src = 'from aaa import bbb, ccc, ddd\n' + t = ast.parse(src) + import_node = t.body[0] + sc = scope.analyze(t) + import_utils.split_import(sc, import_node, import_node.names[1]) + + self.assertEqual(2, len(t.body)) + self.assertEqual(ast.ImportFrom, type(t.body[1])) + self.assertEqual(t.body[0].module, 'aaa') + self.assertEqual(t.body[1].module, 'aaa') + self.assertEqual([alias.name for alias in t.body[0].names], ['bbb', 'ddd']) + + def test_split_imports_with_alias(self): + src = 'import aaa as a, bbb as b, ccc as c\n' + t = ast.parse(src) + import_node = t.body[0] + sc = scope.analyze(t) + import_utils.split_import(sc, import_node, import_node.names[1]) + + self.assertEqual(2, len(t.body)) + self.assertEqual([alias.name for alias in t.body[0].names], ['aaa', 'ccc']) + self.assertEqual([alias.name for alias in t.body[1].names], ['bbb']) + self.assertEqual(t.body[1].names[0].asname, 'b') + + def test_split_imports_multiple(self): + src = 'import aaa, bbb, ccc\n' + t = ast.parse(src) + import_node = t.body[0] + alias_bbb = import_node.names[1] + alias_ccc = import_node.names[2] + sc = scope.analyze(t) + 
import_utils.split_import(sc, import_node, alias_bbb) + import_utils.split_import(sc, import_node, alias_ccc) + + self.assertEqual(3, len(t.body)) + self.assertEqual([alias.name for alias in t.body[0].names], ['aaa']) + self.assertEqual([alias.name for alias in t.body[1].names], ['ccc']) + self.assertEqual([alias.name for alias in t.body[2].names], ['bbb']) + + def test_split_nested_imports(self): + test_cases = ( + 'def foo():\n {import_stmt}\n', + 'class Foo(object):\n {import_stmt}\n', + 'if foo:\n {import_stmt}\nelse:\n pass\n', + 'if foo:\n pass\nelse:\n {import_stmt}\n', + 'if foo:\n pass\nelif bar:\n {import_stmt}\n', + 'try:\n {import_stmt}\nexcept:\n pass\n', + 'try:\n pass\nexcept:\n {import_stmt}\n', + 'try:\n pass\nfinally:\n {import_stmt}\n', + 'for i in foo:\n {import_stmt}\n', + 'for i in foo:\n pass\nelse:\n {import_stmt}\n', + 'while foo:\n {import_stmt}\n', + ) + + for template in test_cases: + try: + src = template.format(import_stmt='import aaa, bbb, ccc') + t = ast.parse(src) + sc = scope.analyze(t) + import_node = ast_utils.find_nodes_by_type(t, ast.Import)[0] + import_utils.split_import(sc, import_node, import_node.names[1]) + + split_import_nodes = ast_utils.find_nodes_by_type(t, ast.Import) + self.assertEqual(1, len(t.body)) + self.assertEqual(2, len(split_import_nodes)) + self.assertEqual([alias.name for alias in split_import_nodes[0].names], + ['aaa', 'ccc']) + self.assertEqual([alias.name for alias in split_import_nodes[1].names], + ['bbb']) + except: + self.fail('Failed while executing case:\n%s\nCaused by:\n%s' % + (src, traceback.format_exc())) + +class GetUnusedImportsTest(test_utils.TestCase): + + def test_normal_imports(self): + src = """\ +import a +import b +a.foo() +""" + tree = ast.parse(src) + self.assertItemsEqual(import_utils.get_unused_import_aliases(tree), + [tree.body[1].names[0]]) + + def test_import_from(self): + src = """\ +from my_module import a +import b +from my_module import c +b.foo() +c.bar() +""" + tree = 
ast.parse(src) + self.assertItemsEqual(import_utils.get_unused_import_aliases(tree), + [tree.body[0].names[0]]) + + def test_import_from_alias(self): + src = """\ +from my_module import a, b +b.foo() +""" + tree = ast.parse(src) + self.assertItemsEqual(import_utils.get_unused_import_aliases(tree), + [tree.body[0].names[0]]) + + def test_import_asname(self): + src = """\ +from my_module import a as a_mod, b as unused_b_mod +import c as c_mod, d as unused_d_mod +a_mod.foo() +c_mod.foo() +""" + tree = ast.parse(src) + self.assertItemsEqual(import_utils.get_unused_import_aliases(tree), + [tree.body[0].names[1], + tree.body[1].names[1]]) + + def test_dynamic_import(self): + # For now we just don't want to error out on these, longer + # term we want to do the right thing (see + # https://github.com/google/pasta/issues/32) + src = """\ +def foo(): + import bar +""" + tree = ast.parse(src) + self.assertItemsEqual(import_utils.get_unused_import_aliases(tree), + []) + + + +class RemoveImportTest(test_utils.TestCase): + # Note that we don't test any 'asname' examples but as far as remove_import_alias_node + # is concerned its not a different case because its still just an alias type + # and we don't care about the internals of the alias we're trying to remove. 
+ def test_remove_just_alias(self): + src = "import a, b" + tree = ast.parse(src) + sc = scope.analyze(tree) + + unused_b_node = tree.body[0].names[1] + + import_utils.remove_import_alias_node(sc, unused_b_node) + + self.assertEqual(len(tree.body), 1) + self.assertEqual(type(tree.body[0]), ast.Import) + self.assertEqual(len(tree.body[0].names), 1) + self.assertEqual(tree.body[0].names[0].name, 'a') + + def test_remove_just_alias_import_from(self): + src = "from m import a, b" + tree = ast.parse(src) + sc = scope.analyze(tree) + + unused_b_node = tree.body[0].names[1] + + import_utils.remove_import_alias_node(sc, unused_b_node) + + self.assertEqual(len(tree.body), 1) + self.assertEqual(type(tree.body[0]), ast.ImportFrom) + self.assertEqual(len(tree.body[0].names), 1) + self.assertEqual(tree.body[0].names[0].name, 'a') + + def test_remove_full_import(self): + src = "import a" + tree = ast.parse(src) + sc = scope.analyze(tree) + + a_node = tree.body[0].names[0] + + import_utils.remove_import_alias_node(sc, a_node) + + self.assertEqual(len(tree.body), 0) + + def test_remove_full_importfrom(self): + src = "from m import a" + tree = ast.parse(src) + sc = scope.analyze(tree) + + a_node = tree.body[0].names[0] + + import_utils.remove_import_alias_node(sc, a_node) + + self.assertEqual(len(tree.body), 0) + + +class AddImportTest(test_utils.TestCase): + + def test_add_normal_import(self): + tree = ast.parse('') + self.assertEqual('a.b.c', + import_utils.add_import(tree, 'a.b.c', from_import=False)) + self.assertEqual('import a.b.c\n', pasta.dump(tree)) + + def test_add_normal_import_with_asname(self): + tree = ast.parse('') + self.assertEqual( + 'd', + import_utils.add_import(tree, 'a.b.c', asname='d', from_import=False) + ) + self.assertEqual('import a.b.c as d\n', pasta.dump(tree)) + + def test_add_from_import(self): + tree = ast.parse('') + self.assertEqual('c', + import_utils.add_import(tree, 'a.b.c', from_import=True)) + self.assertEqual('from a.b import c\n', 
pasta.dump(tree)) + + def test_add_from_import_with_asname(self): + tree = ast.parse('') + self.assertEqual( + 'd', + import_utils.add_import(tree, 'a.b.c', asname='d', from_import=True) + ) + self.assertEqual('from a.b import c as d\n', pasta.dump(tree)) + + def test_add_single_name_from_import(self): + tree = ast.parse('') + self.assertEqual('foo', + import_utils.add_import(tree, 'foo', from_import=True)) + self.assertEqual('import foo\n', pasta.dump(tree)) + + def test_add_single_name_from_import_with_asname(self): + tree = ast.parse('') + self.assertEqual( + 'bar', + import_utils.add_import(tree, 'foo', asname='bar', from_import=True) + ) + self.assertEqual('import foo as bar\n', pasta.dump(tree)) + + def test_add_existing_import(self): + tree = ast.parse('from a.b import c') + self.assertEqual('c', import_utils.add_import(tree, 'a.b.c')) + self.assertEqual('from a.b import c\n', pasta.dump(tree)) + + def test_add_existing_import_aliased(self): + tree = ast.parse('from a.b import c as d') + self.assertEqual('d', import_utils.add_import(tree, 'a.b.c')) + self.assertEqual('from a.b import c as d\n', pasta.dump(tree)) + + def test_add_existing_import_aliased_with_asname(self): + tree = ast.parse('from a.b import c as d') + self.assertEqual('d', import_utils.add_import(tree, 'a.b.c', asname='e')) + self.assertEqual('from a.b import c as d\n', pasta.dump(tree)) + + def test_add_existing_import_normal_import(self): + tree = ast.parse('import a.b.c') + self.assertEqual('a.b', + import_utils.add_import(tree, 'a.b', from_import=False)) + self.assertEqual('import a.b.c\n', pasta.dump(tree)) + + def test_add_existing_import_normal_import_aliased(self): + tree = ast.parse('import a.b.c as d') + self.assertEqual('a.b', + import_utils.add_import(tree, 'a.b', from_import=False)) + self.assertEqual('d', + import_utils.add_import(tree, 'a.b.c', from_import=False)) + self.assertEqual('import a.b\nimport a.b.c as d\n', pasta.dump(tree)) + + def 
test_add_import_with_conflict(self): + tree = ast.parse('def c(): pass\n') + self.assertEqual('c_1', + import_utils.add_import(tree, 'a.b.c', from_import=True)) + self.assertEqual( + 'from a.b import c as c_1\ndef c():\n pass\n', pasta.dump(tree)) + + def test_add_import_with_asname_with_conflict(self): + tree = ast.parse('def c(): pass\n') + self.assertEqual('c_1', + import_utils.add_import(tree, 'a.b', asname='c', from_import=True)) + self.assertEqual( + 'from a import b as c_1\ndef c():\n pass\n', pasta.dump(tree)) + + def test_merge_from_import(self): + tree = ast.parse('from a.b import c') + + # x is explicitly not merged + self.assertEqual('x', import_utils.add_import(tree, 'a.b.x', + merge_from_imports=False)) + self.assertEqual('from a.b import x\nfrom a.b import c\n', + pasta.dump(tree)) + + # y is allowed to be merged and is grouped into the first matching import + self.assertEqual('y', import_utils.add_import(tree, 'a.b.y', + merge_from_imports=True)) + self.assertEqual('from a.b import x, y\nfrom a.b import c\n', + pasta.dump(tree)) + + def test_add_import_after_docstring(self): + tree = ast.parse('\'Docstring.\'') + self.assertEqual('a', import_utils.add_import(tree, 'a')) + self.assertEqual('\'Docstring.\'\nimport a\n', pasta.dump(tree)) + + +class RemoveDuplicatesTest(test_utils.TestCase): + def test_remove_duplicates(self): + src = """ +import a +import b +import c +import b +import d +""" + tree = ast.parse(src) + self.assertTrue(import_utils.remove_duplicates(tree)) + + self.assertEqual(len(tree.body), 4) + self.assertEqual(tree.body[0].names[0].name, 'a') + self.assertEqual(tree.body[1].names[0].name, 'b') + self.assertEqual(tree.body[2].names[0].name, 'c') + self.assertEqual(tree.body[3].names[0].name, 'd') + + def test_remove_duplicates_multiple(self): + src = """ +import a, b +import b, c +import d, a, e, f +""" + tree = ast.parse(src) + self.assertTrue(import_utils.remove_duplicates(tree)) + + self.assertEqual(len(tree.body), 3) + 
self.assertEqual(len(tree.body[0].names), 2) + self.assertEqual(tree.body[0].names[0].name, 'a') + self.assertEqual(tree.body[0].names[1].name, 'b') + self.assertEqual(len(tree.body[1].names), 1) + self.assertEqual(tree.body[1].names[0].name, 'c') + self.assertEqual(len(tree.body[2].names), 3) + self.assertEqual(tree.body[2].names[0].name, 'd') + self.assertEqual(tree.body[2].names[1].name, 'e') + self.assertEqual(tree.body[2].names[2].name, 'f') + + def test_remove_duplicates_empty_node(self): + src = """ +import a, b, c +import b, c +""" + tree = ast.parse(src) + self.assertTrue(import_utils.remove_duplicates(tree)) + + self.assertEqual(len(tree.body), 1) + self.assertEqual(len(tree.body[0].names), 3) + self.assertEqual(tree.body[0].names[0].name, 'a') + self.assertEqual(tree.body[0].names[1].name, 'b') + self.assertEqual(tree.body[0].names[2].name, 'c') + + def test_remove_duplicates_normal_and_from(self): + src = """ +import a.b +from a import b +""" + tree = ast.parse(src) + self.assertFalse(import_utils.remove_duplicates(tree)) + self.assertEqual(len(tree.body), 2) + + def test_remove_duplicates_aliases(self): + src = """ +import a +import a as ax +import a as ax2 +import a as ax +""" + tree = ast.parse(src) + self.assertTrue(import_utils.remove_duplicates(tree)) + self.assertEqual(len(tree.body), 3) + self.assertEqual(tree.body[0].names[0].asname, None) + self.assertEqual(tree.body[1].names[0].asname, 'ax') + self.assertEqual(tree.body[2].names[0].asname, 'ax2') + + +def suite(): + result = unittest.TestSuite() + result.addTests(unittest.makeSuite(SplitImportTest)) + result.addTests(unittest.makeSuite(GetUnusedImportsTest)) + result.addTests(unittest.makeSuite(RemoveImportTest)) + result.addTests(unittest.makeSuite(AddImportTest)) + result.addTests(unittest.makeSuite(RemoveDuplicatesTest)) + return result + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/pasta/augment/inline.py 
b/lib/python3.10/site-packages/pasta/augment/inline.py new file mode 100644 index 0000000000000000000000000000000000000000..b810199b168ed1c79104d9f3c97ddc20f17b0ad9 --- /dev/null +++ b/lib/python3.10/site-packages/pasta/augment/inline.py @@ -0,0 +1,65 @@ +# coding=utf-8 +"""Inline constants in a python module.""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast +import copy + +from pasta.base import ast_utils +from pasta.base import scope + + +class InlineError(Exception): + pass + + +def inline_name(t, name): + """Inline a constant name into a module.""" + sc = scope.analyze(t) + name_node = sc.names[name] + + # The name must be a Name node (not a FunctionDef, etc.) 
+ if not isinstance(name_node.definition, ast.Name): + raise InlineError('%r is not a constant; it has type %r' % ( + name, type(name_node.definition))) + + assign_node = sc.parent(name_node.definition) + if not isinstance(assign_node, ast.Assign): + raise InlineError('%r is not declared in an assignment' % name) + + value = assign_node.value + if not isinstance(sc.parent(assign_node), ast.Module): + raise InlineError('%r is not a top-level name' % name) + + # If the name is written anywhere else in this module, it is not constant + for ref in name_node.reads: + if isinstance(getattr(ref, 'ctx', None), ast.Store): + raise InlineError('%r is not a constant' % name) + + # Replace all reads of the name with a copy of its value + for ref in name_node.reads: + ast_utils.replace_child(sc.parent(ref), ref, copy.deepcopy(value)) + + # Remove the assignment to this name + if len(assign_node.targets) == 1: + ast_utils.remove_child(sc.parent(assign_node), assign_node) + else: + tgt_list = [tgt for tgt in assign_node.targets + if not (isinstance(tgt, ast.Name) and tgt.id == name)] + assign_node.targets = tgt_list diff --git a/lib/python3.10/site-packages/pasta/augment/inline_test.py b/lib/python3.10/site-packages/pasta/augment/inline_test.py new file mode 100644 index 0000000000000000000000000000000000000000..cd71c7161288d3aebcc38a03bbbf8a265619450a --- /dev/null +++ b/lib/python3.10/site-packages/pasta/augment/inline_test.py @@ -0,0 +1,97 @@ +# coding=utf-8 +"""Tests for augment.inline.""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast +import textwrap +import unittest + +from pasta.augment import inline +from pasta.base import test_utils + + +class InlineTest(test_utils.TestCase): + + def test_inline_simple(self): + src = 'x = 1\na = x\n' + t = ast.parse(src) + inline.inline_name(t, 'x') + self.checkAstsEqual(t, ast.parse('a = 1\n')) + + def test_inline_multiple_targets(self): + src = 'x = y = z = 1\na = x + y\n' + t = ast.parse(src) + inline.inline_name(t, 'y') + self.checkAstsEqual(t, ast.parse('x = z = 1\na = x + 1\n')) + + def test_inline_multiple_reads(self): + src = textwrap.dedent('''\ + CONSTANT = "foo" + def a(b=CONSTANT): + return b == CONSTANT + ''') + expected = textwrap.dedent('''\ + def a(b="foo"): + return b == "foo" + ''') + t = ast.parse(src) + inline.inline_name(t, 'CONSTANT') + self.checkAstsEqual(t, ast.parse(expected)) + + def test_inline_non_constant_fails(self): + src = textwrap.dedent('''\ + NOT_A_CONSTANT = "foo" + NOT_A_CONSTANT += "bar" + ''') + t = ast.parse(src) + with self.assertRaisesRegexp(inline.InlineError, + '\'NOT_A_CONSTANT\' is not a constant'): + inline.inline_name(t, 'NOT_A_CONSTANT') + + def test_inline_function_fails(self): + src = 'def func(): pass\nfunc()\n' + t = ast.parse(src) + + with self.assertRaisesRegexp( + inline.InlineError, + '\'func\' is not a constant; it has type %r' % ast.FunctionDef): + inline.inline_name(t, 'func') + + def test_inline_conditional_fails(self): + src = 'if define:\n x = 1\na = x\n' + t = ast.parse(src) + with self.assertRaisesRegexp(inline.InlineError, + '\'x\' is not a top-level name'): + inline.inline_name(t, 'x') + + def test_inline_non_assign_fails(self): + src = 'CONSTANT1, CONSTANT2 = values' + t = ast.parse(src) + with self.assertRaisesRegexp( + inline.InlineError, '\'CONSTANT1\' is not 
declared in an assignment'): + inline.inline_name(t, 'CONSTANT1') + + +def suite(): + result = unittest.TestSuite() + result.addTests(unittest.makeSuite(InlineTest)) + return result + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/pasta/augment/rename.py b/lib/python3.10/site-packages/pasta/augment/rename.py new file mode 100644 index 0000000000000000000000000000000000000000..aead50a8b4a3fe9a08723c7106744c9c05d08ee9 --- /dev/null +++ b/lib/python3.10/site-packages/pasta/augment/rename.py @@ -0,0 +1,154 @@ +# coding=utf-8 +"""Rename names in a python module.""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast +import six + +from pasta.augment import import_utils +from pasta.base import ast_utils +from pasta.base import scope + + +def rename_external(t, old_name, new_name): + """Rename an imported name in a module. + + This will rewrite all import statements in `tree` that reference the old + module as well as any names in `tree` which reference the imported name. This + may introduce new import statements, but only if necessary. 
+ + For example, to move and rename the module `foo.bar.utils` to `foo.bar_utils`: + > rename_external(tree, 'foo.bar.utils', 'foo.bar_utils') + + - import foo.bar.utils + + import foo.bar_utils + + - from foo.bar import utils + + from foo import bar_utils + + - from foo.bar import logic, utils + + from foo.bar import logic + + from foo import bar_utils + + Arguments: + t: (ast.Module) Module syntax tree to perform the rename in. This will be + updated as a result of this function call with all affected nodes changed + and potentially new Import/ImportFrom nodes added. + old_name: (string) Fully-qualified path of the name to replace. + new_name: (string) Fully-qualified path of the name to update to. + + Returns: + True if any changes were made, False otherwise. + """ + sc = scope.analyze(t) + + if old_name not in sc.external_references: + return False + + has_changed = False + renames = {} + already_changed = [] + for ref in sc.external_references[old_name]: + if isinstance(ref.node, ast.alias): + parent = sc.parent(ref.node) + # An alias may be the most specific reference to an imported name, but it + # could if it is a child of an ImportFrom, the ImportFrom node's module + # may also need to be updated. 
+ if isinstance(parent, ast.ImportFrom) and parent not in already_changed: + assert _rename_name_in_importfrom(sc, parent, old_name, new_name) + renames[old_name.rsplit('.', 1)[-1]] = new_name.rsplit('.', 1)[-1] + already_changed.append(parent) + else: + ref.node.name = new_name + ref.node.name[len(old_name):] + if not ref.node.asname: + renames[old_name] = new_name + has_changed = True + elif isinstance(ref.node, ast.ImportFrom): + if ref.node not in already_changed: + assert _rename_name_in_importfrom(sc, ref.node, old_name, new_name) + renames[old_name.rsplit('.', 1)[-1]] = new_name.rsplit('.', 1)[-1] + already_changed.append(ref.node) + has_changed = True + + for rename_old, rename_new in six.iteritems(renames): + _rename_reads(sc, t, rename_old, rename_new) + return has_changed + + +def _rename_name_in_importfrom(sc, node, old_name, new_name): + if old_name == new_name: + return False + + module_parts = node.module.split('.') + old_parts = old_name.split('.') + new_parts = new_name.split('.') + + # If just the module is changing, rename it + if module_parts[:len(old_parts)] == old_parts: + node.module = '.'.join(new_parts + module_parts[len(old_parts):]) + return True + + # Find the alias node to be changed + for alias_to_change in node.names: + if alias_to_change.name == old_parts[-1]: + break + else: + return False + + alias_to_change.name = new_parts[-1] + + # Split the import if the package has changed + if module_parts != new_parts[:-1]: + if len(node.names) > 1: + new_import = import_utils.split_import(sc, node, alias_to_change) + new_import.module = '.'.join(new_parts[:-1]) + else: + node.module = '.'.join(new_parts[:-1]) + + return True + + +def _rename_reads(sc, t, old_name, new_name): + """Updates all locations in the module where the given name is read. + + Arguments: + sc: (scope.Scope) Scope to work in. This should be the scope of `t`. + t: (ast.AST) The AST to perform updates in. + old_name: (string) Dotted name to update. 
+ new_name: (string) Dotted name to replace it with. + + Returns: + True if any changes were made, False otherwise. + """ + name_parts = old_name.split('.') + try: + name = sc.names[name_parts[0]] + for part in name_parts[1:]: + name = name.attrs[part] + except KeyError: + return False + + has_changed = False + for ref_node in name.reads: + if isinstance(ref_node, (ast.Name, ast.Attribute)): + ast_utils.replace_child(sc.parent(ref_node), ref_node, + ast.parse(new_name).body[0].value) + has_changed = True + + return has_changed diff --git a/lib/python3.10/site-packages/pasta/augment/rename_test.py b/lib/python3.10/site-packages/pasta/augment/rename_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5f773fe05a50ecfff2d8d64506a161914e37e287 --- /dev/null +++ b/lib/python3.10/site-packages/pasta/augment/rename_test.py @@ -0,0 +1,119 @@ +# coding=utf-8 +"""Tests for augment.rename.""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast +import unittest + +from pasta.augment import rename +from pasta.base import scope +from pasta.base import test_utils + + +class RenameTest(test_utils.TestCase): + + def test_rename_external_in_import(self): + src = 'import aaa.bbb.ccc\naaa.bbb.ccc.foo()' + t = ast.parse(src) + self.assertTrue(rename.rename_external(t, 'aaa.bbb', 'xxx.yyy')) + self.checkAstsEqual(t, ast.parse('import xxx.yyy.ccc\nxxx.yyy.ccc.foo()')) + + t = ast.parse(src) + self.assertTrue(rename.rename_external(t, 'aaa.bbb.ccc', 'xxx.yyy')) + self.checkAstsEqual(t, ast.parse('import xxx.yyy\nxxx.yyy.foo()')) + + t = ast.parse(src) + self.assertFalse(rename.rename_external(t, 'bbb', 'xxx.yyy')) + self.checkAstsEqual(t, ast.parse(src)) + + def test_rename_external_in_import_with_asname(self): + src = 'import aaa.bbb.ccc as ddd\nddd.foo()' + t = ast.parse(src) + self.assertTrue(rename.rename_external(t, 'aaa.bbb', 'xxx.yyy')) + self.checkAstsEqual(t, ast.parse('import xxx.yyy.ccc as ddd\nddd.foo()')) + + def test_rename_external_in_import_multiple_aliases(self): + src = 'import aaa, aaa.bbb, aaa.bbb.ccc' + t = ast.parse(src) + self.assertTrue(rename.rename_external(t, 'aaa.bbb', 'xxx.yyy')) + self.checkAstsEqual(t, ast.parse('import aaa, xxx.yyy, xxx.yyy.ccc')) + + def test_rename_external_in_importfrom(self): + src = 'from aaa.bbb.ccc import ddd\nddd.foo()' + t = ast.parse(src) + self.assertTrue(rename.rename_external(t, 'aaa.bbb', 'xxx.yyy')) + self.checkAstsEqual(t, ast.parse('from xxx.yyy.ccc import ddd\nddd.foo()')) + + t = ast.parse(src) + self.assertTrue(rename.rename_external(t, 'aaa.bbb.ccc', 'xxx.yyy')) + self.checkAstsEqual(t, ast.parse('from xxx.yyy import ddd\nddd.foo()')) + + t = ast.parse(src) + self.assertFalse(rename.rename_external(t, 'bbb', 'xxx.yyy')) + self.checkAstsEqual(t, ast.parse(src)) + + def test_rename_external_in_importfrom_alias(self): + src = 
'from aaa.bbb import ccc\nccc.foo()' + t = ast.parse(src) + self.assertTrue(rename.rename_external(t, 'aaa.bbb.ccc', 'xxx.yyy')) + self.checkAstsEqual(t, ast.parse('from xxx import yyy\nyyy.foo()')) + + def test_rename_external_in_importfrom_alias_with_asname(self): + src = 'from aaa.bbb import ccc as abc\nabc.foo()' + t = ast.parse(src) + self.assertTrue(rename.rename_external(t, 'aaa.bbb.ccc', 'xxx.yyy')) + self.checkAstsEqual(t, ast.parse('from xxx import yyy as abc\nabc.foo()')) + + def test_rename_reads_name(self): + src = 'aaa.bbb()' + t = ast.parse(src) + sc = scope.analyze(t) + self.assertTrue(rename._rename_reads(sc, t, 'aaa', 'xxx')) + self.checkAstsEqual(t, ast.parse('xxx.bbb()')) + + def test_rename_reads_name_as_attribute(self): + src = 'aaa.bbb()' + t = ast.parse(src) + sc = scope.analyze(t) + rename._rename_reads(sc, t, 'aaa', 'xxx.yyy') + self.checkAstsEqual(t, ast.parse('xxx.yyy.bbb()')) + + def test_rename_reads_attribute(self): + src = 'aaa.bbb.ccc()' + t = ast.parse(src) + sc = scope.analyze(t) + rename._rename_reads(sc, t, 'aaa.bbb', 'xxx.yyy') + self.checkAstsEqual(t, ast.parse('xxx.yyy.ccc()')) + + def test_rename_reads_noop(self): + src = 'aaa.bbb.ccc()' + t = ast.parse(src) + sc = scope.analyze(t) + rename._rename_reads(sc, t, 'aaa.bbb.ccc.ddd', 'xxx.yyy') + rename._rename_reads(sc, t, 'bbb.aaa', 'xxx.yyy') + self.checkAstsEqual(t, ast.parse(src)) + + +def suite(): + result = unittest.TestSuite() + result.addTests(unittest.makeSuite(RenameTest)) + return result + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/pasta/base/__init__.py b/lib/python3.10/site-packages/pasta/base/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/pasta/base/annotate.py b/lib/python3.10/site-packages/pasta/base/annotate.py new file mode 100644 index 
0000000000000000000000000000000000000000..af2a55ae8c3496f2cec1b75278a2e6beb24db5de --- /dev/null +++ b/lib/python3.10/site-packages/pasta/base/annotate.py @@ -0,0 +1,1543 @@ +# coding=utf-8 +"""Annotate python syntax trees with formatting from the source file.""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc +import ast +import contextlib +import functools +import itertools +import six +from six.moves import zip +import sys + +from pasta.base import ast_constants +from pasta.base import ast_utils +from pasta.base import formatting as fmt +from pasta.base import token_generator + + +# ============================================================================== +# == Helper functions for decorating nodes with prefix + suffix == +# ============================================================================== + +def _gen_wrapper(f, scope=True, prefix=True, suffix=True, max_suffix_lines=None, + semicolon=False, comment=False, statement=False): + @contextlib.wraps(f) + def wrapped(self, node, *args, **kwargs): + with (self.scope(node, trailing_comma=False) if scope else _noop_context()): + if prefix: + self.prefix(node, default=self._indent if statement else '') + f(self, node, *args, **kwargs) + if suffix: + self.suffix(node, max_lines=max_suffix_lines, semicolon=semicolon, + comment=comment, default='\n' if 
statement else '') + return wrapped + + +@contextlib.contextmanager +def _noop_context(): + yield + + +def expression(f): + """Decorates a function where the node is an expression.""" + return _gen_wrapper(f, max_suffix_lines=0) + + +def fstring_expression(f): + """Decorates a function where the node is a FormattedValue in an fstring.""" + return _gen_wrapper(f, scope=False) + + +def space_around(f): + """Decorates a function where the node has whitespace prefix and suffix.""" + return _gen_wrapper(f, scope=False) + + +def space_left(f): + """Decorates a function where the node has whitespace prefix.""" + return _gen_wrapper(f, scope=False, suffix=False) + + +def statement(f): + """Decorates a function where the node is a statement.""" + return _gen_wrapper(f, scope=False, max_suffix_lines=1, semicolon=True, + comment=True, statement=True) + + +def module(f): + """Special decorator for the module node.""" + return _gen_wrapper(f, scope=False, comment=True) + + +def block_statement(f): + """Decorates a function where the node is a statement with children.""" + @contextlib.wraps(f) + def wrapped(self, node, *args, **kwargs): + self.prefix(node, default=self._indent) + f(self, node, *args, **kwargs) + if hasattr(self, 'block_suffix'): + last_child = ast_utils.get_last_child(node) + # Workaround for ast.Module which does not have a lineno + if last_child and last_child.lineno != getattr(node, 'lineno', 0): + indent = (fmt.get(last_child, 'prefix') or '\n').splitlines()[-1] + self.block_suffix(node, indent) + else: + self.suffix(node, comment=True) + return wrapped + + +# ============================================================================== +# == NodeVisitors for annotating an AST == +# ============================================================================== + +class BaseVisitor(ast.NodeVisitor): + """Walks a syntax tree in the order it appears in code. + + This class has a dual-purpose. 
It is implemented (in this file) for annotating + an AST with formatting information needed to reconstruct the source code, but + it also is implemented in pasta.base.codegen to reconstruct the source code. + + Each visit method in this class specifies the order in which both child nodes + and syntax tokens appear, plus where to account for whitespace, commas, + parentheses, etc. + """ + + __metaclass__ = abc.ABCMeta + + def __init__(self): + self._stack = [] + self._indent = '' + self._indent_diff = '' + self._default_indent_diff = ' ' + + def visit(self, node): + self._stack.append(node) + super(BaseVisitor, self).visit(node) + assert node is self._stack.pop() + + def prefix(self, node, default=''): + """Account for some amount of whitespace as the prefix to a node.""" + self.attr(node, 'prefix', [lambda: self.ws(comment=True)], default=default) + + def suffix(self, node, max_lines=None, semicolon=False, comment=False, + default=''): + """Account for some amount of whitespace as the suffix to a node.""" + def _ws(): + return self.ws(max_lines=max_lines, semicolon=semicolon, comment=comment) + self.attr(node, 'suffix', [_ws], default=default) + + def indented(self, node, children_attr): + children = getattr(node, children_attr) + prev_indent = self._indent + prev_indent_diff = self._indent_diff + new_diff = fmt.get(children[0], 'indent_diff') + if new_diff is None: + new_diff = self._default_indent_diff + self._indent_diff = new_diff + self._indent = prev_indent + self._indent_diff + for child in children: + yield child + self.attr(node, 'block_suffix_%s' % children_attr, []) + self._indent = prev_indent + self._indent_diff = prev_indent_diff + + def set_default_indent_diff(self, indent): + self._default_indent_diff = indent + + @contextlib.contextmanager + def scope(self, node, attr=None, trailing_comma=False, default_parens=False): + """Context manager to handle a parenthesized scope. + + Arguments: + node: (ast.AST) Node to store the scope prefix and suffix on. 
+ attr: (string, optional) Attribute of the node contained in the scope, if + any. For example, as `None`, the scope would wrap the entire node, but + as 'bases', the scope might wrap only the bases of a class. + trailing_comma: (boolean) If True, allow a trailing comma at the end. + default_parens: (boolean) If True and no formatting information is + present, the scope would be assumed to be parenthesized. + """ + if attr: + self.attr(node, attr + '_prefix', [], + default='(' if default_parens else '') + yield + if attr: + self.attr(node, attr + '_suffix', [], + default=')' if default_parens else '') + + def token(self, token_val): + """Account for a specific token.""" + + def attr(self, node, attr_name, attr_vals, deps=None, default=None): + """Handles an attribute on the given node.""" + + def ws(self, max_lines=None, semicolon=False, comment=True): + """Account for some amount of whitespace. + + Arguments: + max_lines: (int) Maximum number of newlines to consider. + semicolon: (boolean) If True, parse up to the next semicolon (if present). + comment: (boolean) If True, look for a trailing comment even when not in + a parenthesized scope. + """ + return '' + + def dots(self, num_dots): + """Account for a number of dots.""" + return '.' * num_dots + + def ws_oneline(self): + """Account for up to one line of whitespace.""" + return self.ws(max_lines=1) + + def optional_token(self, node, attr_name, token_val, default=False): + """Account for a suffix that may or may not occur.""" + + def one_of_symbols(self, *symbols): + """Account for one of the given symbols.""" + return symbols[0] + + # ============================================================================ + # == BLOCK STATEMENTS: Statements that contain a list of statements == + # ============================================================================ + + # Keeps the entire suffix, so @block_statement is not useful here. 
+ @module + def visit_Module(self, node): + self.generic_visit(node) + + @block_statement + def visit_If(self, node): + tok = 'elif' if fmt.get(node, 'is_elif') else 'if' + self.attr(node, 'open_if', [tok, self.ws], default=tok + ' ') + self.visit(node.test) + self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline], + default=':\n') + + for stmt in self.indented(node, 'body'): + self.visit(stmt) + + if node.orelse: + if (len(node.orelse) == 1 and isinstance(node.orelse[0], ast.If) and + self.check_is_elif(node.orelse[0])): + fmt.set(node.orelse[0], 'is_elif', True) + self.visit(node.orelse[0]) + else: + self.attr(node, 'elseprefix', [self.ws]) + self.token('else') + self.attr(node, 'open_else', [self.ws, ':', self.ws_oneline], + default=':\n') + for stmt in self.indented(node, 'orelse'): + self.visit(stmt) + + @abc.abstractmethod + def check_is_elif(self, node): + """Return True if the node continues a previous `if` statement as `elif`. + + In python 2.x, `elif` statments get parsed as If nodes. E.g, the following + two syntax forms are indistinguishable in the ast in python 2. + + if a: + do_something() + elif b: + do_something_else() + + if a: + do_something() + else: + if b: + do_something_else() + + This method should return True for the 'if b' node if it has the first form. 
+ """ + + @block_statement + def visit_While(self, node): + self.attr(node, 'while_keyword', ['while', self.ws], default='while ') + self.visit(node.test) + self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline], + default=':\n') + for stmt in self.indented(node, 'body'): + self.visit(stmt) + + if node.orelse: + self.attr(node, 'else', [self.ws, 'else', self.ws, ':', self.ws_oneline], + default=self._indent + 'else:\n') + for stmt in self.indented(node, 'orelse'): + self.visit(stmt) + + @block_statement + def visit_For(self, node): + if hasattr(ast, 'AsyncFor') and isinstance(node, ast.AsyncFor): + self.attr(node, 'for_keyword', ['async', self.ws, 'for', self.ws], + default='async for ') + else: + self.attr(node, 'for_keyword', ['for', self.ws], default='for ') + self.visit(node.target) + self.attr(node, 'for_in', [self.ws, 'in', self.ws], default=' in ') + self.visit(node.iter) + self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline], + default=':\n') + for stmt in self.indented(node, 'body'): + self.visit(stmt) + + if node.orelse: + self.attr(node, 'else', [self.ws, 'else', self.ws, ':', self.ws_oneline], + default=self._indent + 'else:\n') + + for stmt in self.indented(node, 'orelse'): + self.visit(stmt) + + def visit_AsyncFor(self, node): + return self.visit_For(node) + + @block_statement + def visit_With(self, node): + if hasattr(node, 'items'): + return self.visit_With_3(node) + if not getattr(node, 'is_continued', False): + self.attr(node, 'with', ['with', self.ws], default='with ') + self.visit(node.context_expr) + if node.optional_vars: + self.attr(node, 'with_as', [self.ws, 'as', self.ws], default=' as ') + self.visit(node.optional_vars) + + if len(node.body) == 1 and self.check_is_continued_with(node.body[0]): + node.body[0].is_continued = True + self.attr(node, 'with_comma', [self.ws, ',', self.ws], default=', ') + else: + self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline], + default=':\n') + for stmt in self.indented(node, 
'body'): + self.visit(stmt) + + def visit_AsyncWith(self, node): + return self.visit_With(node) + + @abc.abstractmethod + def check_is_continued_try(self, node): + pass + + @abc.abstractmethod + def check_is_continued_with(self, node): + """Return True if the node continues a previous `with` statement. + + In python 2.x, `with` statments with many context expressions get parsed as + a tree of With nodes. E.g, the following two syntax forms are + indistinguishable in the ast in python 2. + + with a, b, c: + do_something() + + with a: + with b: + with c: + do_something() + + This method should return True for the `with b` and `with c` nodes. + """ + + def visit_With_3(self, node): + if hasattr(ast, 'AsyncWith') and isinstance(node, ast.AsyncWith): + self.attr(node, 'with', ['async', self.ws, 'with', self.ws], + default='async with ') + else: + self.attr(node, 'with', ['with', self.ws], default='with ') + + for i, withitem in enumerate(node.items): + self.visit(withitem) + if i != len(node.items) - 1: + self.token(',') + + self.attr(node, 'with_body_open', [':', self.ws_oneline], default=':\n') + for stmt in self.indented(node, 'body'): + self.visit(stmt) + + @space_around + def visit_withitem(self, node): + self.visit(node.context_expr) + if node.optional_vars: + self.attr(node, 'as', [self.ws, 'as', self.ws], default=' as ') + self.visit(node.optional_vars) + + @block_statement + def visit_ClassDef(self, node): + for i, decorator in enumerate(node.decorator_list): + self.attr(node, 'decorator_prefix_%d' % i, [self.ws, '@'], default='@') + self.visit(decorator) + self.attr(node, 'decorator_suffix_%d' % i, [self.ws], + default='\n' + self._indent) + self.attr(node, 'class_def', ['class', self.ws, node.name, self.ws], + default='class %s' % node.name, deps=('name',)) + class_args = getattr(node, 'bases', []) + getattr(node, 'keywords', []) + with self.scope(node, 'bases', trailing_comma=bool(class_args), + default_parens=True): + for i, base in enumerate(node.bases): + 
self.visit(base) + self.attr(node, 'base_suffix_%d' % i, [self.ws]) + if base != class_args[-1]: + self.attr(node, 'base_sep_%d' % i, [',', self.ws], default=', ') + if hasattr(node, 'keywords'): + for i, keyword in enumerate(node.keywords): + self.visit(keyword) + self.attr(node, 'keyword_suffix_%d' % i, [self.ws]) + if keyword != node.keywords[-1]: + self.attr(node, 'keyword_sep_%d' % i, [',', self.ws], default=', ') + self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline], + default=':\n') + for stmt in self.indented(node, 'body'): + self.visit(stmt) + + @block_statement + def visit_FunctionDef(self, node): + for i, decorator in enumerate(node.decorator_list): + self.attr(node, 'decorator_symbol_%d' % i, [self.ws, '@', self.ws], + default='@') + self.visit(decorator) + self.attr(node, 'decorator_suffix_%d' % i, [self.ws_oneline], + default='\n' + self._indent) + if (hasattr(ast, 'AsyncFunctionDef') and + isinstance(node, ast.AsyncFunctionDef)): + self.attr(node, 'function_def', + [self.ws, 'async', self.ws, 'def', self.ws, node.name, self.ws], + deps=('name',), default='async def %s' % node.name) + else: + self.attr(node, 'function_def', + [self.ws, 'def', self.ws, node.name, self.ws], + deps=('name',), default='def %s' % node.name) + # In Python 3, there can be extra args in kwonlyargs + kwonlyargs = getattr(node.args, 'kwonlyargs', []) + args_count = sum((len(node.args.args + kwonlyargs), + 1 if node.args.vararg else 0, + 1 if node.args.kwarg else 0)) + with self.scope(node, 'args', trailing_comma=args_count > 0, + default_parens=True): + self.visit(node.args) + + if getattr(node, 'returns', None): + self.attr(node, 'returns_prefix', [self.ws, '->', self.ws], + deps=('returns',), default=' -> ') + self.visit(node.returns) + + self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline], + default=':\n') + for stmt in self.indented(node, 'body'): + self.visit(stmt) + + def visit_AsyncFunctionDef(self, node): + return self.visit_FunctionDef(node) + + 
  @block_statement
  def visit_Try(self, node):
    # Python 3: try/except/else/finally is a single ast.Try node, unlike
    # Python 2's separate TryExcept/TryFinally nodes.
    self.attr(node, 'open_try', [self.ws, 'try', self.ws, ':', self.ws_oneline],
              default='try:\n')
    for stmt in self.indented(node, 'body'):
      self.visit(stmt)
    # Each handler is an ExceptHandler node which emits its own `except` token.
    for handler in node.handlers:
      self.visit(handler)
    if node.orelse:
      self.attr(node, 'open_else',
                [self.ws, 'else', self.ws, ':', self.ws_oneline],
                default='else:\n')
      for stmt in self.indented(node, 'orelse'):
        self.visit(stmt)
    if node.finalbody:
      self.attr(node, 'open_finally',
                [self.ws, 'finally', self.ws, ':', self.ws_oneline],
                default='finally:\n')
      for stmt in self.indented(node, 'finalbody'):
        self.visit(stmt)
and node.name: + self.attr(node, 'as', [self.ws, self.one_of_symbols("as", ","), self.ws], + default=' as ') + if node.name: + if isinstance(node.name, ast.AST): + self.visit(node.name) + else: + self.token(node.name) + self.attr(node, 'open_block', [self.ws, ':', self.ws_oneline], + default=':\n') + for stmt in self.indented(node, 'body'): + self.visit(stmt) + + @statement + def visit_Raise(self, node): + if hasattr(node, 'cause'): + return self.visit_Raise_3(node) + + self.token('raise') + if node.type: + self.attr(node, 'type_prefix', [self.ws], default=' ') + self.visit(node.type) + if node.inst: + self.attr(node, 'inst_prefix', [self.ws, ',', self.ws], default=', ') + self.visit(node.inst) + if node.tback: + self.attr(node, 'tback_prefix', [self.ws, ',', self.ws], default=', ') + self.visit(node.tback) + + def visit_Raise_3(self, node): + if node.exc: + self.attr(node, 'open_raise', ['raise', self.ws], default='raise ') + self.visit(node.exc) + if node.cause: + self.attr(node, 'cause_prefix', [self.ws, 'from', self.ws], + default=' from ') + self.visit(node.cause) + else: + self.token('raise') + + # ============================================================================ + # == STATEMENTS: Instructions without a return value == + # ============================================================================ + + @statement + def visit_Assert(self, node): + self.attr(node, 'assert_open', ['assert', self.ws], default='assert ') + self.visit(node.test) + if node.msg: + self.attr(node, 'msg_prefix', [',', self.ws], default=', ') + self.visit(node.msg) + + @statement + def visit_Assign(self, node): + for i, target in enumerate(node.targets): + self.visit(target) + self.attr(node, 'equal_%d' % i, [self.ws, '=', self.ws], default=' = ') + self.visit(node.value) + + @statement + def visit_AugAssign(self, node): + self.visit(node.target) + op_token = '%s=' % ast_constants.NODE_TYPE_TO_TOKENS[type(node.op)][0] + self.attr(node, 'operator', [self.ws, op_token, 
self.ws], + default=' %s ' % op_token) + self.visit(node.value) + + @statement + def visit_AnnAssign(self, node): + # TODO: Check default formatting for different values of "simple" + self.visit(node.target) + self.attr(node, 'colon', [self.ws, ':', self.ws], default=': ') + self.visit(node.annotation) + if node.value: + self.attr(node, 'equal', [self.ws, '=', self.ws], default=' = ') + self.visit(node.value) + + @expression + def visit_Await(self, node): + self.attr(node, 'await', ['await', self.ws], default='await ') + self.visit(node.value) + + @statement + def visit_Break(self, node): + self.token('break') + + @statement + def visit_Continue(self, node): + self.token('continue') + + @statement + def visit_Delete(self, node): + self.attr(node, 'del', ['del', self.ws], default='del ') + for i, target in enumerate(node.targets): + self.visit(target) + if target is not node.targets[-1]: + self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ') + + @statement + def visit_Exec(self, node): + # If no formatting info is present, will use parenthesized style + self.attr(node, 'exec', ['exec', self.ws], default='exec') + with self.scope(node, 'body', trailing_comma=False, default_parens=True): + self.visit(node.body) + if node.globals: + self.attr(node, 'in_globals', + [self.ws, self.one_of_symbols('in', ','), self.ws], + default=', ') + self.visit(node.globals) + if node.locals: + self.attr(node, 'in_locals', [self.ws, ',', self.ws], default=', ') + self.visit(node.locals) + + @statement + def visit_Expr(self, node): + self.visit(node.value) + + @statement + def visit_Global(self, node): + self.token('global') + identifiers = [] + for ident in node.names: + if ident != node.names[0]: + identifiers.extend([self.ws, ',']) + identifiers.extend([self.ws, ident]) + self.attr(node, 'names', identifiers) + + @statement + def visit_Import(self, node): + self.token('import') + for i, alias in enumerate(node.names): + self.attr(node, 'alias_prefix_%d' % i, 
[self.ws], default=' ') + self.visit(alias) + if alias != node.names[-1]: + self.attr(node, 'alias_sep_%d' % i, [self.ws, ','], default=',') + + @statement + def visit_ImportFrom(self, node): + self.token('from') + self.attr(node, 'module_prefix', [self.ws], default=' ') + + module_pattern = [] + if node.level > 0: + module_pattern.extend([self.dots(node.level), self.ws]) + if node.module: + parts = node.module.split('.') + for part in parts[:-1]: + module_pattern += [self.ws, part, self.ws, '.'] + module_pattern += [self.ws, parts[-1]] + + self.attr(node, 'module', module_pattern, + deps=('level', 'module'), + default='.' * node.level + (node.module or '')) + self.attr(node, 'module_suffix', [self.ws], default=' ') + + self.token('import') + with self.scope(node, 'names', trailing_comma=True): + for i, alias in enumerate(node.names): + self.attr(node, 'alias_prefix_%d' % i, [self.ws], default=' ') + self.visit(alias) + if alias is not node.names[-1]: + self.attr(node, 'alias_sep_%d' % i, [self.ws, ','], default=',') + + @expression + def visit_NamedExpr(self, node): + self.visit(target) + self.attr(node, 'equal' % i, [self.ws, ':=', self.ws], default=' := ') + self.visit(node.value) + + @statement + def visit_Nonlocal(self, node): + self.token('nonlocal') + identifiers = [] + for ident in node.names: + if ident != node.names[0]: + identifiers.extend([self.ws, ',']) + identifiers.extend([self.ws, ident]) + self.attr(node, 'names', identifiers) + + @statement + def visit_Pass(self, node): + self.token('pass') + + @statement + def visit_Print(self, node): + self.attr(node, 'print_open', ['print', self.ws], default='print ') + if node.dest: + self.attr(node, 'redirection', ['>>', self.ws], default='>>') + self.visit(node.dest) + if node.values: + self.attr(node, 'values_prefix', [self.ws, ',', self.ws], default=', ') + elif not node.nl: + self.attr(node, 'trailing_comma', [self.ws, ','], default=',') + + for i, value in enumerate(node.values): + self.visit(value) + if 
value is not node.values[-1]: + self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ') + elif not node.nl: + self.attr(node, 'trailing_comma', [self.ws, ','], default=',') + + @statement + def visit_Return(self, node): + self.token('return') + if node.value: + self.attr(node, 'return_value_prefix', [self.ws], default=' ') + self.visit(node.value) + + @expression + def visit_Yield(self, node): + self.token('yield') + if node.value: + self.attr(node, 'yield_value_prefix', [self.ws], default=' ') + self.visit(node.value) + + @expression + def visit_YieldFrom(self, node): + self.attr(node, 'yield_from', ['yield', self.ws, 'from', self.ws], + default='yield from ') + self.visit(node.value) + + # ============================================================================ + # == EXPRESSIONS: Anything that evaluates and can be in parens == + # ============================================================================ + + @expression + def visit_Attribute(self, node): + self.visit(node.value) + self.attr(node, 'dot', [self.ws, '.', self.ws], default='.') + self.token(node.attr) + + @expression + def visit_BinOp(self, node): + op_symbol = ast_constants.NODE_TYPE_TO_TOKENS[type(node.op)][0] + self.visit(node.left) + self.attr(node, 'op', [self.ws, op_symbol, self.ws], + default=' %s ' % op_symbol, deps=('op',)) + self.visit(node.right) + + @expression + def visit_BoolOp(self, node): + op_symbol = ast_constants.NODE_TYPE_TO_TOKENS[type(node.op)][0] + for i, value in enumerate(node.values): + self.visit(value) + if value is not node.values[-1]: + self.attr(node, 'op_%d' % i, [self.ws, op_symbol, self.ws], + default=' %s ' % op_symbol, deps=('op',)) + + @expression + def visit_Call(self, node): + self.visit(node.func) + + with self.scope(node, 'arguments', default_parens=True): + # python <3.5: starargs and kwargs are in separate fields + # python 3.5+: starargs args included as a Starred nodes in the arguments + # and kwargs are included as keywords with no 
argument name. + if sys.version_info[:2] >= (3, 5): + any_args = self.visit_Call_arguments35(node) + else: + any_args = self.visit_Call_arguments(node) + if any_args: + self.optional_token(node, 'trailing_comma', ',') + + def visit_Call_arguments(self, node): + def arg_location(tup): + arg = tup[1] + if isinstance(arg, ast.keyword): + arg = arg.value + return (getattr(arg, "lineno", 0), getattr(arg, "col_offset", 0)) + + if node.starargs: + sorted_keywords = sorted( + [(None, kw) for kw in node.keywords] + [('*', node.starargs)], + key=arg_location) + else: + sorted_keywords = [(None, kw) for kw in node.keywords] + all_args = [(None, n) for n in node.args] + sorted_keywords + if node.kwargs: + all_args.append(('**', node.kwargs)) + + for i, (prefix, arg) in enumerate(all_args): + if prefix is not None: + self.attr(node, '%s_prefix' % prefix, [self.ws, prefix], default=prefix) + self.visit(arg) + if arg is not all_args[-1][1]: + self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ') + return bool(all_args) + + def visit_Call_arguments35(self, node): + def arg_compare(a1, a2): + """Old-style comparator for sorting args.""" + def is_arg(a): + return not isinstance(a, (ast.keyword, ast.Starred)) + + # No kwarg can come before a regular arg (but Starred can be wherever) + if is_arg(a1) and isinstance(a2, ast.keyword): + return -1 + elif is_arg(a2) and isinstance(a1, ast.keyword): + return 1 + + # If no lineno or col_offset on one of the args, they compare as equal + # (since sorting is stable, this should leave them mostly where they + # were in the initial list). 
+ def get_pos(a): + if isinstance(a, ast.keyword): + a = a.value + return (getattr(a, 'lineno', None), getattr(a, 'col_offset', None)) + + pos1 = get_pos(a1) + pos2 = get_pos(a2) + + if None in pos1 or None in pos2: + return 0 + + # If both have lineno/col_offset set, use that to sort them + return -1 if pos1 < pos2 else 0 if pos1 == pos2 else 1 + + # Note that this always sorts keywords identically to just sorting by + # lineno/col_offset, except in cases where that ordering would have been + # a syntax error (named arg before unnamed arg). + all_args = sorted(node.args + node.keywords, + key=functools.cmp_to_key(arg_compare)) + + for i, arg in enumerate(all_args): + self.visit(arg) + if arg is not all_args[-1]: + self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ') + return bool(all_args) + + def visit_Starred(self, node): + self.attr(node, 'star', ['*', self.ws], default='*') + self.visit(node.value) + + @expression + def visit_Compare(self, node): + self.visit(node.left) + for i, (op, comparator) in enumerate(zip(node.ops, node.comparators)): + self.attr(node, 'op_prefix_%d' % i, [self.ws], default=' ') + self.visit(op) + self.attr(node, 'op_suffix_%d' % i, [self.ws], default=' ') + self.visit(comparator) + + @expression + def visit_Dict(self, node): + self.token('{') + for i, key, value in zip(range(len(node.keys)), node.keys, node.values): + if key is None: + # Handle Python 3.5+ dict unpacking syntax (PEP-448) + self.attr(node, 'starstar_%d' % i, [self.ws, '**'], default='**') + else: + self.visit(key) + self.attr(node, 'key_val_sep_%d' % i, [self.ws, ':', self.ws], + default=': ') + self.visit(value) + if value is not node.values[-1]: + self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ') + self.optional_token(node, 'extracomma', ',', allow_whitespace_prefix=True) + self.attr(node, 'close_prefix', [self.ws, '}'], default='}') + + @expression + def visit_DictComp(self, node): + self.attr(node, 'open_dict', ['{', self.ws], 
default='{') + self.visit(node.key) + self.attr(node, 'key_val_sep', [self.ws, ':', self.ws], default=': ') + self.visit(node.value) + for comp in node.generators: + self.visit(comp) + self.attr(node, 'close_dict', [self.ws, '}'], default='}') + + @expression + def visit_GeneratorExp(self, node): + self._comp_exp(node) + + @expression + def visit_IfExp(self, node): + self.visit(node.body) + self.attr(node, 'if', [self.ws, 'if', self.ws], default=' if ') + self.visit(node.test) + self.attr(node, 'else', [self.ws, 'else', self.ws], default=' else ') + self.visit(node.orelse) + + @expression + def visit_Lambda(self, node): + self.attr(node, 'lambda_def', ['lambda', self.ws], default='lambda ') + self.visit(node.args) + self.attr(node, 'open_lambda', [self.ws, ':', self.ws], default=': ') + self.visit(node.body) + + @expression + def visit_List(self, node): + self.attr(node, 'list_open', ['[', self.ws], default='[') + + for i, elt in enumerate(node.elts): + self.visit(elt) + if elt is not node.elts[-1]: + self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ') + if node.elts: + self.optional_token(node, 'extracomma', ',', allow_whitespace_prefix=True) + + self.attr(node, 'list_close', [self.ws, ']'], default=']') + + @expression + def visit_ListComp(self, node): + self._comp_exp(node, open_brace='[', close_brace=']') + + def _comp_exp(self, node, open_brace=None, close_brace=None): + if open_brace: + self.attr(node, 'compexp_open', [open_brace, self.ws], default=open_brace) + self.visit(node.elt) + for i, comp in enumerate(node.generators): + self.visit(comp) + if close_brace: + self.attr(node, 'compexp_close', [self.ws, close_brace], + default=close_brace) + + @expression + def visit_Name(self, node): + self.token(node.id) + + @expression + def visit_NameConstant(self, node): + self.token(str(node.value)) + + @expression + def visit_Repr(self, node): + self.attr(node, 'repr_open', ['`', self.ws], default='`') + self.visit(node.value) + self.attr(node, 
'repr_close', [self.ws, '`'], default='`') + + @expression + def visit_Set(self, node): + self.attr(node, 'set_open', ['{', self.ws], default='{') + + for i, elt in enumerate(node.elts): + self.visit(elt) + if elt is not node.elts[-1]: + self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], default=', ') + else: + self.optional_token(node, 'extracomma', ',', + allow_whitespace_prefix=True) + + self.attr(node, 'set_close', [self.ws, '}'], default='}') + + @expression + def visit_SetComp(self, node): + self._comp_exp(node, open_brace='{', close_brace='}') + + @expression + def visit_Subscript(self, node): + self.visit(node.value) + self.attr(node, 'slice_open', [self.ws, '[', self.ws], default='[') + self.visit(node.slice) + self.attr(node, 'slice_close', [self.ws, ']'], default=']') + + @expression + def visit_Tuple(self, node): + with self.scope(node, 'elts', default_parens=True): + for i, elt in enumerate(node.elts): + self.visit(elt) + if elt is not node.elts[-1]: + self.attr(node, 'comma_%d' % i, [self.ws, ',', self.ws], + default=', ') + else: + self.optional_token(node, 'extracomma', ',', + allow_whitespace_prefix=True, + default=len(node.elts) == 1) + + @expression + def visit_UnaryOp(self, node): + op_symbol = ast_constants.NODE_TYPE_TO_TOKENS[type(node.op)][0] + self.attr(node, 'op', [op_symbol, self.ws], default=op_symbol, deps=('op',)) + self.visit(node.operand) + + # ============================================================================ + # == OPERATORS AND TOKENS: Anything that's just whitespace and tokens == + # ============================================================================ + + @space_around + def visit_Ellipsis(self, node): + self.token('...') + + def visit_Add(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_Sub(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_Mult(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def 
visit_Div(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_Mod(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_Pow(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_LShift(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_RShift(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_BitAnd(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_BitOr(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_BitXor(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_FloorDiv(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_Invert(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_Not(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_UAdd(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_USub(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_Eq(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_NotEq(self, node): + self.attr(node, 'operator', [self.one_of_symbols('!=', '<>')]) + + def visit_Lt(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_LtE(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_Gt(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_GtE(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_Is(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_IsNot(self, node): + self.attr(node, 'content', ['is', self.ws, 'not'], default='is not') 
+ + def visit_In(self, node): + self.token(ast_constants.NODE_TYPE_TO_TOKENS[type(node)][0]) + + def visit_NotIn(self, node): + self.attr(node, 'content', ['not', self.ws, 'in'], default='not in') + + # ============================================================================ + # == MISC NODES: Nodes which are neither statements nor expressions == + # ============================================================================ + + def visit_alias(self, node): + name_pattern = [] + parts = node.name.split('.') + for part in parts[:-1]: + name_pattern += [self.ws, part, self.ws, '.'] + name_pattern += [self.ws, parts[-1]] + self.attr(node, 'name', name_pattern, + deps=('name',), + default=node.name) + if node.asname is not None: + self.attr(node, 'asname', [self.ws, 'as', self.ws], default=' as ') + self.token(node.asname) + + @space_around + def visit_arg(self, node): + self.token(node.arg) + if node.annotation is not None: + self.attr(node, 'annotation_prefix', [self.ws, ':', self.ws], + default=': ') + self.visit(node.annotation) + + @space_around + def visit_arguments(self, node): + # In Python 3, args appearing after *args must be kwargs + kwonlyargs = getattr(node, 'kwonlyargs', []) + kw_defaults = getattr(node, 'kw_defaults', []) + assert len(kwonlyargs) == len(kw_defaults) + + total_args = sum((len(node.args + kwonlyargs), + len(getattr(node, 'posonlyargs', [])), + 1 if node.vararg else 0, + 1 if node.kwarg else 0)) + arg_i = 0 + + pos_args = getattr(node, 'posonlyargs', []) + node.args + positional = pos_args[:-len(node.defaults)] if node.defaults else pos_args + keyword = node.args[-len(node.defaults):] if node.defaults else node.args + + for arg in positional: + self.visit(arg) + arg_i += 1 + if arg_i < total_args: + self.attr(node, 'comma_%d' % arg_i, [self.ws, ',', self.ws], + default=', ') + if arg_i == len(getattr(node, 'posonlyargs', [])): + self.attr(node, 'posonly_sep', [self.ws, '/', self.ws, ',', self.ws], + default='/, ') + + for i, (arg, 
default) in enumerate(zip(keyword, node.defaults)): + self.visit(arg) + self.attr(node, 'default_%d' % i, [self.ws, '=', self.ws], + default='=') + self.visit(default) + arg_i += 1 + if arg_i < total_args: + self.attr(node, 'comma_%d' % arg_i, [self.ws, ',', self.ws], + default=', ') + + if node.vararg: + self.attr(node, 'vararg_prefix', [self.ws, '*', self.ws], default='*') + if isinstance(node.vararg, ast.AST): + self.visit(node.vararg) + else: + self.token(node.vararg) + self.attr(node, 'vararg_suffix', [self.ws]) + arg_i += 1 + if arg_i < total_args: + self.token(',') + elif kwonlyargs: + # If no vararg, but we have kwonlyargs, insert a naked *, which will + # definitely not be the last arg. + self.attr(node, 'kwonly_sep', [self.ws, '*', self.ws, ',', self.ws]); + + for i, (arg, default) in enumerate(zip(kwonlyargs, kw_defaults)): + self.visit(arg) + if default is not None: + self.attr(node, 'kw_default_%d' % i, [self.ws, '=', self.ws], + default='=') + self.visit(default) + arg_i += 1 + if arg_i < total_args: + self.attr(node, 'comma_%d' % arg_i, [self.ws, ',', self.ws], + default=', ') + + if node.kwarg: + self.attr(node, 'kwarg_prefix', [self.ws, '**', self.ws], default='**') + if isinstance(node.kwarg, ast.AST): + self.visit(node.kwarg) + else: + self.token(node.kwarg) + self.attr(node, 'kwarg_suffix', [self.ws]) + + @space_around + def visit_comprehension(self, node): + if getattr(node, 'is_async', False): + self.attr(node, 'for', [self.ws, 'async', self.ws, 'for', self.ws], + default=' async for ') + else: + self.attr(node, 'for', [self.ws, 'for', self.ws], default=' for ') + self.visit(node.target) + self.attr(node, 'in', [self.ws, 'in', self.ws], default=' in ') + self.visit(node.iter) + for i, if_expr in enumerate(node.ifs): + self.attr(node, 'if_%d' % i, [self.ws, 'if', self.ws], default=' if ') + self.visit(if_expr) + + @space_around + def visit_keyword(self, node): + if node.arg is None: + self.attr(node, 'stars', ['**', self.ws], default='**') + 
else: + self.token(node.arg) + self.attr(node, 'eq', [self.ws, '='], default='=') + self.visit(node.value) + + @space_left + def visit_Index(self, node): + self.visit(node.value) + + @space_left + def visit_ExtSlice(self, node): + for i, dim in enumerate(node.dims): + self.visit(dim) + if dim is not node.dims[-1]: + self.attr(node, 'dim_sep_%d' % i, [self.ws, ',', self.ws], default=', ') + self.optional_token(node, 'trailing_comma', ',', default=False) + + @space_left + def visit_Slice(self, node): + if node.lower: + self.visit(node.lower) + self.attr(node, 'lowerspace', [self.ws, ':', self.ws], default=':') + if node.upper: + self.visit(node.upper) + + self.attr(node, 'stepspace1', [self.ws]) + self.optional_token(node, 'step_colon', ':') + self.attr(node, 'stepspace2', [self.ws]) + if node.step and self.check_slice_includes_step(node): + self.optional_token(node, 'step_colon_2', ':', default=True) + node.step.is_explicit_step = True + self.visit(node.step) + + def check_slice_includes_step(self, node): + """Helper function for Slice node to determine whether to visit its step.""" + # This is needed because of a bug in the 2.7 parser which treats + # a[::] as Slice(lower=None, upper=None, step=Name(id='None')) + # but also treats a[::None] exactly the same. 
+ if not node.step: + return False + if getattr(node.step, 'is_explicit_step', False): + return True + return not (isinstance(node.step, ast.Name) and node.step.id == 'None') + + @fstring_expression + def visit_FormattedValue(self, node): + self.visit(node.value) + if node.conversion != -1: + self.attr(node, 'conversion', + [self.ws, '!', chr(node.conversion)], deps=('conversion',), + default='!%c' % node.conversion) + if node.format_spec: + self.attr(node, 'format_spec_prefix', [self.ws, ':', self.ws], + default=':') + self.visit(node.format_spec) + + +class AnnotationError(Exception): + """An exception for when we failed to annotate the tree.""" + + +class AstAnnotator(BaseVisitor): + + def __init__(self, source): + super(AstAnnotator, self).__init__() + self.tokens = token_generator.TokenGenerator(source) + + def visit(self, node): + try: + fmt.set(node, 'indent', self._indent) + fmt.set(node, 'indent_diff', self._indent_diff) + super(AstAnnotator, self).visit(node) + except (TypeError, ValueError, IndexError, KeyError) as e: + raise AnnotationError(e) + + def indented(self, node, children_attr): + """Generator which annotates child nodes with their indentation level.""" + children = getattr(node, children_attr) + cur_loc = self.tokens._loc + next_loc = self.tokens.peek_non_whitespace().start + # Special case: if the children are on the same line, then there is no + # indentation level to track. 
+ if cur_loc[0] == next_loc[0]: + indent_diff = self._indent_diff + self._indent_diff = None + for child in children: + yield child + self._indent_diff = indent_diff + return + + prev_indent = self._indent + prev_indent_diff = self._indent_diff + + # Find the indent level of the first child + indent_token = self.tokens.peek_conditional( + lambda t: t.type == token_generator.TOKENS.INDENT) + new_indent = indent_token.src + new_diff = _get_indent_diff(prev_indent, new_indent) + if not new_diff: + new_diff = ' ' * 4 # Sensible default + print('Indent detection failed (line %d); inner indentation level is not ' + 'more than the outer indentation.' % cur_loc[0], file=sys.stderr) + + # Set the indent level to the child's indent and iterate over the children + self._indent = new_indent + self._indent_diff = new_diff + for child in children: + yield child + # Store the suffix at this indentation level, which could be many lines + fmt.set(node, 'block_suffix_%s' % children_attr, + self.tokens.block_whitespace(self._indent)) + + # Dedent back to the previous level + self._indent = prev_indent + self._indent_diff = prev_indent_diff + + @expression + def visit_Num(self, node): + """Annotate a Num node with the exact number format.""" + token_number_type = token_generator.TOKENS.NUMBER + contentargs = [lambda: self.tokens.next_of_type(token_number_type).src] + if self.tokens.peek().src == '-': + contentargs.insert(0, '-') + self.attr(node, 'content', contentargs, deps=('n',), default=str(node.n)) + + @expression + def visit_Str(self, node): + """Annotate a Str node with the exact string format.""" + self.attr(node, 'content', [self.tokens.str], deps=('s',), default=node.s) + + @expression + def visit_JoinedStr(self, node): + """Annotate a JoinedStr node with the fstr formatting metadata.""" + fstr_iter = self.tokens.fstr()() + res = '' + values = (v for v in node.values if isinstance(v, ast.FormattedValue)) + while True: + res_part, tg = next(fstr_iter) + res += res_part + if 
tg is None: + break + prev_tokens = self.tokens + self.tokens = tg + self.visit(next(values)) + self.tokens = prev_tokens + + self.attr(node, 'content', [lambda: res], default=res) + + @expression + def visit_Bytes(self, node): + """Annotate a Bytes node with the exact string format.""" + self.attr(node, 'content', [self.tokens.str], deps=('s',), default=node.s) + + @space_around + def visit_Ellipsis(self, node): + # Ellipsis is sometimes split into 3 tokens and other times a single token + # Account for both forms when parsing the input. + if self.tokens.peek().src == '...': + self.token('...') + else: + for i in range(3): + self.token('.') + + def check_is_elif(self, node): + """Return True iff the If node is an `elif` in the source.""" + next_tok = self.tokens.next_name() + return isinstance(node, ast.If) and next_tok.src == 'elif' + + def check_is_continued_try(self, node): + """Return True iff the TryExcept node is a continued `try` in the source.""" + return (isinstance(node, ast.TryExcept) and + self.tokens.peek_non_whitespace().src != 'try') + + def check_is_continued_with(self, node): + """Return True iff the With node is a continued `with` in the source.""" + return isinstance(node, ast.With) and self.tokens.peek().src == ',' + + def check_slice_includes_step(self, node): + """Helper function for Slice node to determine whether to visit its step.""" + # This is needed because of a bug in the 2.7 parser which treats + # a[::] as Slice(lower=None, upper=None, step=Name(id='None')) + # but also treats a[::None] exactly the same. 
+ return self.tokens.peek_non_whitespace().src not in '],' + + def ws(self, max_lines=None, semicolon=False, comment=True): + """Parse some whitespace from the source tokens and return it.""" + next_token = self.tokens.peek() + if semicolon and next_token and next_token.src == ';': + result = self.tokens.whitespace() + self.token(';') + next_token = self.tokens.peek() + if next_token.type in (token_generator.TOKENS.NL, + token_generator.TOKENS.NEWLINE): + result += self.tokens.whitespace(max_lines=1) + return result + return self.tokens.whitespace(max_lines=max_lines, comment=comment) + + def dots(self, num_dots): + """Parse a number of dots.""" + def _parse_dots(): + return self.tokens.dots(num_dots) + return _parse_dots + + def block_suffix(self, node, indent_level): + fmt.set(node, 'suffix', self.tokens.block_whitespace(indent_level)) + + def token(self, token_val): + """Parse a single token with exactly the given value.""" + token = self.tokens.next() + if token.src != token_val: + raise AnnotationError("Expected %r but found %r\nline %d: %s" % ( + token_val, token.src, token.start[0], token.line)) + + # If the token opens or closes a parentheses scope, keep track of it + if token.src in '({[': + self.tokens.hint_open() + elif token.src in ')}]': + self.tokens.hint_closed() + + return token.src + + def optional_token(self, node, attr_name, token_val, + allow_whitespace_prefix=False, default=False): + """Try to parse a token and attach it to the node.""" + del default + fmt.append(node, attr_name, '') + token = (self.tokens.peek_non_whitespace() + if allow_whitespace_prefix else self.tokens.peek()) + if token and token.src == token_val: + parsed = '' + if allow_whitespace_prefix: + parsed += self.ws() + fmt.append(node, attr_name, + parsed + self.tokens.next().src + self.ws()) + + def one_of_symbols(self, *symbols): + """Account for one of the given symbols.""" + def _one_of_symbols(): + next_token = self.tokens.next() + found = next((s for s in symbols if s == 
next_token.src), None) + if found is None: + raise AnnotationError( + 'Expected one of: %r, but found: %r' % (symbols, next_token.src)) + return found + return _one_of_symbols + + def attr(self, node, attr_name, attr_vals, deps=None, default=None): + """Parses some source and sets an attribute on the given node. + + Stores some arbitrary formatting information on the node. This takes a list + attr_vals which tell what parts of the source to parse. The result of each + function is concatenated onto the formatting data, and strings in this list + are a shorthand to look for an exactly matching token. + + For example: + self.attr(node, 'foo', ['(', self.ws, 'Hello, world!', self.ws, ')'], + deps=('s',), default=node.s) + + is a rudimentary way to parse a parenthesized string. After running this, + the matching source code for this node will be stored in its formatting + dict under the key 'foo'. The result might be `(\n 'Hello, world!'\n)`. + + This also keeps track of the current value of each of the dependencies. + In the above example, we would have looked for the string 'Hello, world!' + because that's the value of node.s, however, when we print this back, we + want to know if the value of node.s has changed since this time. If any of + the dependent values has changed, the default would be used instead. + + Arguments: + node: (ast.AST) An AST node to attach formatting information to. + attr_name: (string) Name to store the formatting information under. + attr_vals: (list of functions/strings) Each item is either a function + that parses some source and return a string OR a string to match + exactly (as a token). + deps: (optional, set of strings) Attributes of the node which attr_vals + depends on. + default: (string) Unused here. 
+ """ + del default # unused + if deps: + for dep in deps: + fmt.set(node, dep + '__src', getattr(node, dep, None)) + attr_parts = [] + for attr_val in attr_vals: + if isinstance(attr_val, six.string_types): + attr_parts.append(self.token(attr_val)) + else: + attr_parts.append(attr_val()) + fmt.set(node, attr_name, ''.join(attr_parts)) + + def scope(self, node, attr=None, trailing_comma=False, default_parens=False): + """Return a context manager to handle a parenthesized scope. + + Arguments: + node: (ast.AST) Node to store the scope prefix and suffix on. + attr: (string, optional) Attribute of the node contained in the scope, if + any. For example, as `None`, the scope would wrap the entire node, but + as 'bases', the scope might wrap only the bases of a class. + trailing_comma: (boolean) If True, allow a trailing comma at the end. + default_parens: (boolean) If True and no formatting information is + present, the scope would be assumed to be parenthesized. + """ + del default_parens + return self.tokens.scope(node, attr=attr, trailing_comma=trailing_comma) + + def _optional_token(self, token_type, token_val): + token = self.tokens.peek() + if not token or token.type != token_type or token.src != token_val: + return '' + else: + self.tokens.next() + return token.src + self.ws() + + +def _get_indent_width(indent): + width = 0 + for c in indent: + if c == ' ': + width += 1 + elif c == '\t': + width += 8 - (width % 8) + return width + + +def _ltrim_indent(indent, remove_width): + width = 0 + for i, c in enumerate(indent): + if width == remove_width: + break + if c == ' ': + width += 1 + elif c == '\t': + if width + 8 - (width % 8) <= remove_width: + width += 8 - (width % 8) + else: + return ' ' * (width + 8 - remove_width) + indent[i + 1:] + return indent[i:] + + +def _get_indent_diff(outer, inner): + """Computes the whitespace added to an indented block. + + Finds the portion of an indent prefix that is added onto the outer indent. 
In + most cases, the inner indent starts with the outer indent, but this is not + necessarily true. For example, the outer block could be indented to four + spaces and its body indented with one tab (effectively 8 spaces). + + Arguments: + outer: (string) Indentation of the outer block. + inner: (string) Indentation of the inner block. + Returns: + The string whitespace which is added to the indentation level when moving + from outer to inner. + """ + outer_w = _get_indent_width(outer) + inner_w = _get_indent_width(inner) + diff_w = inner_w - outer_w + + if diff_w <= 0: + return None + + return _ltrim_indent(inner, inner_w - diff_w) diff --git a/lib/python3.10/site-packages/pasta/base/annotate_test.py b/lib/python3.10/site-packages/pasta/base/annotate_test.py new file mode 100644 index 0000000000000000000000000000000000000000..44c969f735926b7ab116df2c64e9c952036a3d19 --- /dev/null +++ b/lib/python3.10/site-packages/pasta/base/annotate_test.py @@ -0,0 +1,477 @@ +# coding=utf-8 +"""Tests for annotate.""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast +import difflib +import itertools +import os.path +from six import with_metaclass +import sys +import textwrap +import unittest + +import pasta +from pasta.base import annotate +from pasta.base import ast_utils +from pasta.base import codegen +from pasta.base import formatting as fmt +from pasta.base import test_utils + +TESTDATA_DIR = os.path.realpath( + os.path.join(os.path.dirname(pasta.__file__), '../testdata')) + + +class PrefixSuffixTest(test_utils.TestCase): + + def test_block_suffix(self): + src_tpl = textwrap.dedent('''\ + {open_block} + pass #a + #b + #c + + #d + #e + a + ''') + test_cases = ( + # first: attribute of the node with the last block + # second: code snippet to open a block + ('body', 'def x():'), + ('body', 'class X:'), + ('body', 'if x:'), + ('orelse', 'if x:\n y\nelse:'), + ('body', 'if x:\n y\nelif y:'), + ('body', 'while x:'), + ('orelse', 'while x:\n y\nelse:'), + ('finalbody', 'try:\n x\nfinally:'), + ('body', 'try:\n x\nexcept:'), + ('orelse', 'try:\n x\nexcept:\n y\nelse:'), + ('body', 'with x:'), + ('body', 'with x, y:'), + ('body', 'with x:\n with y:'), + ('body', 'for x in y:'), + ) + def is_node_for_suffix(node, children_attr): + # Return True if this node contains the 'pass' statement + val = getattr(node, children_attr, None) + return isinstance(val, list) and type(val[0]) == ast.Pass + + for children_attr, open_block in test_cases: + src = src_tpl.format(open_block=open_block) + t = pasta.parse(src) + node_finder = ast_utils.FindNodeVisitor( + lambda node: is_node_for_suffix(node, children_attr)) + node_finder.visit(t) + node = node_finder.results[0] + expected = ' #b\n #c\n\n #d\n' + actual = str(fmt.get(node, 'block_suffix_%s' % children_attr)) + self.assertMultiLineEqual( + expected, actual, + 'Incorrect suffix for code:\n%s\nNode: %s (line %d)\nDiff:\n%s' % ( + src, node, node.lineno, 
'\n'.join(_get_diff(actual, expected)))) + self.assertMultiLineEqual(src, pasta.dump(t)) + + def test_module_suffix(self): + src = 'foo\n#bar\n\n#baz\n' + t = pasta.parse(src) + self.assertEqual(src[src.index('#bar'):], fmt.get(t, 'suffix')) + + def test_no_block_suffix_for_single_line_statement(self): + src = 'if x: return y\n #a\n#b\n' + t = pasta.parse(src) + self.assertIsNone(fmt.get(t.body[0], 'block_suffix_body')) + + def test_expression_prefix_suffix(self): + src = 'a\n\nfoo\n\n\nb\n' + t = pasta.parse(src) + self.assertEqual('\n', fmt.get(t.body[1], 'prefix')) + self.assertEqual('\n', fmt.get(t.body[1], 'suffix')) + + def test_statement_prefix_suffix(self): + src = 'a\n\ndef foo():\n return bar\n\n\nb\n' + t = pasta.parse(src) + self.assertEqual('\n', fmt.get(t.body[1], 'prefix')) + self.assertEqual('', fmt.get(t.body[1], 'suffix')) + + +class IndentationTest(test_utils.TestCase): + + def test_indent_levels(self): + src = textwrap.dedent('''\ + foo('begin') + if a: + foo('a1') + if b: + foo('b1') + if c: + foo('c1') + foo('b2') + foo('a2') + foo('end') + ''') + t = pasta.parse(src) + call_nodes = ast_utils.find_nodes_by_type(t, (ast.Call,)) + call_nodes.sort(key=lambda node: node.lineno) + begin, a1, b1, c1, b2, a2, end = call_nodes + + self.assertEqual('', fmt.get(begin, 'indent')) + self.assertEqual(' ', fmt.get(a1, 'indent')) + self.assertEqual(' ', fmt.get(b1, 'indent')) + self.assertEqual(' ', fmt.get(c1, 'indent')) + self.assertEqual(' ', fmt.get(b2, 'indent')) + self.assertEqual(' ', fmt.get(a2, 'indent')) + self.assertEqual('', fmt.get(end, 'indent')) + + def test_indent_levels_same_line(self): + src = 'if a: b; c\n' + t = pasta.parse(src) + if_node = t.body[0] + b, c = if_node.body + self.assertIsNone(fmt.get(b, 'indent_diff')) + self.assertIsNone(fmt.get(c, 'indent_diff')) + + def test_indent_depths(self): + template = 'if a:\n{first}if b:\n{first}{second}foo()\n' + indents = (' ', ' ' * 2, ' ' * 4, ' ' * 8, '\t', '\t' * 2) + + for first, second 
in itertools.product(indents, indents): + src = template.format(first=first, second=second) + t = pasta.parse(src) + outer_if_node = t.body[0] + inner_if_node = outer_if_node.body[0] + call_node = inner_if_node.body[0] + + self.assertEqual('', fmt.get(outer_if_node, 'indent')) + self.assertEqual('', fmt.get(outer_if_node, 'indent_diff')) + self.assertEqual(first, fmt.get(inner_if_node, 'indent')) + self.assertEqual(first, fmt.get(inner_if_node, 'indent_diff')) + self.assertEqual(first + second, fmt.get(call_node, 'indent')) + self.assertEqual(second, fmt.get(call_node, 'indent_diff')) + + def test_indent_multiline_string(self): + src = textwrap.dedent('''\ + class A: + """Doc + string.""" + pass + ''') + t = pasta.parse(src) + docstring, pass_stmt = t.body[0].body + self.assertEqual(' ', fmt.get(docstring, 'indent')) + self.assertEqual(' ', fmt.get(pass_stmt, 'indent')) + + def test_indent_multiline_string_with_newline(self): + src = textwrap.dedent('''\ + class A: + """Doc\n + string.""" + pass + ''') + t = pasta.parse(src) + docstring, pass_stmt = t.body[0].body + self.assertEqual(' ', fmt.get(docstring, 'indent')) + self.assertEqual(' ', fmt.get(pass_stmt, 'indent')) + + def test_scope_trailing_comma(self): + template = 'def foo(a, b{trailing_comma}): pass' + for trailing_comma in ('', ',', ' , '): + tree = pasta.parse(template.format(trailing_comma=trailing_comma)) + self.assertEqual(trailing_comma.lstrip(' ') + ')', + fmt.get(tree.body[0], 'args_suffix')) + + template = 'class Foo(a, b{trailing_comma}): pass' + for trailing_comma in ('', ',', ' , '): + tree = pasta.parse(template.format(trailing_comma=trailing_comma)) + self.assertEqual(trailing_comma.lstrip(' ') + ')', + fmt.get(tree.body[0], 'bases_suffix')) + + template = 'from mod import (a, b{trailing_comma})' + for trailing_comma in ('', ',', ' , '): + tree = pasta.parse(template.format(trailing_comma=trailing_comma)) + self.assertEqual(trailing_comma + ')', + fmt.get(tree.body[0], 'names_suffix')) + + 
def test_indent_extra_newlines(self): + src = textwrap.dedent('''\ + if a: + + b + ''') + t = pasta.parse(src) + if_node = t.body[0] + b = if_node.body[0] + self.assertEqual(' ', fmt.get(b, 'indent_diff')) + + def test_indent_extra_newlines_with_comment(self): + src = textwrap.dedent('''\ + if a: + #not here + + b + ''') + t = pasta.parse(src) + if_node = t.body[0] + b = if_node.body[0] + self.assertEqual(' ', fmt.get(b, 'indent_diff')) + + def test_autoindent(self): + src = textwrap.dedent('''\ + def a(): + b + c + ''') + expected = textwrap.dedent('''\ + def a(): + b + new_node + ''') + t = pasta.parse(src) + # Repace the second node and make sure the indent level is corrected + t.body[0].body[1] = ast.Expr(ast.Name(id='new_node')) + self.assertMultiLineEqual(expected, codegen.to_str(t)) + + @test_utils.requires_features('mixed_tabs_spaces') + def test_mixed_tabs_spaces_indentation(self): + pasta.parse(textwrap.dedent('''\ + if a: + b + {ONETAB}c + ''').format(ONETAB='\t')) + + @test_utils.requires_features('mixed_tabs_spaces') + def test_tab_below_spaces(self): + for num_spaces in range(1, 8): + t = pasta.parse(textwrap.dedent('''\ + if a: + {WS}if b: + {ONETAB}c + ''').format(ONETAB='\t', WS=' ' * num_spaces)) + node_c = t.body[0].body[0].body[0] + self.assertEqual(fmt.get(node_c, 'indent_diff'), ' ' * (8 - num_spaces)) + + @test_utils.requires_features('mixed_tabs_spaces') + def test_tabs_below_spaces_and_tab(self): + for num_spaces in range(1, 8): + t = pasta.parse(textwrap.dedent('''\ + if a: + {WS}{ONETAB}if b: + {ONETAB}{ONETAB}c + ''').format(ONETAB='\t', WS=' ' * num_spaces)) + node_c = t.body[0].body[0].body[0] + self.assertEqual(fmt.get(node_c, 'indent_diff'), '\t') + + +def _is_syntax_valid(filepath): + with open(filepath, 'r') as f: + try: + ast.parse(f.read()) + except SyntaxError: + return False + return True + + +class SymmetricTestMeta(type): + + def __new__(mcs, name, bases, inst_dict): + # Helper function to generate a test method + def 
symmetric_test_generator(filepath): + def test(self): + with open(filepath, 'r') as handle: + src = handle.read() + t = ast_utils.parse(src) + annotator = annotate.AstAnnotator(src) + annotator.visit(t) + self.assertMultiLineEqual(codegen.to_str(t), src) + self.assertEqual([], annotator.tokens._parens, 'Unmatched parens') + return test + + # Add a test method for each input file + test_method_prefix = 'test_symmetric_' + data_dir = os.path.join(TESTDATA_DIR, 'ast') + for dirpath, dirs, files in os.walk(data_dir): + for filename in files: + if filename.endswith('.in'): + full_path = os.path.join(dirpath, filename) + inst_dict[test_method_prefix + filename[:-3]] = unittest.skipIf( + not _is_syntax_valid(full_path), + 'Test contains syntax not supported by this version.', + )(symmetric_test_generator(full_path)) + return type.__new__(mcs, name, bases, inst_dict) + + +class SymmetricTest(with_metaclass(SymmetricTestMeta, test_utils.TestCase)): + """Validates the symmetry property. + + After parsing + annotating a module, regenerating the source code for it + should yield the same result. 
+ """ + + +def _get_node_identifier(node): + for attr in ('id', 'name', 'attr', 'arg', 'module'): + if isinstance(getattr(node, attr, None), str): + return getattr(node, attr, '') + return '' + + +class PrefixSuffixGoldenTestMeta(type): + + def __new__(mcs, name, bases, inst_dict): + # Helper function to generate a test method + def golden_test_generator(input_file, golden_file): + def test(self): + with open(input_file, 'r') as handle: + src = handle.read() + t = ast_utils.parse(src) + annotator = annotate.AstAnnotator(src) + annotator.visit(t) + + def escape(s): + return '' if s is None else s.replace('\n', '\\n') + + result = '\n'.join( + "{0:12} {1:20} \tprefix=|{2}|\tsuffix=|{3}|\tindent=|{4}|".format( + str((getattr(n, 'lineno', -1), getattr(n, 'col_offset', -1))), + type(n).__name__ + ' ' + _get_node_identifier(n), + escape(fmt.get(n, 'prefix')), + escape(fmt.get(n, 'suffix')), + escape(fmt.get(n, 'indent'))) + for n in ast.walk(t)) + '\n' + + # If specified, write the golden data instead of checking it + if getattr(self, 'generate_goldens', False): + if not os.path.isdir(os.path.dirname(golden_file)): + os.makedirs(os.path.dirname(golden_file)) + with open(golden_file, 'w') as f: + f.write(result) + print('Wrote: ' + golden_file) + return + + try: + with open(golden_file, 'r') as f: + golden = f.read() + except IOError: + self.fail('Missing golden data.') + + self.assertMultiLineEqual(golden, result) + return test + + # Add a test method for each input file + test_method_prefix = 'test_golden_prefix_suffix_' + data_dir = os.path.join(TESTDATA_DIR, 'ast') + python_version = '%d.%d' % sys.version_info[:2] + for dirpath, dirs, files in os.walk(data_dir): + for filename in files: + if filename.endswith('.in'): + full_path = os.path.join(dirpath, filename) + golden_path = os.path.join(dirpath, 'golden', python_version, + filename[:-3] + '.out') + inst_dict[test_method_prefix + filename[:-3]] = unittest.skipIf( + not _is_syntax_valid(full_path), + 'Test contains 
syntax not supported by this version.', + )(golden_test_generator(full_path, golden_path)) + return type.__new__(mcs, name, bases, inst_dict) + + +class PrefixSuffixGoldenTest(with_metaclass(PrefixSuffixGoldenTestMeta, + test_utils.TestCase)): + """Checks the prefix and suffix on each node in the AST. + + This uses golden files in testdata/ast/golden. To regenerate these files, run + python setup.py test -s pasta.base.annotate_test.generate_goldens + """ + + maxDiff = None + + +class ManualEditsTest(test_utils.TestCase): + """Tests that we can handle ASTs that have been modified. + + Such ASTs may lack position information (lineno/col_offset) on some nodes. + """ + + def test_call_no_pos(self): + """Tests that Call node traversal works without position information.""" + src = 'f(a)' + t = pasta.parse(src) + node = ast_utils.find_nodes_by_type(t, (ast.Call,))[0] + node.keywords.append(ast.keyword(arg='b', value=ast.Num(n=0))) + self.assertEqual('f(a, b=0)', pasta.dump(t)) + + def test_call_illegal_pos(self): + """Tests that Call node traversal works even with illegal positions.""" + src = 'f(a)' + t = pasta.parse(src) + node = ast_utils.find_nodes_by_type(t, (ast.Call,))[0] + node.keywords.append(ast.keyword(arg='b', value=ast.Num(n=0))) + + # This position would put b=0 before a, so it should be ignored. 
+ node.keywords[-1].value.lineno = 0 + node.keywords[-1].value.col_offset = 0 + + self.assertEqual('f(a, b=0)', pasta.dump(t)) + + +class FstringTest(test_utils.TestCase): + """Tests fstring support more in-depth.""" + + @test_utils.requires_features('fstring') + def test_fstring(self): + src = 'f"a {b} c d {e}"' + t = pasta.parse(src) + node = t.body[0].value + self.assertEqual( + fmt.get(node, 'content'), + 'f"a {__pasta_fstring_val_0__} c d {__pasta_fstring_val_1__}"') + + @test_utils.requires_features('fstring') + def test_fstring_escaping(self): + src = 'f"a {{{b} {{c}}"' + t = pasta.parse(src) + node = t.body[0].value + self.assertEqual( + fmt.get(node, 'content'), + 'f"a {{{__pasta_fstring_val_0__} {{c}}"') + + +def _get_diff(before, after): + return difflib.ndiff(after.splitlines(), before.splitlines()) + + +def suite(): + result = unittest.TestSuite() + result.addTests(unittest.makeSuite(ManualEditsTest)) + result.addTests(unittest.makeSuite(SymmetricTest)) + result.addTests(unittest.makeSuite(PrefixSuffixTest)) + result.addTests(unittest.makeSuite(PrefixSuffixGoldenTest)) + result.addTests(unittest.makeSuite(FstringTest)) + return result + + +def generate_goldens(): + result = unittest.TestSuite() + result.addTests(unittest.makeSuite(PrefixSuffixGoldenTest)) + setattr(PrefixSuffixGoldenTest, 'generate_goldens', True) + return result + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/pasta/base/ast_constants.py b/lib/python3.10/site-packages/pasta/base/ast_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..c3ffad7100e9d6d107a77ce3fccffd426b9936ef --- /dev/null +++ b/lib/python3.10/site-packages/pasta/base/ast_constants.py @@ -0,0 +1,38 @@ +"""Constants relevant to ast code.""" + +import ast + +NODE_TYPE_TO_TOKENS = { + ast.Add: ('+',), + ast.And: ('and',), + ast.BitAnd: ('&',), + ast.BitOr: ('|',), + ast.BitXor: ('^',), + ast.Div: ('/',), + ast.Eq: ('==',), + ast.FloorDiv: ('//',), + 
ast.Gt: ('>',), + ast.GtE: ('>=',), + ast.In: ('in',), + ast.Invert: ('~',), + ast.Is: ('is',), + ast.IsNot: ('is', 'not',), + ast.LShift: ('<<',), + ast.Lt: ('<',), + ast.LtE: ('<=',), + ast.Mod: ('%',), + ast.Mult: ('*',), + ast.Not: ('not',), + ast.NotEq: ('!=',), + ast.NotIn: ('not', 'in',), + ast.Or: ('or',), + ast.Pow: ('**',), + ast.RShift: ('>>',), + ast.Sub: ('-',), + ast.UAdd: ('+',), + ast.USub: ('-',), +} + + +if hasattr(ast, 'MatMult'): + NODE_TYPE_TO_TOKENS[ast.MatMult] = ('@',) diff --git a/lib/python3.10/site-packages/pasta/base/ast_utils.py b/lib/python3.10/site-packages/pasta/base/ast_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7cdf827745a33f1eaa6c3440572edb056b6f5225 --- /dev/null +++ b/lib/python3.10/site-packages/pasta/base/ast_utils.py @@ -0,0 +1,179 @@ +# coding=utf-8 +"""Helpers for working with python ASTs.""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast +import re + +from pasta.augment import errors +from pasta.base import formatting as fmt + +# From PEP-0263 -- https://www.python.org/dev/peps/pep-0263/ +_CODING_PATTERN = re.compile('^[ \t\v]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)') + + +_AST_OP_NODES = ( + ast.And, ast.Or, ast.Eq, ast.NotEq, ast.Is, ast.IsNot, ast.In, ast.NotIn, + ast.Lt, ast.LtE, ast.Gt, ast.GtE, ast.Add, ast.Sub, ast.Mult, ast.Div, + ast.Mod, ast.Pow, ast.LShift, ast.RShift, ast.BitAnd, ast.BitOr, ast.BitXor, + ast.FloorDiv, ast.Invert, ast.Not, ast.UAdd, ast.USub +) + + +class _TreeNormalizer(ast.NodeTransformer): + """Replaces all op nodes with unique instances.""" + + def visit(self, node): + if isinstance(node, _AST_OP_NODES): + return node.__class__() + return super(_TreeNormalizer, self).visit(node) + + +_tree_normalizer = _TreeNormalizer() + + +def parse(src): + """Replaces ast.parse; ensures additional properties on the parsed tree. + + This enforces the assumption that each node in the ast is unique. + """ + tree = ast.parse(sanitize_source(src)) + _tree_normalizer.visit(tree) + return tree + + +def sanitize_source(src): + """Strip the 'coding' directive from python source code, if present. + + This is a workaround for https://bugs.python.org/issue18960. Also see PEP-0263. 
+ """ + src_lines = src.splitlines(True) + for i, line in enumerate(src_lines[:2]): + if _CODING_PATTERN.match(line): + src_lines[i] = re.sub('#.*$', '# (removed coding)', line) + return ''.join(src_lines) + + +def find_nodes_by_type(node, accept_types): + visitor = FindNodeVisitor(lambda n: isinstance(n, accept_types)) + visitor.visit(node) + return visitor.results + + +class FindNodeVisitor(ast.NodeVisitor): + + def __init__(self, condition): + self._condition = condition + self.results = [] + + def visit(self, node): + if self._condition(node): + self.results.append(node) + super(FindNodeVisitor, self).visit(node) + + +def get_last_child(node): + """Get the last child node of a block statement. + + The input must be a block statement (e.g. ast.For, ast.With, etc). + + Examples: + 1. with first(): + second() + last() + + 2. try: + first() + except: + second() + finally: + last() + + In both cases, the last child is the node for `last`. + """ + if isinstance(node, ast.Module): + try: + return node.body[-1] + except IndexError: + return None + if isinstance(node, ast.If): + if (len(node.orelse) == 1 and isinstance(node.orelse[0], ast.If) and + fmt.get(node.orelse[0], 'is_elif')): + return get_last_child(node.orelse[0]) + if node.orelse: + return node.orelse[-1] + elif isinstance(node, ast.With): + if (len(node.body) == 1 and isinstance(node.body[0], ast.With) and + fmt.get(node.body[0], 'is_continued')): + return get_last_child(node.body[0]) + elif hasattr(ast, 'Try') and isinstance(node, ast.Try): + if node.finalbody: + return node.finalbody[-1] + if node.orelse: + return node.orelse[-1] + elif hasattr(ast, 'TryFinally') and isinstance(node, ast.TryFinally): + if node.finalbody: + return node.finalbody[-1] + elif hasattr(ast, 'TryExcept') and isinstance(node, ast.TryExcept): + if node.orelse: + return node.orelse[-1] + if node.handlers: + return get_last_child(node.handlers[-1]) + return node.body[-1] + + +def remove_child(parent, child): + for _, field_value in 
ast.iter_fields(parent): + if isinstance(field_value, list) and child in field_value: + field_value.remove(child) + return + raise errors.InvalidAstError('Unable to find list containing child %r on ' + 'parent node %r' % (child, parent)) + + +def replace_child(parent, node, replace_with): + """Replace a node's child with another node while preserving formatting. + + Arguments: + parent: (ast.AST) Parent node to replace a child of. + node: (ast.AST) Child node to replace. + replace_with: (ast.AST) New child node. + """ + # TODO(soupytwist): Don't refer to the formatting dict directly + if hasattr(node, fmt.PASTA_DICT): + fmt.set(replace_with, 'prefix', fmt.get(node, 'prefix')) + fmt.set(replace_with, 'suffix', fmt.get(node, 'suffix')) + for field in parent._fields: + field_val = getattr(parent, field, None) + if field_val == node: + setattr(parent, field, replace_with) + return + elif isinstance(field_val, list): + try: + field_val[field_val.index(node)] = replace_with + return + except ValueError: + pass + raise errors.InvalidAstError('Node %r is not a child of %r' % (node, parent)) + + +def has_docstring(node): + return (hasattr(node, 'body') and node.body and + isinstance(node.body[0], ast.Expr) and + isinstance(node.body[0].value, ast.Str)) diff --git a/lib/python3.10/site-packages/pasta/base/ast_utils_test.py b/lib/python3.10/site-packages/pasta/base/ast_utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d750fb86f3a42d1873f05e97fcc703b09cd4d286 --- /dev/null +++ b/lib/python3.10/site-packages/pasta/base/ast_utils_test.py @@ -0,0 +1,123 @@ +# coding=utf-8 +"""Tests for ast_utils.""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import pasta +from pasta.augment import errors +from pasta.base import ast_utils +from pasta.base import test_utils + + +class UtilsTest(test_utils.TestCase): + + def test_sanitize_source(self): + coding_lines = ( + '# -*- coding: latin-1 -*-', + '# -*- coding: iso-8859-15 -*-', + '# vim: set fileencoding=ascii :', + '# This Python file uses the following encoding: utf-8', + ) + src_template = '{coding}\na = 123\n' + sanitized_src = '# (removed coding)\na = 123\n' + for line in coding_lines: + src = src_template.format(coding=line) + + # Replaced on lines 1 and 2 + self.assertEqual(sanitized_src, ast_utils.sanitize_source(src)) + src_prefix = '"""Docstring."""\n' + self.assertEqual(src_prefix + sanitized_src, + ast_utils.sanitize_source(src_prefix + src)) + + # Unchanged on line 3 + src_prefix = '"""Docstring."""\n# line 2\n' + self.assertEqual(src_prefix + src, + ast_utils.sanitize_source(src_prefix + src)) + + +class AlterChildTest(test_utils.TestCase): + + def testRemoveChildMethod(self): + src = """\ +class C(): + def f(x): + return x + 2 + def g(x): + return x + 3 +""" + tree = pasta.parse(src) + class_node = tree.body[0] + meth1_node = class_node.body[0] + + ast_utils.remove_child(class_node, meth1_node) + + result = pasta.dump(tree) + expected = """\ +class C(): + def g(x): + return x + 3 +""" + self.assertEqual(result, expected) + + def testRemoveAlias(self): + src = "from a import b, c" + tree = pasta.parse(src) + import_node = 
tree.body[0] + alias1 = import_node.names[0] + ast_utils.remove_child(import_node, alias1) + + self.assertEqual(pasta.dump(tree), "from a import c") + + def testRemoveFromBlock(self): + src = """\ +if a: + print("foo!") + x = 1 +""" + tree = pasta.parse(src) + if_block = tree.body[0] + print_stmt = if_block.body[0] + ast_utils.remove_child(if_block, print_stmt) + + expected = """\ +if a: + x = 1 +""" + self.assertEqual(pasta.dump(tree), expected) + + def testReplaceChildInBody(self): + src = 'def foo():\n a = 0\n a += 1 # replace this\n return a\n' + replace_with = pasta.parse('foo(a + 1) # trailing comment\n').body[0] + expected = 'def foo():\n a = 0\n foo(a + 1) # replace this\n return a\n' + t = pasta.parse(src) + + parent = t.body[0] + node_to_replace = parent.body[1] + ast_utils.replace_child(parent, node_to_replace, replace_with) + + self.assertEqual(expected, pasta.dump(t)) + + def testReplaceChildInvalid(self): + src = 'def foo():\n return 1\nx = 1\n' + replace_with = pasta.parse('bar()').body[0] + t = pasta.parse(src) + + parent = t.body[0] + node_to_replace = t.body[1] + with self.assertRaises(errors.InvalidAstError): + ast_utils.replace_child(parent, node_to_replace, replace_with) diff --git a/lib/python3.10/site-packages/pasta/base/codegen.py b/lib/python3.10/site-packages/pasta/base/codegen.py new file mode 100644 index 0000000000000000000000000000000000000000..dfc74aaeb543af105e84e1a0879c1580ed4827e1 --- /dev/null +++ b/lib/python3.10/site-packages/pasta/base/codegen.py @@ -0,0 +1,160 @@ +# coding=utf-8 +"""Generate code from an annotated syntax tree.""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast +import collections +import six + +from pasta.base import annotate +from pasta.base import formatting as fmt +from pasta.base import fstring_utils + + +class PrintError(Exception): + """An exception for when we failed to print the tree.""" + + +class Printer(annotate.BaseVisitor): + """Traverses an AST and generates formatted python source code. + + This uses the same base visitor as annotating the AST, but instead of eating a + token it spits one out. For special formatting information which was stored on + the node, this is output exactly as it was read in unless one or more of the + dependency attributes used to generate it has changed, in which case its + default formatting is used. 
+ """ + + def __init__(self): + super(Printer, self).__init__() + self.code = '' + + def visit(self, node): + node._printer_info = collections.defaultdict(lambda: False) + try: + super(Printer, self).visit(node) + except (TypeError, ValueError, IndexError, KeyError) as e: + raise PrintError(e) + del node._printer_info + + def visit_Num(self, node): + self.prefix(node) + content = fmt.get(node, 'content') + self.code += content if content is not None else repr(node.n) + self.suffix(node) + + def visit_Str(self, node): + self.prefix(node) + content = fmt.get(node, 'content') + self.code += content if content is not None else repr(node.s) + self.suffix(node) + + def visit_JoinedStr(self, node): + self.prefix(node) + content = fmt.get(node, 'content') + + if content is None: + parts = [] + for val in node.values: + if isinstance(val, ast.Str): + parts.append(val.s) + else: + parts.append(fstring_utils.placeholder(len(parts))) + content = repr(''.join(parts)) + + values = [to_str(v) for v in fstring_utils.get_formatted_values(node)] + self.code += fstring_utils.perform_replacements(content, values) + self.suffix(node) + + def visit_Bytes(self, node): + self.prefix(node) + content = fmt.get(node, 'content') + self.code += content if content is not None else repr(node.s) + self.suffix(node) + + def token(self, value): + self.code += value + + def optional_token(self, node, attr_name, token_val, + allow_whitespace_prefix=False, default=False): + del allow_whitespace_prefix + value = fmt.get(node, attr_name) + if value is None and default: + value = token_val + self.code += value or '' + + def attr(self, node, attr_name, attr_vals, deps=None, default=None): + """Add the formatted data stored for a given attribute on this node. + + If any of the dependent attributes of the node have changed since it was + annotated, then the stored formatted data for this attr_name is no longer + valid, and we must use the default instead. 
+ + Arguments: + node: (ast.AST) An AST node to retrieve formatting information from. + attr_name: (string) Name to load the formatting information from. + attr_vals: (list of functions/strings) Unused here. + deps: (optional, set of strings) Attributes of the node which the stored + formatting data depends on. + default: (string) Default formatted data for this attribute. + """ + del attr_vals + if not hasattr(node, '_printer_info') or node._printer_info[attr_name]: + return + node._printer_info[attr_name] = True + val = fmt.get(node, attr_name) + if (val is None or deps and + any(getattr(node, dep, None) != fmt.get(node, dep + '__src') + for dep in deps)): + val = default + self.code += val if val is not None else '' + + def check_is_elif(self, node): + try: + return fmt.get(node, 'is_elif') + except AttributeError: + return False + + def check_is_continued_try(self, node): + # TODO: Don't set extra attributes on nodes + return getattr(node, 'is_continued', False) + + def check_is_continued_with(self, node): + # TODO: Don't set extra attributes on nodes + return getattr(node, 'is_continued', False) + + +def to_str(tree): + """Convenient function to get the python source for an AST.""" + p = Printer() + + # Detect the most prevalent indentation style in the file and use it when + # printing indented nodes which don't have formatting data. 
+ seen_indent_diffs = collections.defaultdict(lambda: 0) + for node in ast.walk(tree): + indent_diff = fmt.get(node, 'indent_diff', '') + if indent_diff: + seen_indent_diffs[indent_diff] += 1 + if seen_indent_diffs: + indent_diff, _ = max(six.iteritems(seen_indent_diffs), + key=lambda tup: tup[1] if tup[0] else -1) + p.set_default_indent_diff(indent_diff) + + p.visit(tree) + return p.code diff --git a/lib/python3.10/site-packages/pasta/base/codegen_test.py b/lib/python3.10/site-packages/pasta/base/codegen_test.py new file mode 100644 index 0000000000000000000000000000000000000000..8a0f6acedaacf3aa33240b7232b402ce34c72625 --- /dev/null +++ b/lib/python3.10/site-packages/pasta/base/codegen_test.py @@ -0,0 +1,106 @@ +# coding=utf-8 +"""Tests for generating code from a non-annotated ast.""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast +import os.path +import unittest +from six import with_metaclass + +import pasta +from pasta.base import codegen +from pasta.base import test_utils + +TESTDATA_DIR = os.path.realpath( + os.path.join(os.path.dirname(pasta.__file__), '../testdata')) + + +def _is_syntax_valid(filepath): + with open(filepath, 'r') as f: + try: + ast.parse(f.read()) + except SyntaxError: + return False + return True + + +class AutoFormatTestMeta(type): + + def __new__(mcs, name, bases, inst_dict): + # Helper function to generate a test method + def auto_format_test_generator(input_file): + def test(self): + with open(input_file, 'r') as handle: + src = handle.read() + t = ast.parse(src) + auto_formatted = codegen.to_str(t) + self.assertMultiLineEqual(src, auto_formatted) + return test + + # Add a test method for each input file + test_method_prefix = 'test_auto_format_' + data_dir = os.path.join(TESTDATA_DIR, 'codegen') + for dirpath, _, files in os.walk(data_dir): + for filename in files: + if filename.endswith('.in'): + full_path = os.path.join(dirpath, filename) + inst_dict[test_method_prefix + filename[:-3]] = unittest.skipIf( + not _is_syntax_valid(full_path), + 'Test contains syntax not supported by this version.', + )(auto_format_test_generator(full_path)) + return type.__new__(mcs, name, bases, inst_dict) + + +class AutoFormatTest(with_metaclass(AutoFormatTestMeta, test_utils.TestCase)): + """Tests that code without formatting info is printed neatly.""" + + def test_imports(self): + src = 'from a import b\nimport c, d\nfrom ..e import f, g\n' + t = ast.parse(src) + self.assertEqual(src, pasta.dump(t)) + + @test_utils.requires_features('exec_node') + def test_exec_node_default(self): + src = 'exec foo in bar' + t = ast.parse(src) + self.assertEqual('exec(foo, bar)\n', pasta.dump(t)) + + @test_utils.requires_features('bytes_node') + def test_bytes(self): 
+ src = "b'foo'" + t = ast.parse(src) + self.assertEqual("b'foo'\n", pasta.dump(t)) + + def test_default_indentation(self): + for indent in (' ', ' ', '\t'): + src ='def a():\n' + indent + 'b\n' + t = pasta.parse(src) + t.body.extend(ast.parse('def c(): d').body) + self.assertEqual(codegen.to_str(t), + src + 'def c():\n' + indent + 'd\n') + + +def suite(): + result = unittest.TestSuite() + result.addTests(unittest.makeSuite(AutoFormatTest)) + return result + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/pasta/base/formatting.py b/lib/python3.10/site-packages/pasta/base/formatting.py new file mode 100644 index 0000000000000000000000000000000000000000..6f68b411a7d8917444564373b5293e3452801f97 --- /dev/null +++ b/lib/python3.10/site-packages/pasta/base/formatting.py @@ -0,0 +1,49 @@ +# coding=utf-8 +"""Operations for storing and retrieving formatting info on ast nodes.""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +PASTA_DICT = '__pasta__' + + +def get(node, name, default=None): + try: + return _formatting_dict(node).get(name, default) + except AttributeError: + return default + + +def set(node, name, value): + if not hasattr(node, PASTA_DICT): + try: + setattr(node, PASTA_DICT, {}) + except AttributeError: + pass + _formatting_dict(node)[name] = value + + +def append(node, name, value): + set(node, name, get(node, name, '') + value) + + +def prepend(node, name, value): + set(node, name, value + get(node, name, '')) + + +def _formatting_dict(node): + return getattr(node, PASTA_DICT) diff --git a/lib/python3.10/site-packages/pasta/base/fstring_utils.py b/lib/python3.10/site-packages/pasta/base/fstring_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..57306e14efd7ee61d4eb9d2740a2bc0ea632ad30 --- /dev/null +++ b/lib/python3.10/site-packages/pasta/base/fstring_utils.py @@ -0,0 +1,44 @@ +# coding=utf-8 +"""Helpers for working with fstrings (python3.6+).""" +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast + +_FSTRING_VAL_PLACEHOLDER = '__pasta_fstring_val_{index}__' + + +def get_formatted_values(joined_str): + """Get all FormattedValues from a JoinedStr, in order.""" + return [v for v in joined_str.values if isinstance(v, ast.FormattedValue)] + + +def placeholder(val_index): + """Get the placeholder token for a FormattedValue in an fstring.""" + return _FSTRING_VAL_PLACEHOLDER.format(index=val_index) + + +def perform_replacements(fstr, values): + """Replace placeholders in an fstring with subexpressions.""" + for i, value in enumerate(values): + fstr = fstr.replace(_wrap(placeholder(i)), _wrap(value)) + return fstr + + +def _wrap(s): + return '{%s}' % s diff --git a/lib/python3.10/site-packages/pasta/base/scope.py b/lib/python3.10/site-packages/pasta/base/scope.py new file mode 100644 index 0000000000000000000000000000000000000000..380a0d5e3f0988eeeb50c7ac51f6f26dc9a05740 --- /dev/null +++ b/lib/python3.10/site-packages/pasta/base/scope.py @@ -0,0 +1,277 @@ +# coding=utf-8 +"""Perform static analysis on python syntax trees.""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast +import collections +import six + +# TODO: Support relative imports + +# Represents a reference to something external to the module. +# Fields: +# name: (string) The full dotted name being referenced. +# node: (ast.AST) The AST node where the reference is defined. +# name_ref: (Name) The name object that refers to the imported name, if +# applicable. This may not be the same id if the import is aliased. +ExternalReference = collections.namedtuple('ExternalReference', + ('name', 'node', 'name_ref')) + + +class ScopeVisitor(ast.NodeVisitor): + + def __init__(self): + super(ScopeVisitor, self).__init__() + self._parent = None + self.root_scope = self.scope = RootScope(None) + + def visit(self, node): + if node is None: + return + if self.root_scope.node is None: + self.root_scope.node = node + self.root_scope.set_parent(node, self._parent) + tmp = self._parent + self._parent = node + super(ScopeVisitor, self).visit(node) + self._parent = tmp + + def visit_in_order(self, node, *attrs): + for attr in attrs: + val = getattr(node, attr, None) + if val is None: + continue + if isinstance(val, list): + for item in val: + self.visit(item) + elif isinstance(val, ast.AST): + self.visit(val) + + def visit_Import(self, node): + for alias in node.names: + name_parts = alias.name.split('.') + + if not alias.asname: + # If not aliased, define the top-level module of the import + cur_name = self.scope.define_name(name_parts[0], alias) + self.root_scope.add_external_reference(name_parts[0], alias, + name_ref=cur_name) + + # Define names of sub-modules imported + partial_name = name_parts[0] + for part in name_parts[1:]: + partial_name += '.' 
+ part + cur_name = cur_name.lookup_name(part) + cur_name.define(alias) + self.root_scope.add_external_reference(partial_name, alias, + name_ref=cur_name) + + else: + # If the imported name is aliased, define that name only + name = self.scope.define_name(alias.asname, alias) + + # Define names of sub-modules imported + for i in range(1, len(name_parts)): + self.root_scope.add_external_reference('.'.join(name_parts[:i]), + alias) + self.root_scope.add_external_reference(alias.name, alias, name_ref=name) + + self.generic_visit(node) + + def visit_ImportFrom(self, node): + if node.module: + name_parts = node.module.split('.') + for i in range(1, len(name_parts) + 1): + self.root_scope.add_external_reference('.'.join(name_parts[:i]), node) + for alias in node.names: + name = self.scope.define_name(alias.asname or alias.name, alias) + if node.module: + self.root_scope.add_external_reference( + '.'.join((node.module, alias.name)), alias, name_ref=name) + # TODO: else? relative imports + self.generic_visit(node) + + def visit_Name(self, node): + if isinstance(node.ctx, (ast.Store, ast.Param)): + self.scope.define_name(node.id, node) + elif isinstance(node.ctx, ast.Load): + self.scope.lookup_name(node.id).add_reference(node) + self.root_scope.set_name_for_node(node, self.scope.lookup_name(node.id)) + self.generic_visit(node) + + def visit_FunctionDef(self, node): + # Visit decorator list first to avoid declarations in args + self.visit_in_order(node, 'decorator_list') + if isinstance(self.root_scope.parent(node), ast.ClassDef): + pass # TODO: Support referencing methods by "self" where possible + else: + self.scope.define_name(node.name, node) + try: + self.scope = self.scope.create_scope(node) + self.visit_in_order(node, 'args', 'returns', 'body') + finally: + self.scope = self.scope.parent_scope + + def visit_arguments(self, node): + self.visit_in_order(node, 'defaults', 'args') + if six.PY2: + # In python 2.x, these names are not Name nodes. 
Define them explicitly + # to be able to find references in the function body. + for arg_attr_name in ('vararg', 'kwarg'): + arg_name = getattr(node, arg_attr_name, None) + if arg_name is not None: + self.scope.define_name(arg_name, node) + else: + # Visit defaults first to avoid declarations in args + self.visit_in_order(node, 'vararg', 'kwarg') + + def visit_arg(self, node): + self.scope.define_name(node.arg, node) + self.generic_visit(node) + + def visit_ClassDef(self, node): + self.visit_in_order(node, 'decorator_list', 'bases') + self.scope.define_name(node.name, node) + try: + self.scope = self.scope.create_scope(node) + self.visit_in_order(node, 'body') + finally: + self.scope = self.scope.parent_scope + + def visit_Attribute(self, node): + self.generic_visit(node) + node_value_name = self.root_scope.get_name_for_node(node.value) + if node_value_name: + node_name = node_value_name.lookup_name(node.attr) + self.root_scope.set_name_for_node(node, node_name) + node_name.add_reference(node) + + +class Scope(object): + + def __init__(self, parent_scope, node): + self.parent_scope = parent_scope + self.names = {} + self.node = node + + def define_name(self, name, node): + try: + name_obj = self.names[name] + except KeyError: + name_obj = self.names[name] = Name(name) + name_obj.define(node) + return name_obj + + def lookup_name(self, name): + try: + return self.names[name] + except KeyError: + pass + if self.parent_scope is None: + name_obj = self.names[name] = Name(name) + return name_obj + return self.parent_scope.lookup_name(name) + + def get_root_scope(self): + return self.parent_scope.get_root_scope() + + def lookup_scope(self, node): + return self.get_root_scope().lookup_scope(node) + + def create_scope(self, node): + subscope = Scope(self, node) + self.get_root_scope()._set_scope_for_node(node, subscope) + return subscope + + +class RootScope(Scope): + + def __init__(self, node): + super(RootScope, self).__init__(None, node) + self.external_references = {} 
+ self._parents = {} + self._nodes_to_names = {} + self._node_scopes = {} + + def add_external_reference(self, name, node, name_ref=None): + ref = ExternalReference(name=name, node=node, name_ref=name_ref) + if name in self.external_references: + self.external_references[name].append(ref) + else: + self.external_references[name] = [ref] + + def get_root_scope(self): + return self + + def parent(self, node): + return self._parents.get(node, None) + + def set_parent(self, node, parent): + self._parents[node] = parent + if parent is None: + self._node_scopes[node] = self + + def get_name_for_node(self, node): + return self._nodes_to_names.get(node, None) + + def set_name_for_node(self, node, name): + self._nodes_to_names[node] = name + + def lookup_scope(self, node): + while node: + try: + return self._node_scopes[node] + except KeyError: + node = self.parent(node) + return None + + def _set_scope_for_node(self, node, node_scope): + self._node_scopes[node] = node_scope + + +# Should probably also have a scope? 
+class Name(object): + + def __init__(self, id): + self.id = id + self.definition = None + self.reads = [] + self.attrs = {} + + def add_reference(self, node): + self.reads.append(node) + + def define(self, node): + if self.definition: + self.reads.append(node) + else: + self.definition = node + + def lookup_name(self, name): + try: + return self.attrs[name] + except KeyError: + name_obj = self.attrs[name] = Name('.'.join((self.id, name))) + return name_obj + + +def analyze(tree): + v = ScopeVisitor() + v.visit(tree) + return v.scope diff --git a/lib/python3.10/site-packages/pasta/base/scope_test.py b/lib/python3.10/site-packages/pasta/base/scope_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a753fd5f2e19eba6e25cb3eedee474c3f972196b --- /dev/null +++ b/lib/python3.10/site-packages/pasta/base/scope_test.py @@ -0,0 +1,467 @@ +# coding=utf-8 +"""Tests for scope.""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast +import textwrap +import unittest + +from pasta.base import ast_utils +from pasta.base import scope +from pasta.base import test_utils + + +class ScopeTest(test_utils.TestCase): + + def test_top_level_imports(self): + self.maxDiff = None + source = textwrap.dedent("""\ + import aaa + import bbb, ccc.ddd + import aaa.bbb.ccc + from eee import fff + from ggg.hhh import iii, jjj + """) + tree = ast.parse(source) + nodes = tree.body + + node_1_aaa = nodes[0].names[0] + node_2_bbb = nodes[1].names[0] + node_2_ccc_ddd = nodes[1].names[1] + node_3_aaa_bbb_ccc = nodes[2].names[0] + node_4_eee = nodes[3] + node_4_fff = nodes[3].names[0] + node_5_ggg_hhh = nodes[4] + node_5_iii = nodes[4].names[0] + node_5_jjj = nodes[4].names[1] + + s = scope.analyze(tree) + + self.assertItemsEqual( + s.names.keys(), { + 'aaa', 'bbb', 'ccc', 'fff', 'iii', 'jjj' + }) + self.assertItemsEqual( + s.external_references.keys(), { + 'aaa', 'bbb', 'ccc', 'ccc.ddd', 'aaa.bbb', 'aaa.bbb.ccc', 'eee', + 'eee.fff', 'ggg', 'ggg.hhh', 'ggg.hhh.iii', 'ggg.hhh.jjj' + }) + self.assertItemsEqual(s.external_references['aaa'], [ + scope.ExternalReference('aaa', node_1_aaa, s.names['aaa']), + scope.ExternalReference('aaa', node_3_aaa_bbb_ccc, s.names['aaa']), + ]) + self.assertItemsEqual(s.external_references['bbb'], [ + scope.ExternalReference('bbb', node_2_bbb, s.names['bbb']), + ]) + self.assertItemsEqual(s.external_references['ccc'], [ + scope.ExternalReference('ccc', node_2_ccc_ddd, s.names['ccc']), + ]) + self.assertItemsEqual(s.external_references['ccc.ddd'], [ + scope.ExternalReference('ccc.ddd', node_2_ccc_ddd, + s.names['ccc'].attrs['ddd']), + ]) + self.assertItemsEqual(s.external_references['aaa.bbb'], [ + scope.ExternalReference('aaa.bbb', node_3_aaa_bbb_ccc, + s.names['aaa'].attrs['bbb']), + ]) + self.assertItemsEqual(s.external_references['aaa.bbb.ccc'], [ + 
scope.ExternalReference('aaa.bbb.ccc', node_3_aaa_bbb_ccc, + s.names['aaa'].attrs['bbb'].attrs['ccc']), + ]) + self.assertItemsEqual(s.external_references['eee'], [ + scope.ExternalReference('eee', node_4_eee, None), + ]) + self.assertItemsEqual(s.external_references['eee.fff'], [ + scope.ExternalReference('eee.fff', node_4_fff, s.names['fff']), + ]) + self.assertItemsEqual(s.external_references['ggg'], [ + scope.ExternalReference('ggg', node_5_ggg_hhh, None), + ]) + self.assertItemsEqual(s.external_references['ggg.hhh'], [ + scope.ExternalReference('ggg.hhh', node_5_ggg_hhh, None), + ]) + self.assertItemsEqual(s.external_references['ggg.hhh.iii'], [ + scope.ExternalReference('ggg.hhh.iii', node_5_iii, s.names['iii']), + ]) + self.assertItemsEqual(s.external_references['ggg.hhh.jjj'], [ + scope.ExternalReference('ggg.hhh.jjj', node_5_jjj, s.names['jjj']), + ]) + + self.assertIs(s.names['aaa'].definition, node_1_aaa) + self.assertIs(s.names['bbb'].definition, node_2_bbb) + self.assertIs(s.names['ccc'].definition, node_2_ccc_ddd) + self.assertIs(s.names['fff'].definition, node_4_fff) + self.assertIs(s.names['iii'].definition, node_5_iii) + self.assertIs(s.names['jjj'].definition, node_5_jjj) + + self.assertItemsEqual(s.names['aaa'].reads, [node_3_aaa_bbb_ccc]) + for ref in {'bbb', 'ccc', 'fff', 'iii', 'jjj'}: + self.assertEqual(s.names[ref].reads, [], 'Expected no reads for %s' % ref) + + def test_if_nested_imports(self): + source = textwrap.dedent("""\ + if a: + import aaa + elif b: + import bbb + else: + import ccc + """) + tree = ast.parse(source) + nodes = tree.body + + node_aaa, node_bbb, node_ccc = ast_utils.find_nodes_by_type(tree, ast.alias) + + s = scope.analyze(tree) + + self.assertItemsEqual(s.names.keys(), {'aaa', 'bbb', 'ccc', 'a', 'b'}) + self.assertItemsEqual(s.external_references.keys(), {'aaa', 'bbb', 'ccc'}) + + self.assertEqual(s.names['aaa'].definition, node_aaa) + self.assertEqual(s.names['bbb'].definition, node_bbb) + 
self.assertEqual(s.names['ccc'].definition, node_ccc) + + self.assertIsNone(s.names['a'].definition) + self.assertIsNone(s.names['b'].definition) + + for ref in {'aaa', 'bbb', 'ccc'}: + self.assertEqual(s.names[ref].reads, [], + 'Expected no reads for %s' % ref) + + def test_try_nested_imports(self): + source = textwrap.dedent("""\ + try: + import aaa + except: + import bbb + finally: + import ccc + """) + tree = ast.parse(source) + nodes = tree.body + + node_aaa, node_bbb, node_ccc = ast_utils.find_nodes_by_type(tree, ast.alias) + + s = scope.analyze(tree) + + self.assertItemsEqual(s.names.keys(), {'aaa', 'bbb', 'ccc'}) + self.assertItemsEqual(s.external_references.keys(), {'aaa', 'bbb', 'ccc'}) + + self.assertEqual(s.names['aaa'].definition, node_aaa) + self.assertEqual(s.names['bbb'].definition, node_bbb) + self.assertEqual(s.names['ccc'].definition, node_ccc) + + for ref in {'aaa', 'bbb', 'ccc'}: + self.assertEqual(s.names[ref].reads, [], + 'Expected no reads for %s' % ref) + + def test_functiondef_nested_imports(self): + source = textwrap.dedent("""\ + def foo(bar): + import aaa + """) + tree = ast.parse(source) + nodes = tree.body + + node_aaa = ast_utils.find_nodes_by_type(tree, ast.alias)[0] + + s = scope.analyze(tree) + + self.assertItemsEqual(s.names.keys(), {'foo'}) + self.assertItemsEqual(s.external_references.keys(), {'aaa'}) + + def test_classdef_nested_imports(self): + source = textwrap.dedent("""\ + class Foo(): + import aaa + """) + tree = ast.parse(source) + nodes = tree.body + + node_aaa = nodes[0].body[0].names[0] + + s = scope.analyze(tree) + + self.assertItemsEqual(s.names.keys(), {'Foo'}) + self.assertItemsEqual(s.external_references.keys(), {'aaa'}) + + def test_multilevel_import_reads(self): + source = textwrap.dedent("""\ + import aaa.bbb.ccc + aaa.bbb.ccc.foo() + """) + tree = ast.parse(source) + nodes = tree.body + + node_ref = nodes[1].value.func.value + + s = scope.analyze(tree) + + self.assertItemsEqual(s.names.keys(), {'aaa'}) + 
self.assertItemsEqual(s.external_references.keys(), + {'aaa', 'aaa.bbb', 'aaa.bbb.ccc'}) + self.assertItemsEqual(s.names['aaa'].reads, [node_ref.value.value]) + self.assertItemsEqual(s.names['aaa'].attrs['bbb'].reads, [node_ref.value]) + self.assertItemsEqual(s.names['aaa'].attrs['bbb'].attrs['ccc'].reads, + [node_ref]) + + def test_import_reads_in_functiondef(self): + source = textwrap.dedent("""\ + import aaa + @aaa.x + def foo(bar): + return aaa + """) + tree = ast.parse(source) + nodes = tree.body + + return_value = nodes[1].body[0].value + decorator = nodes[1].decorator_list[0].value + + s = scope.analyze(tree) + + self.assertItemsEqual(s.names.keys(), {'aaa', 'foo'}) + self.assertItemsEqual(s.external_references.keys(), {'aaa'}) + self.assertItemsEqual(s.names['aaa'].reads, [decorator, return_value]) + + def test_import_reads_in_classdef(self): + source = textwrap.dedent("""\ + import aaa + @aaa.x + class Foo(aaa.Bar): + pass + """) + tree = ast.parse(source) + nodes = tree.body + + node_aaa = nodes[0].names[0] + decorator = nodes[1].decorator_list[0].value + base = nodes[1].bases[0].value + + s = scope.analyze(tree) + + self.assertItemsEqual(s.names.keys(), {'aaa', 'Foo'}) + self.assertItemsEqual(s.external_references.keys(), {'aaa'}) + self.assertItemsEqual(s.names['aaa'].reads, [decorator, base]) + + def test_import_masked_by_function_arg(self): + source = textwrap.dedent("""\ + import aaa + def foo(aaa=aaa): + return aaa + """) + tree = ast.parse(source) + nodes = tree.body + + argval = nodes[1].args.defaults[0] + + s = scope.analyze(tree) + + self.assertItemsEqual(s.names.keys(), {'aaa', 'foo'}) + self.assertItemsEqual(s.external_references.keys(), {'aaa'}) + self.assertItemsEqual(s.names['aaa'].reads, [argval]) + + def test_import_masked_by_assign(self): + source = textwrap.dedent("""\ + import aaa + def foo(): + aaa = 123 + return aaa + aaa + """) + tree = ast.parse(source) + nodes = tree.body + + node_aaa = nodes[2].value + + s = scope.analyze(tree) + 
+ self.assertItemsEqual(s.names.keys(), {'aaa', 'foo'}) + self.assertItemsEqual(s.external_references.keys(), {'aaa'}) + self.assertItemsEqual(s.names['aaa'].reads, [node_aaa]) + + def test_import_in_decortator(self): + source = textwrap.dedent("""\ + import aaa + @aaa.wrapper + def foo(aaa=1): + pass + """) + tree = ast.parse(source) + nodes = tree.body + + decorator = nodes[1].decorator_list[0].value + + s = scope.analyze(tree) + + self.assertItemsEqual(s.names.keys(), {'aaa', 'foo'}) + self.assertItemsEqual(s.external_references.keys(), {'aaa'}) + self.assertItemsEqual(s.names['aaa'].reads, [decorator]) + + @test_utils.requires_features('type_annotations') + def test_import_in_return_type(self): + source = textwrap.dedent("""\ + import aaa + def foo() -> aaa.Foo: + pass + """) + tree = ast.parse(source) + nodes = tree.body + + func = nodes[1] + + s = scope.analyze(tree) + + self.assertItemsEqual(s.names.keys(), {'aaa', 'foo'}) + self.assertItemsEqual(s.external_references.keys(), {'aaa'}) + self.assertItemsEqual(s.names['aaa'].reads, [func.returns.value]) + + @test_utils.requires_features('type_annotations') + def test_import_in_argument_type(self): + source = textwrap.dedent("""\ + import aaa + def foo(bar: aaa.Bar): + pass + """) + tree = ast.parse(source) + nodes = tree.body + + func = nodes[1] + + s = scope.analyze(tree) + + self.assertItemsEqual(s.names.keys(), {'aaa', 'foo'}) + self.assertItemsEqual(s.external_references.keys(), {'aaa'}) + self.assertItemsEqual(s.names['aaa'].reads, + [func.args.args[0].annotation.value]) + + def test_import_attribute_references(self): + source = textwrap.dedent("""\ + import aaa.bbb.ccc, ddd.eee + aaa.x() + aaa.bbb.y() + aaa.bbb.ccc.z() + """) + tree = ast.parse(source) + nodes = tree.body + + call1 = nodes[1].value.func.value + call2 = nodes[2].value.func.value + call3 = nodes[3].value.func.value + + s = scope.analyze(tree) + + self.assertItemsEqual(s.names.keys(), {'aaa', 'ddd'}) + 
self.assertItemsEqual(s.external_references.keys(), + {'aaa', 'aaa.bbb', 'aaa.bbb.ccc', 'ddd', 'ddd.eee'}) + self.assertItemsEqual(s.names['aaa'].reads, + [call1, call2.value, call3.value.value]) + self.assertItemsEqual(s.names['aaa'].attrs['bbb'].reads, + [call2, call3.value]) + self.assertItemsEqual(s.names['aaa'].attrs['bbb'].attrs['ccc'].reads, + [call3]) + + def test_lookup_scope(self): + src = textwrap.dedent("""\ + import a + def b(c, d, e=1): + class F(d): + g = 1 + return c + """) + t = ast.parse(src) + import_node, func_node = t.body + class_node, return_node = func_node.body + + sc = scope.analyze(t) + import_node_scope = sc.lookup_scope(import_node) + self.assertIs(import_node_scope.node, t) + self.assertIs(import_node_scope, sc) + self.assertItemsEqual(import_node_scope.names, ['a', 'b']) + + func_node_scope = sc.lookup_scope(func_node) + self.assertIs(func_node_scope.node, func_node) + self.assertIs(func_node_scope.parent_scope, sc) + self.assertItemsEqual(func_node_scope.names, ['c', 'd', 'e', 'F']) + + class_node_scope = sc.lookup_scope(class_node) + self.assertIs(class_node_scope.node, class_node) + self.assertIs(class_node_scope.parent_scope, func_node_scope) + self.assertItemsEqual(class_node_scope.names, ['g']) + + return_node_scope = sc.lookup_scope(return_node) + self.assertIs(return_node_scope.node, func_node) + self.assertIs(return_node_scope, func_node_scope) + self.assertItemsEqual(return_node_scope.names, ['c', 'd', 'e', 'F']) + + self.assertIs(class_node_scope.lookup_scope(func_node), + func_node_scope) + + self.assertIsNone(sc.lookup_scope(ast.Name(id='foo'))) + + def test_class_methods(self): + source = textwrap.dedent("""\ + import aaa + class C: + def aaa(self): + return aaa + + def bbb(self): + return aaa + """) + tree = ast.parse(source) + importstmt, classdef = tree.body + method_aaa, method_bbb = classdef.body + + s = scope.analyze(tree) + + self.assertItemsEqual(s.names.keys(), {'aaa', 'C'}) + 
self.assertItemsEqual(s.external_references.keys(), {'aaa'}) + self.assertItemsEqual(s.names['aaa'].reads, + [method_aaa.body[0].value, method_bbb.body[0].value]) + # TODO: Test references to C.aaa, C.bbb once supported + + def test_vararg_kwarg_references_in_function_body(self): + source = textwrap.dedent("""\ + def aaa(bbb, *ccc, **ddd): + ccc + ddd + eee(ccc, ddd) + """) + tree = ast.parse(source) + funcdef, call = tree.body + ccc_expr, ddd_expr = funcdef.body + + sc = scope.analyze(tree) + + func_scope = sc.lookup_scope(funcdef) + self.assertIn('ccc', func_scope.names) + self.assertItemsEqual(func_scope.names['ccc'].reads, [ccc_expr.value]) + self.assertIn('ddd', func_scope.names) + self.assertItemsEqual(func_scope.names['ddd'].reads, [ddd_expr.value]) + + +def suite(): + result = unittest.TestSuite() + result.addTests(unittest.makeSuite(ScopeTest)) + return result + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/pasta/base/test_utils.py b/lib/python3.10/site-packages/pasta/base/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2d459050b845157cd4535fa4d96429bb5dd31301 --- /dev/null +++ b/lib/python3.10/site-packages/pasta/base/test_utils.py @@ -0,0 +1,89 @@ +# coding=utf-8 +"""Useful stuff for tests.""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast +import sys +import unittest + +from six.moves import zip + + +class TestCase(unittest.TestCase): + + def checkAstsEqual(self, a, b): + """Compares two ASTs and fails if there are differences. + + Ignores `ctx` fields and formatting info. + """ + if a is None and b is None: + return + try: + self.assertIsNotNone(a) + self.assertIsNotNone(b) + for node_a, node_b in zip(ast.walk(a), ast.walk(b)): + self.assertEqual(type(node_a), type(node_b)) + for field in type(node_a)()._fields: + a_val = getattr(node_a, field, None) + b_val = getattr(node_b, field, None) + + if isinstance(a_val, list): + for item_a, item_b in zip(a_val, b_val): + self.checkAstsEqual(item_a, item_b) + elif isinstance(a_val, ast.AST) or isinstance(b_val, ast.AST): + if (not isinstance(a_val, (ast.Load, ast.Store, ast.Param)) and + not isinstance(b_val, (ast.Load, ast.Store, ast.Param))): + self.assertIsNotNone(a_val) + self.assertIsNotNone(b_val) + self.checkAstsEqual(a_val, b_val) + else: + self.assertEqual(a_val, b_val) + except AssertionError as ae: + self.fail('ASTs differ:\n%s\n !=\n%s\n\n%s' % ( + ast.dump(a), ast.dump(b), ae)) + + +if not hasattr(TestCase, 'assertItemsEqual'): + setattr(TestCase, 'assertItemsEqual', TestCase.assertCountEqual) + + +def requires_features(*features): + return unittest.skipIf( + any(not supports_feature(feature) for feature in features), + 'Tests features which are not supported by this version of python. 
' + 'Missing: %r' % [f for f in features if not supports_feature(f)]) + + +def supports_feature(feature): + if feature == 'bytes_node': + return hasattr(ast, 'Bytes') and issubclass(ast.Bytes, ast.AST) + if feature == 'exec_node': + return hasattr(ast, 'Exec') and issubclass(ast.Exec, ast.AST) + if feature == 'type_annotations': + try: + ast.parse('def foo(bar: str=123) -> None: pass') + except SyntaxError: + return False + return True + if feature == 'fstring': + return hasattr(ast, 'JoinedStr') and issubclass(ast.JoinedStr, ast.AST) + # Python 2 counts tabs as 8 spaces for indentation + if feature == 'mixed_tabs_spaces': + return sys.version_info[0] < 3 + return False diff --git a/lib/python3.10/site-packages/pasta/base/test_utils_test.py b/lib/python3.10/site-packages/pasta/base/test_utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..f6dcc7a9dfdda007ff18dad5421c2ce14b506df1 --- /dev/null +++ b/lib/python3.10/site-packages/pasta/base/test_utils_test.py @@ -0,0 +1,66 @@ +# coding=utf-8 +"""Tests for google3.third_party.py.pasta.base.test_utils.""" +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast +import unittest + +from pasta.base import test_utils + + +class CheckAstEqualityTest(test_utils.TestCase): + + def test_empty(self): + src = "" + t = ast.parse(src) + self.checkAstsEqual(t, t) + + def test_one_global(self): + src = "X = 1\n" + t = ast.parse(src) + self.checkAstsEqual(t, t) + + def test_two_globals(self): + src = "X = 1\nY = 2\n" + t = ast.parse(src) + self.checkAstsEqual(t, t) + + def test_different_number_of_nodes(self): + src1 = "X = 1\ndef Foo():\n return None\n" + src2 = src1 + "Y = 2\n" + t1 = ast.parse(src1) + t2 = ast.parse(src2) + with self.assertRaises(AssertionError): + self.checkAstsEqual(t1, t2) + + def test_simple_function_def(self): + code = ("def foo(x):\n" + " return x + 1\n") + t = ast.parse(code) + self.checkAstsEqual(t, t) + + +def suite(): + result = unittest.TestSuite() + result.addTests(unittest.makeSuite(CheckAstEqualityTest)) + return result + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/pasta/base/token_generator.py b/lib/python3.10/site-packages/pasta/base/token_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..23fe7edb5bb4af8dbc00768ef34cef362eedbe91 --- /dev/null +++ b/lib/python3.10/site-packages/pasta/base/token_generator.py @@ -0,0 +1,513 @@ +# coding=utf-8 +"""Token generator for analyzing source code in logical units. + +This module contains the TokenGenerator used for annotating a parsed syntax tree +with source code formatting. +""" +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast +import collections +import contextlib +import itertools +import tokenize +from six import StringIO + +from pasta.base import formatting as fmt +from pasta.base import fstring_utils + +# Alias for extracting token names +TOKENS = tokenize +Token = collections.namedtuple('Token', ('type', 'src', 'start', 'end', 'line')) +FORMATTING_TOKENS = (TOKENS.INDENT, TOKENS.DEDENT, TOKENS.NL, TOKENS.NEWLINE, + TOKENS.COMMENT) + + +class TokenGenerator(object): + """Helper for sequentially parsing Python source code, token by token. + + Holds internal state during parsing, including: + _tokens: List of tokens in the source code, as parsed by `tokenize` module. + _parens: Stack of open parenthesis at the current point in parsing. + _hints: Number of open parentheses, brackets, etc. at the current point. + _scope_stack: Stack containing tuples of nodes where the last parenthesis that + was open is related to one of the nodes on the top of the stack. + _lines: Full lines of the source code. + _i: Index of the last token that was parsed. Initially -1. + _loc: (lineno, column_offset) pair of the position in the source that has been + parsed to. This should be either the start or end of the token at index _i. + + Arguments: + ignore_error_tokens: If True, will ignore error tokens. Otherwise, an error + token will cause an exception. This is useful when the source being parsed + contains invalid syntax, e.g. if it is in an fstring context. 
+ """ + + def __init__(self, source, ignore_error_token=False): + self.lines = source.splitlines(True) + self._tokens = list(_generate_tokens(source, ignore_error_token)) + self._parens = [] + self._hints = 0 + self._scope_stack = [] + self._len = len(self._tokens) + self._i = -1 + self._loc = self.loc_begin() + + def chars_consumed(self): + return len(self._space_between((1, 0), self._tokens[self._i].end)) + + def loc_begin(self): + """Get the start column of the current location parsed to.""" + if self._i < 0: + return (1, 0) + return self._tokens[self._i].start + + def loc_end(self): + """Get the end column of the current location parsed to.""" + if self._i < 0: + return (1, 0) + return self._tokens[self._i].end + + def peek(self): + """Get the next token without advancing.""" + if self._i + 1 >= self._len: + return None + return self._tokens[self._i + 1] + + def peek_non_whitespace(self): + """Get the next non-whitespace token without advancing.""" + return self.peek_conditional(lambda t: t.type not in FORMATTING_TOKENS) + + def peek_conditional(self, condition): + """Get the next token of the given type without advancing.""" + return next((t for t in self._tokens[self._i + 1:] if condition(t)), None) + + def next(self, advance=True): + """Consume the next token and optionally advance the current location.""" + self._i += 1 + if self._i >= self._len: + return None + if advance: + self._loc = self._tokens[self._i].end + return self._tokens[self._i] + + def rewind(self, amount=1): + """Rewind the token iterator.""" + self._i -= amount + + def whitespace(self, max_lines=None, comment=False): + """Parses whitespace from the current _loc to the next non-whitespace. + + Arguments: + max_lines: (optional int) Maximum number of lines to consider as part of + the whitespace. Valid values are None, 0 and 1. + comment: (boolean) If True, look for a trailing comment even when not in + a parenthesized scope. 
+ + Pre-condition: + `_loc' represents the point before which everything has been parsed and + after which nothing has been parsed. + Post-condition: + `_loc' is exactly at the character that was parsed to. + """ + next_token = self.peek() + if not comment and next_token and next_token.type == TOKENS.COMMENT: + return '' + def predicate(token): + return (token.type in (TOKENS.INDENT, TOKENS.DEDENT) or + token.type == TOKENS.COMMENT and (comment or self._hints) or + token.type == TOKENS.ERRORTOKEN and token.src == ' ' or + max_lines is None and token.type in (TOKENS.NL, TOKENS.NEWLINE)) + whitespace = list(self.takewhile(predicate, advance=False)) + next_token = self.peek() + + result = '' + for tok in itertools.chain(whitespace, + ((next_token,) if next_token else ())): + result += self._space_between(self._loc, tok.start) + if tok != next_token: + result += tok.src + self._loc = tok.end + else: + self._loc = tok.start + + # Eat a single newline character + if ((max_lines is None or max_lines > 0) and + next_token and next_token.type in (TOKENS.NL, TOKENS.NEWLINE)): + result += self.next().src + + return result + + def block_whitespace(self, indent_level): + """Parses whitespace from the current _loc to the end of the block.""" + # Get the normal suffix lines, but don't advance the token index unless + # there is no indentation to account for + start_i = self._i + full_whitespace = self.whitespace(comment=True) + if not indent_level: + return full_whitespace + self._i = start_i + + # Trim the full whitespace into only lines that match the indentation level + lines = full_whitespace.splitlines(True) + try: + last_line_idx = next(i for i, line in reversed(list(enumerate(lines))) + if line.startswith(indent_level + '#')) + except StopIteration: + # No comment lines at the end of this block + self._loc = self._tokens[self._i].end + return '' + lines = lines[:last_line_idx + 1] + + # Advance the current location to the last token in the lines we've read + end_line = 
self._tokens[self._i].end[0] + 1 + len(lines) + list(self.takewhile(lambda tok: tok.start[0] < end_line)) + self._loc = self._tokens[self._i].end + return ''.join(lines) + + def dots(self, num_dots): + """Parse a number of dots. + + This is to work around an oddity in python3's tokenizer, which treats three + `.` tokens next to each other in a FromImport's level as an ellipsis. This + parses until the expected number of dots have been seen. + """ + result = '' + dots_seen = 0 + prev_loc = self._loc + while dots_seen < num_dots: + tok = self.next() + assert tok.src in ('.', '...') + result += self._space_between(prev_loc, tok.start) + tok.src + dots_seen += tok.src.count('.') + prev_loc = self._loc + return result + + def open_scope(self, node, single_paren=False): + """Open a parenthesized scope on the given node.""" + result = '' + parens = [] + start_i = self._i + start_loc = prev_loc = self._loc + + # Eat whitespace or '(' tokens one at a time + for tok in self.takewhile( + lambda t: t.type in FORMATTING_TOKENS or t.src == '('): + # Stores all the code up to and including this token + result += self._space_between(prev_loc, tok.start) + + if tok.src == '(' and single_paren and parens: + self.rewind() + self._loc = tok.start + break + + result += tok.src + if tok.src == '(': + # Start a new scope + parens.append(result) + result = '' + start_i = self._i + start_loc = self._loc + prev_loc = self._loc + + if parens: + # Add any additional whitespace on to the last open-paren + next_tok = self.peek() + parens[-1] += result + self._space_between(self._loc, next_tok.start) + self._loc = next_tok.start + # Add each paren onto the stack + for paren in parens: + self._parens.append(paren) + self._scope_stack.append(_scope_helper(node)) + else: + # No parens were encountered, then reset like this method did nothing + self._i = start_i + self._loc = start_loc + + def close_scope(self, node, prefix_attr='prefix', suffix_attr='suffix', + trailing_comma=False, 
single_paren=False): + """Close a parenthesized scope on the given node, if one is open.""" + # Ensures the prefix + suffix are not None + if fmt.get(node, prefix_attr) is None: + fmt.set(node, prefix_attr, '') + if fmt.get(node, suffix_attr) is None: + fmt.set(node, suffix_attr, '') + + if not self._parens or node not in self._scope_stack[-1]: + return + symbols = {')'} + if trailing_comma: + symbols.add(',') + parsed_to_i = self._i + parsed_to_loc = prev_loc = self._loc + encountered_paren = False + result = '' + + for tok in self.takewhile( + lambda t: t.type in FORMATTING_TOKENS or t.src in symbols): + # Consume all space up to this token + result += self._space_between(prev_loc, tok.start) + if tok.src == ')' and single_paren and encountered_paren: + self.rewind() + parsed_to_i = self._i + parsed_to_loc = tok.start + fmt.append(node, suffix_attr, result) + break + + # Consume the token itself + result += tok.src + + if tok.src == ')': + # Close out the open scope + encountered_paren = True + self._scope_stack.pop() + fmt.prepend(node, prefix_attr, self._parens.pop()) + fmt.append(node, suffix_attr, result) + result = '' + parsed_to_i = self._i + parsed_to_loc = tok.end + if not self._parens or node not in self._scope_stack[-1]: + break + prev_loc = tok.end + + # Reset back to the last place where we parsed anything + self._i = parsed_to_i + self._loc = parsed_to_loc + + def hint_open(self): + """Indicates opening a group of parentheses or brackets.""" + self._hints += 1 + + def hint_closed(self): + """Indicates closing a group of parentheses or brackets.""" + self._hints -= 1 + if self._hints < 0: + raise ValueError('Hint value negative') + + @contextlib.contextmanager + def scope(self, node, attr=None, trailing_comma=False): + """Context manager to handle a parenthesized scope.""" + self.open_scope(node, single_paren=(attr is not None)) + yield + if attr: + self.close_scope(node, prefix_attr=attr + '_prefix', + suffix_attr=attr + '_suffix', + 
trailing_comma=trailing_comma, + single_paren=True) + else: + self.close_scope(node, trailing_comma=trailing_comma) + + def is_in_scope(self): + """Return True iff there is a scope open.""" + return self._parens or self._hints + + def str(self): + """Parse a full string literal from the input.""" + def predicate(token): + return (token.type in (TOKENS.STRING, TOKENS.COMMENT) or + self.is_in_scope() and token.type in (TOKENS.NL, TOKENS.NEWLINE)) + + return self.eat_tokens(predicate) + + def eat_tokens(self, predicate): + """Parse input from tokens while a given condition is met.""" + content = '' + prev_loc = self._loc + tok = None + for tok in self.takewhile(predicate, advance=False): + content += self._space_between(prev_loc, tok.start) + content += tok.src + prev_loc = tok.end + + if tok: + self._loc = tok.end + return content + + def fstr(self): + """Parses an fstring, including subexpressions. + + Returns: + A generator function which, when repeatedly reads a chunk of the fstring + up until the next subexpression and yields that chunk, plus a new token + generator to use to parse the subexpression. The subexpressions in the + original fstring data are replaced by placeholders to make it possible to + fill them in with new values, if desired. + """ + def fstr_parser(): + # Reads the whole fstring as a string, then parses it char by char + if self.peek_non_whitespace().type == TOKENS.STRING: + # Normal fstrings are one ore more STRING tokens, maybe mixed with + # spaces, e.g.: f"Hello, {name}" + str_content = self.str() + else: + # Format specifiers in fstrings are also JoinedStr nodes, but these are + # arbitrary expressions, e.g. in: f"{value:{width}.{precision}}", the + # format specifier is an fstring: "{width}.{precision}" but these are + # not STRING tokens. 
+ def fstr_eater(tok): + if tok.type == TOKENS.OP and tok.src == '}': + if fstr_eater.level <= 0: + return False + fstr_eater.level -= 1 + if tok.type == TOKENS.OP and tok.src == '{': + fstr_eater.level += 1 + return True + fstr_eater.level = 0 + str_content = self.eat_tokens(fstr_eater) + + indexed_chars = enumerate(str_content) + val_idx = 0 + i = -1 + result = '' + while i < len(str_content) - 1: + i, c = next(indexed_chars) + result += c + + # When an open bracket is encountered, start parsing a subexpression + if c == '{': + # First check if this is part of an escape sequence + # (f"{{" is used to escape a bracket literal) + nexti, nextc = next(indexed_chars) + if nextc == '{': + result += c + continue + indexed_chars = itertools.chain([(nexti, nextc)], indexed_chars) + + # Add a placeholder onto the result + result += fstring_utils.placeholder(val_idx) + '}' + val_idx += 1 + + # Yield a new token generator to parse the subexpression only + tg = TokenGenerator(str_content[i+1:], ignore_error_token=True) + yield (result, tg) + result = '' + + # Skip the number of characters consumed by the subexpression + for tg_i in range(tg.chars_consumed()): + i, c = next(indexed_chars) + + # Eat up to and including the close bracket + i, c = next(indexed_chars) + while c != '}': + i, c = next(indexed_chars) + # Yield the rest of the fstring, when done + yield (result, None) + return fstr_parser + + def _space_between(self, start_loc, end_loc): + """Parse the space between a location and the next token""" + if start_loc > end_loc: + raise ValueError('start_loc > end_loc', start_loc, end_loc) + if start_loc[0] > len(self.lines): + return '' + + prev_row, prev_col = start_loc + end_row, end_col = end_loc + if prev_row == end_row: + return self.lines[prev_row - 1][prev_col:end_col] + + return ''.join(itertools.chain( + (self.lines[prev_row - 1][prev_col:],), + self.lines[prev_row:end_row - 1], + (self.lines[end_row - 1][:end_col],) if end_col > 0 else '', + )) + + def 
next_name(self): + """Parse the next name token.""" + last_i = self._i + def predicate(token): + return token.type != TOKENS.NAME + + unused_tokens = list(self.takewhile(predicate, advance=False)) + result = self.next(advance=False) + self._i = last_i + return result + + def next_of_type(self, token_type): + """Parse a token of the given type and return it.""" + token = self.next() + if token.type != token_type: + raise ValueError("Expected %r but found %r\nline %d: %s" % ( + tokenize.tok_name[token_type], token.src, token.start[0], + self.lines[token.start[0] - 1])) + return token + + def takewhile(self, condition, advance=True): + """Parse tokens as long as a condition holds on the next token.""" + prev_loc = self._loc + token = self.next(advance=advance) + while token is not None and condition(token): + yield token + prev_loc = self._loc + token = self.next(advance=advance) + self.rewind() + self._loc = prev_loc + + +def _scope_helper(node): + """Get the closure of nodes that could begin a scope at this point. + + For instance, when encountering a `(` when parsing a BinOp node, this could + indicate that the BinOp itself is parenthesized OR that the BinOp's left node + could be parenthesized. + + E.g.: (a + b * c) or (a + b) * c or (a) + b * c + ^ ^ ^ + + Arguments: + node: (ast.AST) Node encountered when opening a scope. + + Returns: + A closure of nodes which that scope might apply to. 
+ """ + if isinstance(node, ast.Attribute): + return (node,) + _scope_helper(node.value) + if isinstance(node, ast.Subscript): + return (node,) + _scope_helper(node.value) + if isinstance(node, ast.Assign): + return (node,) + _scope_helper(node.targets[0]) + if isinstance(node, ast.AugAssign): + return (node,) + _scope_helper(node.target) + if isinstance(node, ast.Expr): + return (node,) + _scope_helper(node.value) + if isinstance(node, ast.Compare): + return (node,) + _scope_helper(node.left) + if isinstance(node, ast.BoolOp): + return (node,) + _scope_helper(node.values[0]) + if isinstance(node, ast.BinOp): + return (node,) + _scope_helper(node.left) + if isinstance(node, ast.Tuple) and node.elts: + return (node,) + _scope_helper(node.elts[0]) + if isinstance(node, ast.Call): + return (node,) + _scope_helper(node.func) + if isinstance(node, ast.GeneratorExp): + return (node,) + _scope_helper(node.elt) + if isinstance(node, ast.IfExp): + return (node,) + _scope_helper(node.body) + return (node,) + + +def _generate_tokens(source, ignore_error_token=False): + token_generator = tokenize.generate_tokens(StringIO(source).readline) + try: + for tok in token_generator: + yield Token(*tok) + except tokenize.TokenError: + if not ignore_error_token: + raise diff --git a/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/INSTALLER b/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/LICENSE b/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..85f4dd63d2da8e31d7e84d5180f016fdfe315c2c --- /dev/null +++ b/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/LICENSE @@ -0,0 +1,21 @@ +The MIT 
License (MIT) + +Copyright (c) 2015 holger krekel (rather uses bitbucket/hpk42) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/METADATA b/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..2d697b0d7219c58fa370de4c2eeca04e0afed575 --- /dev/null +++ b/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/METADATA @@ -0,0 +1,155 @@ +Metadata-Version: 2.1 +Name: pluggy +Version: 1.5.0 +Summary: plugin and hook calling mechanisms for python +Home-page: https://github.com/pytest-dev/pluggy +Author: Holger Krekel +Author-email: holger@merlinux.eu +License: MIT +Platform: unix +Platform: linux +Platform: osx +Platform: win32 +Classifier: Development Status :: 6 - Mature +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: POSIX +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Topic :: Software Development :: Testing +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Utilities +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Requires-Python: >=3.8 +Description-Content-Type: text/x-rst +License-File: LICENSE +Provides-Extra: dev +Requires-Dist: pre-commit ; extra == 'dev' +Requires-Dist: tox ; extra == 'dev' +Provides-Extra: testing +Requires-Dist: pytest ; extra == 'testing' +Requires-Dist: pytest-benchmark ; extra == 'testing' + +==================================================== +pluggy - A minimalist production ready plugin system +==================================================== + 
+|pypi| |conda-forge| |versions| |github-actions| |gitter| |black| |codecov| + +This is the core framework used by the `pytest`_, `tox`_, and `devpi`_ projects. + +Please `read the docs`_ to learn more! + +A definitive example +==================== +.. code-block:: python + + import pluggy + + hookspec = pluggy.HookspecMarker("myproject") + hookimpl = pluggy.HookimplMarker("myproject") + + + class MySpec: + """A hook specification namespace.""" + + @hookspec + def myhook(self, arg1, arg2): + """My special little hook that you can customize.""" + + + class Plugin_1: + """A hook implementation namespace.""" + + @hookimpl + def myhook(self, arg1, arg2): + print("inside Plugin_1.myhook()") + return arg1 + arg2 + + + class Plugin_2: + """A 2nd hook implementation namespace.""" + + @hookimpl + def myhook(self, arg1, arg2): + print("inside Plugin_2.myhook()") + return arg1 - arg2 + + + # create a manager and add the spec + pm = pluggy.PluginManager("myproject") + pm.add_hookspecs(MySpec) + + # register plugins + pm.register(Plugin_1()) + pm.register(Plugin_2()) + + # call our ``myhook`` hook + results = pm.hook.myhook(arg1=1, arg2=2) + print(results) + + +Running this directly gets us:: + + $ python docs/examples/toy-example.py + inside Plugin_2.myhook() + inside Plugin_1.myhook() + [-1, 3] + + +.. badges + +.. |pypi| image:: https://img.shields.io/pypi/v/pluggy.svg + :target: https://pypi.org/pypi/pluggy + +.. |versions| image:: https://img.shields.io/pypi/pyversions/pluggy.svg + :target: https://pypi.org/pypi/pluggy + +.. |github-actions| image:: https://github.com/pytest-dev/pluggy/workflows/main/badge.svg + :target: https://github.com/pytest-dev/pluggy/actions + +.. |conda-forge| image:: https://img.shields.io/conda/vn/conda-forge/pluggy.svg + :target: https://anaconda.org/conda-forge/pytest + +.. 
|gitter| image:: https://badges.gitter.im/pytest-dev/pluggy.svg + :alt: Join the chat at https://gitter.im/pytest-dev/pluggy + :target: https://gitter.im/pytest-dev/pluggy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge + +.. |black| image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/ambv/black + +.. |codecov| image:: https://codecov.io/gh/pytest-dev/pluggy/branch/master/graph/badge.svg + :target: https://codecov.io/gh/pytest-dev/pluggy + :alt: Code coverage Status + +.. links +.. _pytest: + http://pytest.org +.. _tox: + https://tox.readthedocs.org +.. _devpi: + http://doc.devpi.net +.. _read the docs: + https://pluggy.readthedocs.io/en/latest/ + + +Support pluggy +-------------- + +`Open Collective`_ is an online funding platform for open and transparent communities. +It provides tools to raise money and share your finances in full transparency. + +It is the platform of choice for individuals and companies that want to make one-time or +monthly donations directly to the project. + +``pluggy`` is part of the ``pytest-dev`` project, see more details in the `pytest collective`_. + +.. _Open Collective: https://opencollective.com +.. 
_pytest collective: https://opencollective.com/pytest diff --git a/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/RECORD b/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..7de96b35e2d0de48975cb48fdf1c8a801abf07e5 --- /dev/null +++ b/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/RECORD @@ -0,0 +1,16 @@ +pluggy-1.5.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +pluggy-1.5.0.dist-info/LICENSE,sha256=1rZebCE6XQtXeRHTTW5ZSbn1nXbCOMUHGi8_wWz7JgY,1110 +pluggy-1.5.0.dist-info/METADATA,sha256=6JeHn3o9P9iqwK20MgVHdoqxick1SS3SORb65Iyb-Fw,4812 +pluggy-1.5.0.dist-info/RECORD,, +pluggy-1.5.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pluggy-1.5.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92 +pluggy-1.5.0.dist-info/top_level.txt,sha256=xKSCRhai-v9MckvMuWqNz16c1tbsmOggoMSwTgcpYHE,7 +pluggy/__init__.py,sha256=U8qtIRmmr0SRdbxAF8VJJs01jMUYgKAc9oAjYYCLgz4,980 +pluggy/_callers.py,sha256=8k8i3GVBT_gtccCPFpN8Ww0towWSnSazrl0vbP9UXSY,7316 +pluggy/_hooks.py,sha256=m-3qVLDdn4S9y3pffLOpMQeDI4PDw8hrATK1SC8rQkU,25108 +pluggy/_manager.py,sha256=ylIDFwrUP_mMAGpdRPj9zwxukG7nWJAfY1yylXyXAMo,20265 +pluggy/_result.py,sha256=eEak-7Ie88bRkylsgbLwB6iMogogIMZheq8W3bImmcs,2849 +pluggy/_tracing.py,sha256=kSBr25F_rNklV2QhLD6h1jx6Z1kcKDRbuYvF5jv35pU,2089 +pluggy/_version.py,sha256=OYzqgMEgfFG0au4hzbEdgYI-c7Hxo3wdBtrpEjK1RoY,411 +pluggy/_warnings.py,sha256=td0AvZBpfamriCC3OqsLwxMh-SzAMjfjmc58T5vP3lw,828 +pluggy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/REQUESTED b/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/WHEEL 
b/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..bab98d675883cc7567a79df485cd7b4f015e376f --- /dev/null +++ b/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.43.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/top_level.txt b/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..11bdb5c1f5fcdd91af5d587c352039cb8476af49 --- /dev/null +++ b/lib/python3.10/site-packages/pluggy-1.5.0.dist-info/top_level.txt @@ -0,0 +1 @@ +pluggy diff --git a/lib/python3.10/site-packages/rust/Cargo.toml b/lib/python3.10/site-packages/rust/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..9eb165a96f14fdf44da47f9e7eb5ca5e9c33b84d --- /dev/null +++ b/lib/python3.10/site-packages/rust/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "cryptography-rust" +version.workspace = true +authors.workspace = true +edition.workspace = true +publish.workspace = true +rust-version.workspace = true + +[dependencies] +once_cell = "1" +cfg-if = "1" +pyo3.workspace = true +asn1.workspace = true +cryptography-cffi = { path = "cryptography-cffi" } +cryptography-keepalive = { path = "cryptography-keepalive" } +cryptography-key-parsing = { path = "cryptography-key-parsing" } +cryptography-x509 = { path = "cryptography-x509" } +cryptography-x509-verification = { path = "cryptography-x509-verification" } +cryptography-openssl = { path = "cryptography-openssl" } +pem = { version = "3", default-features = false } +openssl = "0.10.68" +openssl-sys = "0.9.104" +foreign-types-shared = "0.1" +self_cell = "1" + +[features] +extension-module = ["pyo3/extension-module"] +default = ["extension-module"] + +[lib] +name = "cryptography_rust" +crate-type = ["cdylib"] + +[lints.rust] +unexpected_cfgs = { 
level = "warn", check-cfg = ['cfg(CRYPTOGRAPHY_OPENSSL_300_OR_GREATER)', 'cfg(CRYPTOGRAPHY_OPENSSL_309_OR_GREATER)', 'cfg(CRYPTOGRAPHY_OPENSSL_320_OR_GREATER)', 'cfg(CRYPTOGRAPHY_IS_LIBRESSL)', 'cfg(CRYPTOGRAPHY_IS_BORINGSSL)', 'cfg(CRYPTOGRAPHY_OSSLCONF, values("OPENSSL_NO_IDEA", "OPENSSL_NO_CAST", "OPENSSL_NO_BF", "OPENSSL_NO_CAMELLIA", "OPENSSL_NO_SEED", "OPENSSL_NO_SM4"))'] } diff --git a/lib/python3.10/site-packages/rust/cryptography-cffi/Cargo.toml b/lib/python3.10/site-packages/rust/cryptography-cffi/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..9408de8b44152d0c472e60400b3dd8f9e2e71584 --- /dev/null +++ b/lib/python3.10/site-packages/rust/cryptography-cffi/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "cryptography-cffi" +version.workspace = true +authors.workspace = true +edition.workspace = true +publish.workspace = true +rust-version.workspace = true + +[dependencies] +pyo3.workspace = true +openssl-sys = "0.9.104" + +[build-dependencies] +cc = "1.2.1" + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(python_implementation, values("CPython", "PyPy"))'] } diff --git a/lib/python3.10/site-packages/rust/cryptography-keepalive/Cargo.toml b/lib/python3.10/site-packages/rust/cryptography-keepalive/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..baf8d9342119eab7e4a26355db530bcd0feaea20 --- /dev/null +++ b/lib/python3.10/site-packages/rust/cryptography-keepalive/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "cryptography-keepalive" +version.workspace = true +authors.workspace = true +edition.workspace = true +publish.workspace = true +rust-version.workspace = true + +[dependencies] +pyo3.workspace = true diff --git a/lib/python3.10/site-packages/rust/cryptography-key-parsing/Cargo.toml b/lib/python3.10/site-packages/rust/cryptography-key-parsing/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..9b96b736c405f8c746f7c8e22ae60f15f00bb8f8 --- 
/dev/null +++ b/lib/python3.10/site-packages/rust/cryptography-key-parsing/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "cryptography-key-parsing" +version.workspace = true +authors.workspace = true +edition.workspace = true +publish.workspace = true +rust-version.workspace = true + +[dependencies] +asn1.workspace = true +cfg-if = "1" +openssl = "0.10.68" +openssl-sys = "0.9.104" +cryptography-x509 = { path = "../cryptography-x509" } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(CRYPTOGRAPHY_IS_LIBRESSL)', 'cfg(CRYPTOGRAPHY_IS_BORINGSSL)'] } diff --git a/lib/python3.10/site-packages/rust/cryptography-openssl/Cargo.toml b/lib/python3.10/site-packages/rust/cryptography-openssl/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..3d4c17ebaafdc35a75d6c58739873b896d07ec65 --- /dev/null +++ b/lib/python3.10/site-packages/rust/cryptography-openssl/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "cryptography-openssl" +version.workspace = true +authors.workspace = true +edition.workspace = true +publish.workspace = true +rust-version.workspace = true + +[dependencies] +cfg-if = "1" +openssl = "0.10.68" +ffi = { package = "openssl-sys", version = "0.9.101" } +foreign-types = "0.3" +foreign-types-shared = "0.1" + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(CRYPTOGRAPHY_OPENSSL_300_OR_GREATER)', 'cfg(CRYPTOGRAPHY_OPENSSL_320_OR_GREATER)', 'cfg(CRYPTOGRAPHY_IS_LIBRESSL)', 'cfg(CRYPTOGRAPHY_IS_BORINGSSL)'] } diff --git a/lib/python3.10/site-packages/rust/cryptography-x509-verification/Cargo.toml b/lib/python3.10/site-packages/rust/cryptography-x509-verification/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..2cc2ff48829cc0dc9cecf34a7f3af61216891da6 --- /dev/null +++ b/lib/python3.10/site-packages/rust/cryptography-x509-verification/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "cryptography-x509-verification" +version.workspace = true +authors.workspace = true 
+edition.workspace = true +publish.workspace = true +rust-version.workspace = true + +[dependencies] +asn1.workspace = true +cryptography-x509 = { path = "../cryptography-x509" } +cryptography-key-parsing = { path = "../cryptography-key-parsing" } +once_cell = "1" + +[dev-dependencies] +pem = { version = "3", default-features = false } diff --git a/lib/python3.10/site-packages/rust/cryptography-x509/Cargo.toml b/lib/python3.10/site-packages/rust/cryptography-x509/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..03f2c260890edb66ac0ef5649d67daded646f244 --- /dev/null +++ b/lib/python3.10/site-packages/rust/cryptography-x509/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "cryptography-x509" +version = "0.1.0" +authors = ["The cryptography developers "] +edition = "2021" +publish = false +# This specifies the MSRV +rust-version = "1.65.0" + +[dependencies] +asn1.workspace = true