Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- testbed/google-deepmind__optax/.gitignore +26 -0
- testbed/google-deepmind__optax/CONTRIBUTING.md +34 -0
- testbed/google-deepmind__optax/LICENSE +202 -0
- testbed/google-deepmind__optax/README.md +144 -0
- testbed/google-deepmind__optax/optax/__init__.py +459 -0
- testbed/google-deepmind__optax/optax/losses/_classification.py +681 -0
- testbed/google-deepmind__optax/optax/losses/_classification_test.py +867 -0
- testbed/google-deepmind__optax/optax/losses/_fenchel_young_test.py +63 -0
- testbed/google-deepmind__optax/optax/losses/_regression_test.py +178 -0
- testbed/google-deepmind__optax/optax/monte_carlo/__init__.py +22 -0
- testbed/google-deepmind__optax/optax/monte_carlo/control_variates.py +423 -0
- testbed/google-deepmind__optax/optax/monte_carlo/control_variates_test.py +597 -0
- testbed/google-deepmind__optax/optax/monte_carlo/stochastic_gradient_estimators.py +317 -0
- testbed/google-deepmind__optax/optax/monte_carlo/stochastic_gradient_estimators_test.py +371 -0
- testbed/google-deepmind__optax/optax/optax_test.py +32 -0
- testbed/google-deepmind__optax/optax/projections/__init__.py +21 -0
- testbed/google-deepmind__optax/optax/projections/_projections.py +163 -0
- testbed/google-deepmind__optax/optax/projections/_projections_test.py +172 -0
- testbed/google-deepmind__optax/optax/schedules/_inject_test.py +249 -0
- testbed/google-deepmind__optax/optax/schedules/_join.py +45 -0
- testbed/google-deepmind__optax/optax/schedules/_join_test.py +44 -0
- testbed/google-deepmind__optax/optax/second_order/__init__.py +19 -0
- testbed/google-deepmind__optax/optax/second_order/_base.py +30 -0
- testbed/google-deepmind__optax/optax/second_order/_hessian.py +80 -0
- testbed/google-deepmind__optax/optax/second_order/_hessian_test.py +88 -0
- testbed/google-deepmind__optax/optax/transforms/__init__.py +92 -0
- testbed/google-deepmind__optax/optax/transforms/_accumulation.py +393 -0
- testbed/google-deepmind__optax/optax/transforms/_accumulation_test.py +298 -0
- testbed/google-deepmind__optax/optax/transforms/_adding.py +105 -0
- testbed/google-deepmind__optax/optax/transforms/_adding_test.py +96 -0
- testbed/google-deepmind__optax/optax/transforms/_clipping.py +282 -0
- testbed/google-deepmind__optax/optax/transforms/_clipping_test.py +143 -0
- testbed/google-deepmind__optax/optax/transforms/_combining.py +255 -0
- testbed/google-deepmind__optax/optax/transforms/_combining_test.py +284 -0
- testbed/google-deepmind__optax/optax/transforms/_conditionality.py +253 -0
- testbed/google-deepmind__optax/optax/transforms/_conditionality_test.py +286 -0
- testbed/google-deepmind__optax/optax/transforms/_constraining.py +93 -0
- testbed/google-deepmind__optax/optax/transforms/_constraining_test.py +119 -0
- testbed/google-deepmind__optax/optax/transforms/_layouts.py +77 -0
- testbed/google-deepmind__optax/optax/transforms/_layouts_test.py +59 -0
- testbed/google-deepmind__optax/optax/transforms/_masking.py +136 -0
- testbed/google-deepmind__optax/optax/transforms/_masking_test.py +348 -0
- testbed/google-deepmind__optax/optax/tree_utils/__init__.py +41 -0
- testbed/google-deepmind__optax/optax/tree_utils/_casting.py +31 -0
- testbed/google-deepmind__optax/optax/tree_utils/_casting_test.py +50 -0
- testbed/google-deepmind__optax/optax/tree_utils/_random.py +68 -0
- testbed/google-deepmind__optax/optax/tree_utils/_random_test.py +102 -0
- testbed/google-deepmind__optax/optax/tree_utils/_state_utils.py +786 -0
- testbed/google-deepmind__optax/optax/tree_utils/_state_utils_test.py +599 -0
- testbed/google-deepmind__optax/optax/tree_utils/_tree_math.py +318 -0
testbed/google-deepmind__optax/.gitignore
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Building and releasing library:
|
| 2 |
+
*.egg-info
|
| 3 |
+
*.pyc
|
| 4 |
+
*.so
|
| 5 |
+
build/
|
| 6 |
+
dist/
|
| 7 |
+
venv/
|
| 8 |
+
_testing/
|
| 9 |
+
|
| 10 |
+
# Building the documentation
|
| 11 |
+
docs/_autosummary
|
| 12 |
+
docs/_collections
|
| 13 |
+
docs/modules/generated
|
| 14 |
+
|
| 15 |
+
# Mac OS
|
| 16 |
+
.DS_Store
|
| 17 |
+
|
| 18 |
+
# Python tools
|
| 19 |
+
.mypy_cache/
|
| 20 |
+
.pytype/
|
| 21 |
+
.ipynb_checkpoints
|
| 22 |
+
|
| 23 |
+
# Editors
|
| 24 |
+
.idea
|
| 25 |
+
.vscode
|
| 26 |
+
|
testbed/google-deepmind__optax/CONTRIBUTING.md
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# How to Contribute
|
| 2 |
+
|
| 3 |
+
We'd love to accept your patches and contributions to this project. There are
|
| 4 |
+
just a few small guidelines you need to follow.
|
| 5 |
+
|
| 6 |
+
## Contributor License Agreement
|
| 7 |
+
|
| 8 |
+
Contributions to this project must be accompanied by a Contributor License
|
| 9 |
+
Agreement. You (or your employer) retain the copyright to your contribution;
|
| 10 |
+
this simply gives us permission to use and redistribute your contributions as
|
| 11 |
+
part of the project. Head over to <https://cla.developers.google.com/> to see
|
| 12 |
+
your current agreements on file or to sign a new one.
|
| 13 |
+
|
| 14 |
+
You generally only need to submit a CLA once, so if you've already submitted one
|
| 15 |
+
(even if it was for a different project), you probably don't need to do it
|
| 16 |
+
again.
|
| 17 |
+
|
| 18 |
+
## Code reviews
|
| 19 |
+
|
| 20 |
+
All submissions, including submissions by project members, require review. We
|
| 21 |
+
use GitHub pull requests for this purpose. Consult
|
| 22 |
+
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
|
| 23 |
+
information on using pull requests.
|
| 24 |
+
|
| 25 |
+
## Testing
|
| 26 |
+
|
| 27 |
+
Please make sure that your PR passes all tests by running `bash test.sh` on your
|
| 28 |
+
local machine. Also, you can run only tests that are affected by your code
|
| 29 |
+
changes, but you will need to select them manually.
|
| 30 |
+
|
| 31 |
+
## Community Guidelines
|
| 32 |
+
|
| 33 |
+
This project follows [Google's Open Source Community
|
| 34 |
+
Guidelines](https://opensource.google.com/conduct/).
|
testbed/google-deepmind__optax/LICENSE
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
Apache License
|
| 3 |
+
Version 2.0, January 2004
|
| 4 |
+
http://www.apache.org/licenses/
|
| 5 |
+
|
| 6 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 7 |
+
|
| 8 |
+
1. Definitions.
|
| 9 |
+
|
| 10 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 11 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 12 |
+
|
| 13 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 14 |
+
the copyright owner that is granting the License.
|
| 15 |
+
|
| 16 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 17 |
+
other entities that control, are controlled by, or are under common
|
| 18 |
+
control with that entity. For the purposes of this definition,
|
| 19 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 20 |
+
direction or management of such entity, whether by contract or
|
| 21 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 22 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 23 |
+
|
| 24 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 25 |
+
exercising permissions granted by this License.
|
| 26 |
+
|
| 27 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 28 |
+
including but not limited to software source code, documentation
|
| 29 |
+
source, and configuration files.
|
| 30 |
+
|
| 31 |
+
"Object" form shall mean any form resulting from mechanical
|
| 32 |
+
transformation or translation of a Source form, including but
|
| 33 |
+
not limited to compiled object code, generated documentation,
|
| 34 |
+
and conversions to other media types.
|
| 35 |
+
|
| 36 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 37 |
+
Object form, made available under the License, as indicated by a
|
| 38 |
+
copyright notice that is included in or attached to the work
|
| 39 |
+
(an example is provided in the Appendix below).
|
| 40 |
+
|
| 41 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 42 |
+
form, that is based on (or derived from) the Work and for which the
|
| 43 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 44 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 45 |
+
of this License, Derivative Works shall not include works that remain
|
| 46 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 47 |
+
the Work and Derivative Works thereof.
|
| 48 |
+
|
| 49 |
+
"Contribution" shall mean any work of authorship, including
|
| 50 |
+
the original version of the Work and any modifications or additions
|
| 51 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 52 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 53 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 54 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 55 |
+
means any form of electronic, verbal, or written communication sent
|
| 56 |
+
to the Licensor or its representatives, including but not limited to
|
| 57 |
+
communication on electronic mailing lists, source code control systems,
|
| 58 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 59 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 60 |
+
excluding communication that is conspicuously marked or otherwise
|
| 61 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 62 |
+
|
| 63 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 64 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 65 |
+
subsequently incorporated within the Work.
|
| 66 |
+
|
| 67 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 68 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 69 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 70 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 71 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 72 |
+
Work and such Derivative Works in Source or Object form.
|
| 73 |
+
|
| 74 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 75 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 76 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 77 |
+
(except as stated in this section) patent license to make, have made,
|
| 78 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 79 |
+
where such license applies only to those patent claims licensable
|
| 80 |
+
by such Contributor that are necessarily infringed by their
|
| 81 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 82 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 83 |
+
institute patent litigation against any entity (including a
|
| 84 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 85 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 86 |
+
or contributory patent infringement, then any patent licenses
|
| 87 |
+
granted to You under this License for that Work shall terminate
|
| 88 |
+
as of the date such litigation is filed.
|
| 89 |
+
|
| 90 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 91 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 92 |
+
modifications, and in Source or Object form, provided that You
|
| 93 |
+
meet the following conditions:
|
| 94 |
+
|
| 95 |
+
(a) You must give any other recipients of the Work or
|
| 96 |
+
Derivative Works a copy of this License; and
|
| 97 |
+
|
| 98 |
+
(b) You must cause any modified files to carry prominent notices
|
| 99 |
+
stating that You changed the files; and
|
| 100 |
+
|
| 101 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 102 |
+
that You distribute, all copyright, patent, trademark, and
|
| 103 |
+
attribution notices from the Source form of the Work,
|
| 104 |
+
excluding those notices that do not pertain to any part of
|
| 105 |
+
the Derivative Works; and
|
| 106 |
+
|
| 107 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 108 |
+
distribution, then any Derivative Works that You distribute must
|
| 109 |
+
include a readable copy of the attribution notices contained
|
| 110 |
+
within such NOTICE file, excluding those notices that do not
|
| 111 |
+
pertain to any part of the Derivative Works, in at least one
|
| 112 |
+
of the following places: within a NOTICE text file distributed
|
| 113 |
+
as part of the Derivative Works; within the Source form or
|
| 114 |
+
documentation, if provided along with the Derivative Works; or,
|
| 115 |
+
within a display generated by the Derivative Works, if and
|
| 116 |
+
wherever such third-party notices normally appear. The contents
|
| 117 |
+
of the NOTICE file are for informational purposes only and
|
| 118 |
+
do not modify the License. You may add Your own attribution
|
| 119 |
+
notices within Derivative Works that You distribute, alongside
|
| 120 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 121 |
+
that such additional attribution notices cannot be construed
|
| 122 |
+
as modifying the License.
|
| 123 |
+
|
| 124 |
+
You may add Your own copyright statement to Your modifications and
|
| 125 |
+
may provide additional or different license terms and conditions
|
| 126 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 127 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 128 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 129 |
+
the conditions stated in this License.
|
| 130 |
+
|
| 131 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 132 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 133 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 134 |
+
this License, without any additional terms or conditions.
|
| 135 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 136 |
+
the terms of any separate license agreement you may have executed
|
| 137 |
+
with Licensor regarding such Contributions.
|
| 138 |
+
|
| 139 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 140 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 141 |
+
except as required for reasonable and customary use in describing the
|
| 142 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 143 |
+
|
| 144 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 145 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 146 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 147 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 148 |
+
implied, including, without limitation, any warranties or conditions
|
| 149 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 150 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 151 |
+
appropriateness of using or redistributing the Work and assume any
|
| 152 |
+
risks associated with Your exercise of permissions under this License.
|
| 153 |
+
|
| 154 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 155 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 156 |
+
unless required by applicable law (such as deliberate and grossly
|
| 157 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 158 |
+
liable to You for damages, including any direct, indirect, special,
|
| 159 |
+
incidental, or consequential damages of any character arising as a
|
| 160 |
+
result of this License or out of the use or inability to use the
|
| 161 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 162 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 163 |
+
other commercial damages or losses), even if such Contributor
|
| 164 |
+
has been advised of the possibility of such damages.
|
| 165 |
+
|
| 166 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 167 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 168 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 169 |
+
or other liability obligations and/or rights consistent with this
|
| 170 |
+
License. However, in accepting such obligations, You may act only
|
| 171 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 172 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 173 |
+
defend, and hold each Contributor harmless for any liability
|
| 174 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 175 |
+
of your accepting any such warranty or additional liability.
|
| 176 |
+
|
| 177 |
+
END OF TERMS AND CONDITIONS
|
| 178 |
+
|
| 179 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 180 |
+
|
| 181 |
+
To apply the Apache License to your work, attach the following
|
| 182 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 183 |
+
replaced with your own identifying information. (Don't include
|
| 184 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 185 |
+
comment syntax for the file format. We also recommend that a
|
| 186 |
+
file or class name and description of purpose be included on the
|
| 187 |
+
same "printed page" as the copyright notice for easier
|
| 188 |
+
identification within third-party archives.
|
| 189 |
+
|
| 190 |
+
Copyright [yyyy] [name of copyright owner]
|
| 191 |
+
|
| 192 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 193 |
+
you may not use this file except in compliance with the License.
|
| 194 |
+
You may obtain a copy of the License at
|
| 195 |
+
|
| 196 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 197 |
+
|
| 198 |
+
Unless required by applicable law or agreed to in writing, software
|
| 199 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 200 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 201 |
+
See the License for the specific language governing permissions and
|
| 202 |
+
limitations under the License.
|
testbed/google-deepmind__optax/README.md
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Optax
|
| 2 |
+
|
| 3 |
+

|
| 4 |
+
[](http://optax.readthedocs.io)
|
| 5 |
+

|
| 6 |
+
|
| 7 |
+
## Introduction
|
| 8 |
+
|
| 9 |
+
Optax is a gradient processing and optimization library for JAX.
|
| 10 |
+
|
| 11 |
+
Optax is designed to facilitate research by providing building blocks
|
| 12 |
+
that can be easily recombined in custom ways.
|
| 13 |
+
|
| 14 |
+
Our goals are to
|
| 15 |
+
|
| 16 |
+
* Provide simple, well-tested, efficient implementations of core components.
|
| 17 |
+
* Improve research productivity by enabling to easily combine low-level
|
| 18 |
+
ingredients into custom optimisers (or other gradient processing components).
|
| 19 |
+
* Accelerate adoption of new ideas by making it easy for anyone to contribute.
|
| 20 |
+
|
| 21 |
+
We favour focusing on small composable building blocks that can be effectively
|
| 22 |
+
combined into custom solutions. Others may build upon these basic components
|
| 23 |
+
in more complicated abstractions. Whenever reasonable, implementations prioritise
|
| 24 |
+
readability and structuring code to match standard equations, over code reuse.
|
| 25 |
+
|
| 26 |
+
An initial prototype of this library was made available in JAX's experimental
|
| 27 |
+
folder as `jax.experimental.optix`. Given the wide adoption across DeepMind
|
| 28 |
+
of `optix`, and after a few iterations on the API, `optix` was eventually moved
|
| 29 |
+
out of `experimental` as a standalone open-source library, and renamed `optax`.
|
| 30 |
+
|
| 31 |
+
Documentation on Optax can be found at [optax.readthedocs.io](https://optax.readthedocs.io/).
|
| 32 |
+
|
| 33 |
+
## Installation
|
| 34 |
+
|
| 35 |
+
You can install the latest released version of Optax from PyPI via:
|
| 36 |
+
|
| 37 |
+
```sh
|
| 38 |
+
pip install optax
|
| 39 |
+
```
|
| 40 |
+
|
| 41 |
+
or you can install the latest development version from GitHub:
|
| 42 |
+
|
| 43 |
+
```sh
|
| 44 |
+
pip install git+https://github.com/google-deepmind/optax.git
|
| 45 |
+
```
|
| 46 |
+
|
| 47 |
+
## Quickstart
|
| 48 |
+
|
| 49 |
+
Optax contains implementations of [many popular optimizers](https://optax.readthedocs.io/en/latest/api/optimizers.html) and
|
| 50 |
+
[loss functions](https://optax.readthedocs.io/en/latest/api/losses.html).
|
| 51 |
+
For example, the following code snippet uses the Adam optimizer from `optax.adam`
|
| 52 |
+
and the mean squared error from `optax.l2_loss`. We initialize the optimizer
|
| 53 |
+
state using the `init` function and `params` of the model.
|
| 54 |
+
|
| 55 |
+
```python
|
| 56 |
+
optimizer = optax.adam(learning_rate)
|
| 57 |
+
# Obtain the `opt_state` that contains statistics for the optimizer.
|
| 58 |
+
params = {'w': jnp.ones((num_weights,))}
|
| 59 |
+
opt_state = optimizer.init(params)
|
| 60 |
+
```
|
| 61 |
+
|
| 62 |
+
To write the update loop we need a loss function that can be differentiated by
|
| 63 |
+
Jax (with `jax.grad` in this
|
| 64 |
+
example) to obtain the gradients.
|
| 65 |
+
|
| 66 |
+
```python
|
| 67 |
+
compute_loss = lambda params, x, y: optax.l2_loss(params['w'].dot(x), y)
|
| 68 |
+
grads = jax.grad(compute_loss)(params, xs, ys)
|
| 69 |
+
```
|
| 70 |
+
|
| 71 |
+
The gradients are then converted via `optimizer.update` to obtain the updates
|
| 72 |
+
that should be applied to the current parameters to obtain the new ones.
|
| 73 |
+
`optax.apply_updates` is a convenience utility to do this.
|
| 74 |
+
|
| 75 |
+
```python
|
| 76 |
+
updates, opt_state = optimizer.update(grads, opt_state)
|
| 77 |
+
params = optax.apply_updates(params, updates)
|
| 78 |
+
```
|
| 79 |
+
|
| 80 |
+
You can continue the quick start in [the Optax 🚀 Getting started notebook.](https://github.com/google-deepmind/optax/blob/main/docs/getting_started.ipynb)
|
| 81 |
+
|
| 82 |
+
## Development
|
| 83 |
+
|
| 84 |
+
We welcome new contributors.
|
| 85 |
+
|
| 86 |
+
### Source code
|
| 87 |
+
|
| 88 |
+
You can check the latest sources with the following command.
|
| 89 |
+
|
| 90 |
+
```sh
|
| 91 |
+
git clone https://github.com/google-deepmind/optax.git
|
| 92 |
+
```
|
| 93 |
+
### Testing
|
| 94 |
+
|
| 95 |
+
To run the tests, please execute the following script.
|
| 96 |
+
|
| 97 |
+
```sh
|
| 98 |
+
sh ./test.sh
|
| 99 |
+
```
|
| 100 |
+
|
| 101 |
+
### Documentation
|
| 102 |
+
|
| 103 |
+
To build the documentation, first ensure that all the dependencies are installed.
|
| 104 |
+
```sh
|
| 105 |
+
pip install -e ".[docs]"
|
| 106 |
+
```
|
| 107 |
+
Then, execute the following.
|
| 108 |
+
```sh
|
| 109 |
+
cd docs/
|
| 110 |
+
make html
|
| 111 |
+
```
|
| 112 |
+
|
| 113 |
+
## Benchmarks
|
| 114 |
+
If you feel lost in the crowd of available optimizers for deep learning, there
|
| 115 |
+
exist some extensive benchmarks:
|
| 116 |
+
|
| 117 |
+
[Benchmarking Neural Network Training Algorithms, Dahl G. et al, 2023](https://arxiv.org/pdf/2306.07179),
|
| 118 |
+
|
| 119 |
+
[Descending through a Crowded Valley — Benchmarking Deep Learning Optimizers, Schmidt R. et al, 2021](https://proceedings.mlr.press/v139/schmidt21a).
|
| 120 |
+
|
| 121 |
+
If you are interested in developing your own benchmark for some tasks,
|
| 122 |
+
consider the following framework
|
| 123 |
+
|
| 124 |
+
[Benchopt: Reproducible, efficient and collaborative optimization benchmarks, Moreau T. et al, 2022](https://arxiv.org/abs/2206.13424).
|
| 125 |
+
|
| 126 |
+
Finally, if you are searching for some recommendations on tuning optimizers,
|
| 127 |
+
consider taking a look at
|
| 128 |
+
|
| 129 |
+
[Deep Learning Tuning Playbook, Godbole V. et al, 2023](https://github.com/google-research/tuning_playbook).
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
## Citing Optax
|
| 133 |
+
|
| 134 |
+
This repository is part of the DeepMind JAX Ecosystem, to cite Optax
|
| 135 |
+
please use the citation:
|
| 136 |
+
|
| 137 |
+
```bibtex
|
| 138 |
+
@software{deepmind2020jax,
|
| 139 |
+
title = {The {D}eep{M}ind {JAX} {E}cosystem},
|
| 140 |
+
author = {DeepMind and Babuschkin, Igor and Baumli, Kate and Bell, Alison and Bhupatiraju, Surya and Bruce, Jake and Buchlovsky, Peter and Budden, David and Cai, Trevor and Clark, Aidan and Danihelka, Ivo and Dedieu, Antoine and Fantacci, Claudio and Godwin, Jonathan and Jones, Chris and Hemsley, Ross and Hennigan, Tom and Hessel, Matteo and Hou, Shaobo and Kapturowski, Steven and Keck, Thomas and Kemaev, Iurii and King, Michael and Kunesch, Markus and Martens, Lena and Merzic, Hamza and Mikulik, Vladimir and Norman, Tamara and Papamakarios, George and Quan, John and Ring, Roman and Ruiz, Francisco and Sanchez, Alvaro and Sartran, Laurent and Schneider, Rosalia and Sezener, Eren and Spencer, Stephen and Srinivasan, Srivatsan and Stanojevi\'{c}, Milo\v{s} and Stokowiec, Wojciech and Wang, Luyu and Zhou, Guangyao and Viola, Fabio},
|
| 141 |
+
url = {http://github.com/google-deepmind},
|
| 142 |
+
year = {2020},
|
| 143 |
+
}
|
| 144 |
+
```
|
testbed/google-deepmind__optax/optax/__init__.py
ADDED
|
@@ -0,0 +1,459 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Optax: composable gradient processing and optimization, in JAX."""
|
| 16 |
+
|
| 17 |
+
# pylint: disable=wrong-import-position
|
| 18 |
+
# pylint: disable=g-importing-member
|
| 19 |
+
|
| 20 |
+
from optax import contrib
|
| 21 |
+
from optax import losses
|
| 22 |
+
from optax import monte_carlo
|
| 23 |
+
from optax import projections
|
| 24 |
+
from optax import schedules
|
| 25 |
+
from optax import second_order
|
| 26 |
+
from optax import transforms
|
| 27 |
+
from optax import tree_utils
|
| 28 |
+
from optax._src.alias import adabelief
|
| 29 |
+
from optax._src.alias import adadelta
|
| 30 |
+
from optax._src.alias import adafactor
|
| 31 |
+
from optax._src.alias import adagrad
|
| 32 |
+
from optax._src.alias import adam
|
| 33 |
+
from optax._src.alias import adamax
|
| 34 |
+
from optax._src.alias import adamaxw
|
| 35 |
+
from optax._src.alias import adamw
|
| 36 |
+
from optax._src.alias import amsgrad
|
| 37 |
+
from optax._src.alias import fromage
|
| 38 |
+
from optax._src.alias import lamb
|
| 39 |
+
from optax._src.alias import lars
|
| 40 |
+
from optax._src.alias import lion
|
| 41 |
+
from optax._src.alias import MaskOrFn
|
| 42 |
+
from optax._src.alias import nadam
|
| 43 |
+
from optax._src.alias import nadamw
|
| 44 |
+
from optax._src.alias import noisy_sgd
|
| 45 |
+
from optax._src.alias import novograd
|
| 46 |
+
from optax._src.alias import optimistic_gradient_descent
|
| 47 |
+
from optax._src.alias import polyak_sgd
|
| 48 |
+
from optax._src.alias import radam
|
| 49 |
+
from optax._src.alias import rmsprop
|
| 50 |
+
from optax._src.alias import rprop
|
| 51 |
+
from optax._src.alias import sgd
|
| 52 |
+
from optax._src.alias import sm3
|
| 53 |
+
from optax._src.alias import yogi
|
| 54 |
+
from optax._src.base import EmptyState
|
| 55 |
+
from optax._src.base import GradientTransformation
|
| 56 |
+
from optax._src.base import GradientTransformationExtraArgs
|
| 57 |
+
from optax._src.base import identity
|
| 58 |
+
from optax._src.base import OptState
|
| 59 |
+
from optax._src.base import Params
|
| 60 |
+
from optax._src.base import ScalarOrSchedule
|
| 61 |
+
from optax._src.base import Schedule
|
| 62 |
+
from optax._src.base import set_to_zero
|
| 63 |
+
from optax._src.base import stateless
|
| 64 |
+
from optax._src.base import stateless_with_tree_map
|
| 65 |
+
from optax._src.base import TransformInitFn
|
| 66 |
+
from optax._src.base import TransformUpdateExtraArgsFn
|
| 67 |
+
from optax._src.base import TransformUpdateFn
|
| 68 |
+
from optax._src.base import Updates
|
| 69 |
+
from optax._src.base import with_extra_args_support
|
| 70 |
+
from optax._src.clipping import adaptive_grad_clip
|
| 71 |
+
from optax._src.clipping import AdaptiveGradClipState
|
| 72 |
+
from optax._src.clipping import clip
|
| 73 |
+
from optax._src.clipping import clip_by_block_rms
|
| 74 |
+
from optax._src.clipping import clip_by_global_norm
|
| 75 |
+
from optax._src.clipping import ClipByGlobalNormState
|
| 76 |
+
from optax._src.clipping import ClipState
|
| 77 |
+
from optax._src.clipping import per_example_global_norm_clip
|
| 78 |
+
from optax._src.clipping import per_example_layer_norm_clip
|
| 79 |
+
from optax._src.combine import chain
|
| 80 |
+
from optax._src.combine import multi_transform
|
| 81 |
+
from optax._src.combine import MultiTransformState
|
| 82 |
+
from optax._src.combine import named_chain
|
| 83 |
+
from optax._src.constrain import keep_params_nonnegative
|
| 84 |
+
from optax._src.constrain import NonNegativeParamsState
|
| 85 |
+
from optax._src.constrain import zero_nans
|
| 86 |
+
from optax._src.constrain import ZeroNansState
|
| 87 |
+
from optax._src.factorized import FactoredState
|
| 88 |
+
from optax._src.factorized import scale_by_factored_rms
|
| 89 |
+
from optax._src.linear_algebra import global_norm
|
| 90 |
+
from optax._src.linear_algebra import matrix_inverse_pth_root
|
| 91 |
+
from optax._src.linear_algebra import power_iteration
|
| 92 |
+
from optax._src.linesearch import scale_by_backtracking_linesearch
|
| 93 |
+
from optax._src.linesearch import ScaleByBacktrackingLinesearchState
|
| 94 |
+
from optax._src.lookahead import lookahead
|
| 95 |
+
from optax._src.lookahead import LookaheadParams
|
| 96 |
+
from optax._src.lookahead import LookaheadState
|
| 97 |
+
from optax._src.numerics import safe_int32_increment
|
| 98 |
+
from optax._src.numerics import safe_norm
|
| 99 |
+
from optax._src.numerics import safe_root_mean_squares
|
| 100 |
+
from optax._src.transform import add_decayed_weights
|
| 101 |
+
from optax._src.transform import add_noise
|
| 102 |
+
from optax._src.transform import AddDecayedWeightsState
|
| 103 |
+
from optax._src.transform import AddNoiseState
|
| 104 |
+
from optax._src.transform import apply_every
|
| 105 |
+
from optax._src.transform import ApplyEvery
|
| 106 |
+
from optax._src.transform import centralize
|
| 107 |
+
from optax._src.transform import ema
|
| 108 |
+
from optax._src.transform import EmaState
|
| 109 |
+
from optax._src.transform import scale
|
| 110 |
+
from optax._src.transform import scale_by_adadelta
|
| 111 |
+
from optax._src.transform import scale_by_adam
|
| 112 |
+
from optax._src.transform import scale_by_adamax
|
| 113 |
+
from optax._src.transform import scale_by_amsgrad
|
| 114 |
+
from optax._src.transform import scale_by_belief
|
| 115 |
+
from optax._src.transform import scale_by_distance_over_gradients
|
| 116 |
+
from optax._src.transform import scale_by_learning_rate
|
| 117 |
+
from optax._src.transform import scale_by_lion
|
| 118 |
+
from optax._src.transform import scale_by_novograd
|
| 119 |
+
from optax._src.transform import scale_by_optimistic_gradient
|
| 120 |
+
from optax._src.transform import scale_by_param_block_norm
|
| 121 |
+
from optax._src.transform import scale_by_param_block_rms
|
| 122 |
+
from optax._src.transform import scale_by_polyak
|
| 123 |
+
from optax._src.transform import scale_by_radam
|
| 124 |
+
from optax._src.transform import scale_by_rms
|
| 125 |
+
from optax._src.transform import scale_by_rprop
|
| 126 |
+
from optax._src.transform import scale_by_rss
|
| 127 |
+
from optax._src.transform import scale_by_schedule
|
| 128 |
+
from optax._src.transform import scale_by_sm3
|
| 129 |
+
from optax._src.transform import scale_by_stddev
|
| 130 |
+
from optax._src.transform import scale_by_trust_ratio
|
| 131 |
+
from optax._src.transform import scale_by_yogi
|
| 132 |
+
from optax._src.transform import ScaleByAdaDeltaState
|
| 133 |
+
from optax._src.transform import ScaleByAdamState
|
| 134 |
+
from optax._src.transform import ScaleByAmsgradState
|
| 135 |
+
from optax._src.transform import ScaleByBeliefState
|
| 136 |
+
from optax._src.transform import ScaleByLionState
|
| 137 |
+
from optax._src.transform import ScaleByNovogradState
|
| 138 |
+
from optax._src.transform import ScaleByRmsState
|
| 139 |
+
from optax._src.transform import ScaleByRpropState
|
| 140 |
+
from optax._src.transform import ScaleByRssState
|
| 141 |
+
from optax._src.transform import ScaleByRStdDevState
|
| 142 |
+
from optax._src.transform import ScaleByScheduleState
|
| 143 |
+
from optax._src.transform import ScaleBySM3State
|
| 144 |
+
from optax._src.transform import ScaleByTrustRatioState
|
| 145 |
+
from optax._src.transform import ScaleState
|
| 146 |
+
from optax._src.transform import trace
|
| 147 |
+
from optax._src.transform import TraceState
|
| 148 |
+
from optax._src.update import apply_updates
|
| 149 |
+
from optax._src.update import incremental_update
|
| 150 |
+
from optax._src.update import periodic_update
|
| 151 |
+
from optax._src.utils import multi_normal
|
| 152 |
+
from optax._src.utils import scale_gradient
|
| 153 |
+
from optax._src.utils import value_and_grad_from_state
|
| 154 |
+
from optax._src.wrappers import apply_if_finite
|
| 155 |
+
from optax._src.wrappers import ApplyIfFiniteState
|
| 156 |
+
from optax._src.wrappers import conditionally_mask
|
| 157 |
+
from optax._src.wrappers import conditionally_transform
|
| 158 |
+
from optax._src.wrappers import ConditionallyMaskState
|
| 159 |
+
from optax._src.wrappers import ConditionallyTransformState
|
| 160 |
+
from optax._src.wrappers import flatten
|
| 161 |
+
from optax._src.wrappers import masked
|
| 162 |
+
from optax._src.wrappers import MaskedNode
|
| 163 |
+
from optax._src.wrappers import MaskedState
|
| 164 |
+
from optax._src.wrappers import maybe_update
|
| 165 |
+
from optax._src.wrappers import MaybeUpdateState
|
| 166 |
+
from optax._src.wrappers import MultiSteps
|
| 167 |
+
from optax._src.wrappers import MultiStepsState
|
| 168 |
+
from optax._src.wrappers import ShouldSkipUpdateFunction
|
| 169 |
+
from optax._src.wrappers import skip_large_updates
|
| 170 |
+
from optax._src.wrappers import skip_not_finite
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
# TODO(mtthss): remove tree_utils aliases after updates.
|
| 174 |
+
tree_map_params = tree_utils.tree_map_params
|
| 175 |
+
bias_correction = tree_utils.tree_bias_correction
|
| 176 |
+
update_infinity_moment = tree_utils.tree_update_infinity_moment
|
| 177 |
+
update_moment = tree_utils.tree_update_moment
|
| 178 |
+
update_moment_per_elem_norm = tree_utils.tree_update_moment_per_elem_norm
|
| 179 |
+
|
| 180 |
+
# TODO(mtthss): remove schedules aliases from flat namespaces after user updates.
|
| 181 |
+
constant_schedule = schedules.constant_schedule
|
| 182 |
+
cosine_decay_schedule = schedules.cosine_decay_schedule
|
| 183 |
+
cosine_onecycle_schedule = schedules.cosine_onecycle_schedule
|
| 184 |
+
exponential_decay = schedules.exponential_decay
|
| 185 |
+
inject_hyperparams = schedules.inject_hyperparams
|
| 186 |
+
InjectHyperparamsState = schedules.InjectHyperparamsState
|
| 187 |
+
join_schedules = schedules.join_schedules
|
| 188 |
+
linear_onecycle_schedule = schedules.linear_onecycle_schedule
|
| 189 |
+
linear_schedule = schedules.linear_schedule
|
| 190 |
+
piecewise_constant_schedule = schedules.piecewise_constant_schedule
|
| 191 |
+
piecewise_interpolate_schedule = schedules.piecewise_interpolate_schedule
|
| 192 |
+
polynomial_schedule = schedules.polynomial_schedule
|
| 193 |
+
sgdr_schedule = schedules.sgdr_schedule
|
| 194 |
+
warmup_cosine_decay_schedule = schedules.warmup_cosine_decay_schedule
|
| 195 |
+
warmup_exponential_decay_schedule = schedules.warmup_exponential_decay_schedule
|
| 196 |
+
inject_stateful_hyperparams = schedules.inject_stateful_hyperparams
|
| 197 |
+
InjectStatefulHyperparamsState = schedules.InjectStatefulHyperparamsState
|
| 198 |
+
WrappedSchedule = schedules.WrappedSchedule
|
| 199 |
+
|
| 200 |
+
# TODO(mtthss): remove loss aliases from flat namespace once users have updated.
|
| 201 |
+
convex_kl_divergence = losses.convex_kl_divergence
|
| 202 |
+
cosine_distance = losses.cosine_distance
|
| 203 |
+
cosine_similarity = losses.cosine_similarity
|
| 204 |
+
ctc_loss = losses.ctc_loss
|
| 205 |
+
ctc_loss_with_forward_probs = losses.ctc_loss_with_forward_probs
|
| 206 |
+
hinge_loss = losses.hinge_loss
|
| 207 |
+
huber_loss = losses.huber_loss
|
| 208 |
+
kl_divergence = losses.kl_divergence
|
| 209 |
+
l2_loss = losses.l2_loss
|
| 210 |
+
log_cosh = losses.log_cosh
|
| 211 |
+
ntxent = losses.ntxent
|
| 212 |
+
sigmoid_binary_cross_entropy = losses.sigmoid_binary_cross_entropy
|
| 213 |
+
smooth_labels = losses.smooth_labels
|
| 214 |
+
safe_softmax_cross_entropy = losses.safe_softmax_cross_entropy
|
| 215 |
+
softmax_cross_entropy = losses.softmax_cross_entropy
|
| 216 |
+
softmax_cross_entropy_with_integer_labels = (
|
| 217 |
+
losses.softmax_cross_entropy_with_integer_labels
|
| 218 |
+
)
|
| 219 |
+
squared_error = losses.squared_error
|
| 220 |
+
sigmoid_focal_loss = losses.sigmoid_focal_loss
|
| 221 |
+
|
| 222 |
+
# pylint: disable=g-import-not-at-top
|
| 223 |
+
# TODO(mtthss): remove contrib aliases from flat namespace once users updated.
|
| 224 |
+
# Deprecated modules
|
| 225 |
+
from optax.contrib import differentially_private_aggregate as _deprecated_differentially_private_aggregate
|
| 226 |
+
from optax.contrib import DifferentiallyPrivateAggregateState as _deprecated_DifferentiallyPrivateAggregateState
|
| 227 |
+
from optax.contrib import dpsgd as _deprecated_dpsgd
|
| 228 |
+
|
| 229 |
+
# Maps a deprecated flat-namespace attribute name to a pair of
# (warning message, replacement object). This table is consumed below by
# `deprecation_getattr`, which installs a module `__getattr__` that emits
# the message when the deprecated name is accessed and returns the
# replacement object.
_deprecations = {
    # Added Apr 2024
    "differentially_private_aggregate": (
        (
            "optax.differentially_private_aggregate is deprecated: use"
            " optax.contrib.differentially_private_aggregate (optax v0.1.8 or"
            " newer)."
        ),
        _deprecated_differentially_private_aggregate,
    ),
    "DifferentiallyPrivateAggregateState": (
        (
            "optax.DifferentiallyPrivateAggregateState is deprecated: use"
            " optax.contrib.DifferentiallyPrivateAggregateState (optax v0.1.8"
            " or newer)."
        ),
        _deprecated_DifferentiallyPrivateAggregateState,
    ),
    "dpsgd": (
        (
            "optax.dpsgd is deprecated: use optax.contrib.dpsgd (optax v0.1.8"
            " or newer)."
        ),
        _deprecated_dpsgd,
    ),
}
|
| 255 |
+
# pylint: disable=g-bad-import-order
|
| 256 |
+
import typing as _typing
|
| 257 |
+
|
| 258 |
+
if _typing.TYPE_CHECKING:
|
| 259 |
+
# pylint: disable=reimported
|
| 260 |
+
from optax.contrib import differentially_private_aggregate
|
| 261 |
+
from optax.contrib import DifferentiallyPrivateAggregateState
|
| 262 |
+
from optax.contrib import dpsgd
|
| 263 |
+
# pylint: enable=reimported
|
| 264 |
+
|
| 265 |
+
else:
|
| 266 |
+
from optax._src.deprecations import deprecation_getattr as _deprecation_getattr
|
| 267 |
+
|
| 268 |
+
__getattr__ = _deprecation_getattr(__name__, _deprecations)
|
| 269 |
+
del _deprecation_getattr
|
| 270 |
+
del _typing
|
| 271 |
+
# pylint: enable=g-bad-import-order
|
| 272 |
+
# pylint: enable=g-import-not-at-top
|
| 273 |
+
# pylint: enable=g-importing-member
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
__version__ = "0.2.3.dev"
|
| 277 |
+
|
| 278 |
+
__all__ = (
|
| 279 |
+
"adabelief",
|
| 280 |
+
"adadelta",
|
| 281 |
+
"adafactor",
|
| 282 |
+
"adagrad",
|
| 283 |
+
"adam",
|
| 284 |
+
"adamax",
|
| 285 |
+
"adamaxw",
|
| 286 |
+
"adamw",
|
| 287 |
+
"adaptive_grad_clip",
|
| 288 |
+
"AdaptiveGradClipState",
|
| 289 |
+
"add_decayed_weights",
|
| 290 |
+
"add_noise",
|
| 291 |
+
"AddDecayedWeightsState",
|
| 292 |
+
"AddNoiseState",
|
| 293 |
+
"amsgrad",
|
| 294 |
+
"apply_every",
|
| 295 |
+
"apply_if_finite",
|
| 296 |
+
"apply_updates",
|
| 297 |
+
"ApplyEvery",
|
| 298 |
+
"ApplyIfFiniteState",
|
| 299 |
+
"centralize",
|
| 300 |
+
"chain",
|
| 301 |
+
"clip_by_block_rms",
|
| 302 |
+
"clip_by_global_norm",
|
| 303 |
+
"clip",
|
| 304 |
+
"ClipByGlobalNormState",
|
| 305 |
+
"ClipState",
|
| 306 |
+
"conditionally_mask",
|
| 307 |
+
"ConditionallyMaskState",
|
| 308 |
+
"conditionally_transform",
|
| 309 |
+
"ConditionallyTransformState",
|
| 310 |
+
"constant_schedule",
|
| 311 |
+
"ctc_loss",
|
| 312 |
+
"ctc_loss_with_forward_probs",
|
| 313 |
+
"convex_kl_divergence",
|
| 314 |
+
"cosine_decay_schedule",
|
| 315 |
+
"cosine_distance",
|
| 316 |
+
"cosine_onecycle_schedule",
|
| 317 |
+
"cosine_similarity",
|
| 318 |
+
"differentially_private_aggregate",
|
| 319 |
+
"DifferentiallyPrivateAggregateState",
|
| 320 |
+
"dpsgd",
|
| 321 |
+
"ema",
|
| 322 |
+
"EmaState",
|
| 323 |
+
"EmptyState",
|
| 324 |
+
"exponential_decay",
|
| 325 |
+
"FactoredState",
|
| 326 |
+
"flatten",
|
| 327 |
+
"fromage",
|
| 328 |
+
"global_norm",
|
| 329 |
+
"GradientTransformation",
|
| 330 |
+
"GradientTransformationExtraArgs",
|
| 331 |
+
"hinge_loss",
|
| 332 |
+
"huber_loss",
|
| 333 |
+
"identity",
|
| 334 |
+
"incremental_update",
|
| 335 |
+
"inject_hyperparams",
|
| 336 |
+
"InjectHyperparamsState",
|
| 337 |
+
"join_schedules",
|
| 338 |
+
"keep_params_nonnegative",
|
| 339 |
+
"kl_divergence",
|
| 340 |
+
"l2_loss",
|
| 341 |
+
"lamb",
|
| 342 |
+
"lars",
|
| 343 |
+
"lion",
|
| 344 |
+
"linear_onecycle_schedule",
|
| 345 |
+
"linear_schedule",
|
| 346 |
+
"log_cosh",
|
| 347 |
+
"lookahead",
|
| 348 |
+
"LookaheadParams",
|
| 349 |
+
"LookaheadState",
|
| 350 |
+
"masked",
|
| 351 |
+
"MaskOrFn",
|
| 352 |
+
"MaskedState",
|
| 353 |
+
"matrix_inverse_pth_root",
|
| 354 |
+
"maybe_update",
|
| 355 |
+
"MaybeUpdateState",
|
| 356 |
+
"multi_normal",
|
| 357 |
+
"multi_transform",
|
| 358 |
+
"MultiSteps",
|
| 359 |
+
"MultiStepsState",
|
| 360 |
+
"MultiTransformState",
|
| 361 |
+
"nadam",
|
| 362 |
+
"nadamw",
|
| 363 |
+
"noisy_sgd",
|
| 364 |
+
"novograd",
|
| 365 |
+
"NonNegativeParamsState",
|
| 366 |
+
"ntxent",
|
| 367 |
+
"OptState",
|
| 368 |
+
"Params",
|
| 369 |
+
"periodic_update",
|
| 370 |
+
"per_example_global_norm_clip",
|
| 371 |
+
"per_example_layer_norm_clip",
|
| 372 |
+
"piecewise_constant_schedule",
|
| 373 |
+
"piecewise_interpolate_schedule",
|
| 374 |
+
"polynomial_schedule",
|
| 375 |
+
"power_iteration",
|
| 376 |
+
"polyak_sgd",
|
| 377 |
+
"radam",
|
| 378 |
+
"rmsprop",
|
| 379 |
+
"rprop",
|
| 380 |
+
"safe_int32_increment",
|
| 381 |
+
"safe_norm",
|
| 382 |
+
"safe_root_mean_squares",
|
| 383 |
+
"ScalarOrSchedule",
|
| 384 |
+
"scale_by_adadelta",
|
| 385 |
+
"scale_by_adam",
|
| 386 |
+
"scale_by_adamax",
|
| 387 |
+
"scale_by_amsgrad",
|
| 388 |
+
"scale_by_backtracking_linesearch",
|
| 389 |
+
"scale_by_belief",
|
| 390 |
+
"scale_by_lion",
|
| 391 |
+
"scale_by_factored_rms",
|
| 392 |
+
"scale_by_novograd",
|
| 393 |
+
"scale_by_param_block_norm",
|
| 394 |
+
"scale_by_param_block_rms",
|
| 395 |
+
"scale_by_polyak",
|
| 396 |
+
"scale_by_radam",
|
| 397 |
+
"scale_by_rms",
|
| 398 |
+
"scale_by_rprop",
|
| 399 |
+
"scale_by_rss",
|
| 400 |
+
"scale_by_schedule",
|
| 401 |
+
"scale_by_sm3",
|
| 402 |
+
"scale_by_stddev",
|
| 403 |
+
"scale_by_trust_ratio",
|
| 404 |
+
"scale_by_yogi",
|
| 405 |
+
"scale_gradient",
|
| 406 |
+
"scale",
|
| 407 |
+
"ScaleByAdaDeltaState",
|
| 408 |
+
"ScaleByAdamState",
|
| 409 |
+
"ScaleByAmsgradState",
|
| 410 |
+
"ScaleByBacktrackingLinesearchState",
|
| 411 |
+
"ScaleByBeliefState",
|
| 412 |
+
"ScaleByLionState",
|
| 413 |
+
"ScaleByNovogradState",
|
| 414 |
+
"ScaleByRmsState",
|
| 415 |
+
"ScaleByRpropState",
|
| 416 |
+
"ScaleByRssState",
|
| 417 |
+
"ScaleByRStdDevState",
|
| 418 |
+
"ScaleByScheduleState",
|
| 419 |
+
"ScaleBySM3State",
|
| 420 |
+
"ScaleByTrustRatioState",
|
| 421 |
+
"ScaleState",
|
| 422 |
+
"Schedule",
|
| 423 |
+
"set_to_zero",
|
| 424 |
+
"sgd",
|
| 425 |
+
"sgdr_schedule",
|
| 426 |
+
"ShouldSkipUpdateFunction",
|
| 427 |
+
"sigmoid_binary_cross_entropy",
|
| 428 |
+
"skip_large_updates",
|
| 429 |
+
"skip_not_finite",
|
| 430 |
+
"sm3",
|
| 431 |
+
"smooth_labels",
|
| 432 |
+
"softmax_cross_entropy",
|
| 433 |
+
"softmax_cross_entropy_with_integer_labels",
|
| 434 |
+
"stateless",
|
| 435 |
+
"stateless_with_tree_map",
|
| 436 |
+
"trace",
|
| 437 |
+
"TraceState",
|
| 438 |
+
"TransformInitFn",
|
| 439 |
+
"TransformUpdateFn",
|
| 440 |
+
"TransformUpdateExtraArgsFn",
|
| 441 |
+
"Updates",
|
| 442 |
+
"value_and_grad_from_state",
|
| 443 |
+
"warmup_cosine_decay_schedule",
|
| 444 |
+
"warmup_exponential_decay_schedule",
|
| 445 |
+
"yogi",
|
| 446 |
+
"zero_nans",
|
| 447 |
+
"ZeroNansState",
|
| 448 |
+
)
|
| 449 |
+
|
| 450 |
+
# _________________________________________
|
| 451 |
+
# / Please don't use symbols in `_src` they \
|
| 452 |
+
# \ are not part of the Optax public API. /
|
| 453 |
+
# -----------------------------------------
|
| 454 |
+
# \ ^__^
|
| 455 |
+
# \ (oo)\_______
|
| 456 |
+
# (__)\ )\/\
|
| 457 |
+
# ||----w |
|
| 458 |
+
# || ||
|
| 459 |
+
#
|
testbed/google-deepmind__optax/optax/losses/_classification.py
ADDED
|
@@ -0,0 +1,681 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Classification losses."""
|
| 16 |
+
|
| 17 |
+
import functools
|
| 18 |
+
from typing import Optional
|
| 19 |
+
|
| 20 |
+
import chex
|
| 21 |
+
import jax
|
| 22 |
+
import jax.numpy as jnp
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def sigmoid_binary_cross_entropy(
    logits,
    labels,
):
  """Sigmoid cross entropy, computed element-wise from logits and labels.

  Suitable for binary classification as well as multi-label settings where
  every class is an independent yes/no prediction and classes are not
  mutually exclusive (e.g. predicting that an image contains both a cat
  and a dog).

  Because this function is overloaded, `logits` and `labels` must be
  interpreted consistently: with binary `labels` (values in {0, 1}) the
  `logits` must score class 1 only, while per-class target probabilities or
  one-hot `labels` require multiclass `logits`. Be particularly careful if
  you rely on implicit broadcasting to reshape either argument.

  References:
    [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html)

  Args:
    logits: Each element is the unnormalized log probability of a binary
      prediction. See the compatibility note above.
    labels: Binary labels whose values are {0,1} or multi-class target
      probabilities. See the compatibility note above.

  Returns:
    cross entropy for each binary prediction, same shape as `logits`.
  """
  chex.assert_type([logits], float)
  labels = labels.astype(logits.dtype)
  # log(sigmoid(x)); and log(1 - sigmoid(x)) = log_sigmoid(-x), which is the
  # numerically stable way to obtain the complementary term.
  log_prob_one = jax.nn.log_sigmoid(logits)
  log_prob_zero = jax.nn.log_sigmoid(-logits)
  return -(labels * log_prob_one + (1. - labels) * log_prob_zero)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
@functools.partial(
    chex.warn_deprecated_function,
    replacement='sigmoid_binary_cross_entropy')
def binary_logistic_loss(logits, labels):
  """Deprecated alias of :func:`sigmoid_binary_cross_entropy`.

  Emits a deprecation warning on call (via `chex.warn_deprecated_function`)
  and forwards both arguments unchanged.

  Args:
    logits: see :func:`sigmoid_binary_cross_entropy`.
    labels: see :func:`sigmoid_binary_cross_entropy`.

  Returns:
    cross entropy for each binary prediction, same shape as `logits`.
  """
  return sigmoid_binary_cross_entropy(logits, labels)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def hinge_loss(
    predictor_outputs: chex.Array,
    targets: chex.Array
) -> chex.Array:
  """Computes the hinge loss for binary classification.

  The loss is ``max(0, 1 - outputs * targets)``: zero whenever the
  prediction has the correct sign and a margin of at least 1.

  Args:
    predictor_outputs: Outputs of the decision function.
    targets: Target values. Target values should be strictly in the set {-1, 1}.

  Returns:
    loss value.
  """
  margins = predictor_outputs * targets
  # relu(x) == max(0, x), so this clamps negative slack to zero.
  return jax.nn.relu(1 - margins)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def perceptron_loss(
    predictor_outputs: chex.Numeric,
    targets: chex.Numeric
) -> chex.Numeric:
  """Binary perceptron loss.

  The loss is ``max(0, -outputs * targets)``: zero for correctly signed
  predictions, linear in the score otherwise.

  References:
    https://en.wikipedia.org/wiki/Perceptron

  Args:
    predictor_outputs: score produced by the model (float).
    targets: Target values. Target values should be strictly in the set {-1, 1}.

  Returns:
    loss value.
  """
  chex.assert_equal_shape([predictor_outputs, targets])
  score = predictor_outputs * targets
  # relu(x) == max(0, x).
  return jax.nn.relu(-score)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def sparsemax_loss(
    logits: chex.Array,
    labels: chex.Array,
) -> chex.Array:
  """Binary sparsemax loss.

  This loss is zero if and only if `jax.nn.sparse_sigmoid(logits) == labels`.

  References:
    Learning with Fenchel-Young Losses. Mathieu Blondel, André F. T. Martins,
    Vlad Niculae. JMLR 2020. (Sec. 4.4)

  Args:
    logits: score produced by the model (float).
    labels: ground-truth integer label (0 or 1).

  Returns:
    loss value

  .. versionadded:: 0.2.3
  """
  # Flip the sign of the logit wherever the label is positive, then apply the
  # sparse_plus transform.
  signed_logits = jnp.where(labels, -logits, logits)
  return jax.nn.sparse_plus(signed_logits)
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
@functools.partial(
    chex.warn_deprecated_function,
    replacement='sparsemax_loss')
def binary_sparsemax_loss(logits, labels):
  """Deprecated alias of :func:`sparsemax_loss`.

  Emits a deprecation warning on call (via `chex.warn_deprecated_function`)
  and forwards both arguments unchanged.

  Args:
    logits: see :func:`sparsemax_loss`.
    labels: see :func:`sparsemax_loss`.

  Returns:
    loss value.
  """
  return sparsemax_loss(logits, labels)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
@jax.custom_jvp
def weighted_logsoftmax(x: chex.Array, weights: chex.Array) -> chex.Array:
  r"""Log-softmax scaled element-wise by weights.

  Computes

  .. math::
    (w_i \log(\exp x_i /(\sum_i \exp x_i )) )_{i=1}^n

  for :math:`x` the input ``x`` and :math:`w` the ``weights``. Entries with
  :math:`w_i = 0` yield 0 rather than nan even when :math:`x_i = -\infty`
  (where the log-softmax itself is :math:`-\infty`), following the
  convention that :math:`0 \log 0 = 0`.

  Args:
    x: input array.
    weights: weights.

  Returns:
    logsoftmax of x multiplied elementwise by weights
  """
  log_probs = jax.nn.log_softmax(x, axis=-1)
  # Zero-weight entries are forced to exactly 0 to avoid 0 * (-inf) = nan.
  return jnp.where(
      weights == 0.0, jnp.zeros_like(log_probs), weights * log_probs
  )
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def _weighted_logsoftmax_jvp(primals, tangents):
  """Custom JVP of weighted logsoftmax."""
  (x, weights) = primals
  (x_dot, weights_dot) = tangents
  # Recompute the primal output exactly as in `weighted_logsoftmax`,
  # including the 0 * log 0 = 0 masking of zero-weight entries.
  logsoftmax_x = jax.nn.log_softmax(x, axis=-1)
  result = jnp.where(
      weights != 0.0, weights * logsoftmax_x, jnp.zeros_like(logsoftmax_x)
  )
  # d/dx [w * log_softmax(x)] applied to x_dot is
  #   w * (x_dot - sum_j softmax(x)_j * x_dot_j)
  # (the log-softmax Jacobian is I - 1 softmax(x)^T), plus the tangent in
  # the weights direction, weights_dot * log_softmax(x).
  # NOTE(review): the weights_dot term is not masked where weights == 0, so
  # a nonzero weights_dot combined with logsoftmax_x == -inf could produce
  # an infinite tangent there — presumably callers differentiate w.r.t. x
  # only; confirm if weights ever carries a tangent.
  out_tangents = (
      weights * x_dot
      - weights
      * jnp.sum(x_dot * jax.nn.softmax(x, axis=-1), axis=-1, keepdims=True)
      + weights_dot * logsoftmax_x
  )
  return result, out_tangents


# Register the hand-written JVP rule on the @jax.custom_jvp-decorated primal.
weighted_logsoftmax.defjvp(_weighted_logsoftmax_jvp)
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def safe_softmax_cross_entropy(
    logits: chex.Array,
    labels: chex.Array,
) -> chex.Array:
  """Computes the softmax cross entropy between sets of logits and labels.

  Contrarily to :func:`optax.softmax_cross_entropy` this function handles
  ``labels*logsoftmax(logits)`` as ``0`` when ``logits=-inf`` and ``labels=0``,
  following the convention that ``0 log 0 = 0``.

  Args:
    logits: Unnormalized log probabilities, with shape `[..., num_classes]`.
    labels: Valid probability distributions (non-negative, sum to 1), e.g a
      one hot encoding specifying the correct class for each input;
      must have a shape broadcastable to `[..., num_classes]`.

  Returns:
    cross entropy between each prediction and the corresponding target
    distributions, with shape `[...]`.
  """
  chex.assert_type([logits], float)
  # `weighted_logsoftmax` performs the 0 * log 0 = 0 masking.
  per_class_terms = weighted_logsoftmax(logits, labels)
  return -jnp.sum(per_class_terms, axis=-1)
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def softmax_cross_entropy(
    logits: chex.Array,
    labels: chex.Array,
) -> chex.Array:
  """Computes the softmax cross entropy between sets of logits and labels.

  Measures the probability error for mutually exclusive classification
  tasks, i.e. each example belongs to exactly one class (a CIFAR-10 image
  is a dog or a truck, never both).

  References:
    [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html)

  Args:
    logits: Unnormalized log probabilities, with shape `[..., num_classes]`.
    labels: Valid probability distributions (non-negative, sum to 1), e.g a
      one hot encoding specifying the correct class for each input;
      must have a shape broadcastable to `[..., num_classes]`.

  Returns:
    cross entropy between each prediction and the corresponding target
    distributions, with shape `[...]`.

  .. seealso:: :func:`optax.safe_softmax_cross_entropy`
  """
  chex.assert_type([logits], float)
  log_probs = jax.nn.log_softmax(logits, axis=-1)
  expected_log_prob = jnp.sum(labels * log_probs, axis=-1)
  return -expected_log_prob
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def softmax_cross_entropy_with_integer_labels(
    logits: chex.Array,
    labels: chex.Array,
) -> chex.Array:
  """Computes softmax cross entropy between sets of logits and integer labels.

  Measures the probability error for mutually exclusive classification
  tasks, i.e. each example belongs to exactly one class (a CIFAR-10 image
  is a dog or a truck, never both).

  References:
    [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html)

  Args:
    logits: Unnormalized log probabilities, with shape `[..., num_classes]`.
    labels: Integers specifying the correct class for each input, with shape
      `[...]`.

  Returns:
    Cross entropy between each prediction and the corresponding target
    distributions, with shape `[...]`.
  """
  chex.assert_type([logits], float)
  chex.assert_type([labels], int)
  # Equivalent to jnp.take_along_axis(jax.nn.log_softmax(...), ...), but the
  # normalizer is only subtracted from the label logits, not every class.
  # The max-shift (gradient-blocked) keeps exp() from overflowing.
  shifted = logits - jax.lax.stop_gradient(
      jnp.max(logits, axis=-1, keepdims=True))
  label_logits = jnp.squeeze(
      jnp.take_along_axis(shifted, labels[..., None], axis=-1), axis=-1)
  log_normalizers = jnp.log(jnp.sum(jnp.exp(shifted), axis=-1))
  return log_normalizers - label_logits
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
@functools.partial(
    chex.warn_deprecated_function,
    replacement='softmax_cross_entropy_with_integer_labels')
def multiclass_logistic_loss(logits, labels):
  """Deprecated alias of :func:`softmax_cross_entropy_with_integer_labels`."""
  return softmax_cross_entropy_with_integer_labels(logits, labels)
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
# Batched dot product over the trailing axis: maps two arrays of shape
# [..., n] to an array of shape [...].
_dot_last_dim = jnp.vectorize(jnp.dot, signature='(n),(n)->()')
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
def multiclass_hinge_loss(
    scores: chex.Array,
    labels: chex.Array,
) -> chex.Array:
  """Multiclass hinge loss.

  Penalizes the margin by which the best competing score (plus one) exceeds
  the score assigned to the correct class.

  References:
    https://en.wikipedia.org/wiki/Hinge_loss

  Args:
    scores: scores produced by the model (floats).
    labels: ground-truth integer labels.

  Returns:
    loss values

  .. versionadded:: 0.2.3
  """
  num_classes = scores.shape[-1]
  targets_one_hot = jax.nn.one_hot(labels, num_classes)
  # Margin of 1 for every incorrect class; the correct class gets no margin.
  best_margin_score = jnp.max(scores + 1.0 - targets_one_hot, axis=-1)
  correct_score = _dot_last_dim(scores, targets_one_hot)
  return best_margin_score - correct_score
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def multiclass_perceptron_loss(
    scores: chex.Array,
    labels: chex.Array,
) -> chex.Array:
  """Multiclass perceptron loss.

  The difference between the highest score over all classes and the score of
  the correct class; zero exactly when the correct class already scores best.

  References:
    Michael Collins. Discriminative training methods for Hidden Markov Models:
    Theory and experiments with perceptron algorithms. EMNLP 2002

  Args:
    scores: scores produced by the model.
    labels: ground-truth integer labels.

  Returns:
    loss values.

  .. versionadded:: 0.2.2
  """
  targets_one_hot = jax.nn.one_hot(labels, scores.shape[-1])
  best_score = jnp.max(scores, axis=-1)
  correct_score = _dot_last_dim(scores, targets_one_hot)
  return best_score - correct_score
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
@functools.partial(chex.warn_only_n_pos_args_in_future, n=2)
def poly_loss_cross_entropy(
    logits: chex.Array,
    labels: chex.Array,
    epsilon: float = 2.0
) -> chex.Array:
  r"""Computes PolyLoss between logits and labels.

  PolyLoss decomposes common classification losses into a series of weighted
  polynomial bases, inspired by the Taylor expansion of cross-entropy and
  focal loss in the bases of :math:`(1 − P_t)^j`.

  .. math::
    L_{Poly} = \sum_1^\infty \alpha_j \cdot (1 - P_t)^j \\
    L_{Poly-N} = (\epsilon_1 + 1) \cdot (1 - P_t) + \ldots + \\
    (\epsilon_N + \frac{1}{N}) \cdot (1 - P_t)^N +
    \frac{1}{N + 1} \cdot (1 - P_t)^{N + 1} + \ldots = \\
    - \log(P_t) + \sum_{j = 1}^N \epsilon_j \cdot (1 - P_t)^j

  This function implements the simplified :math:`L_{Poly-N}` variant in which
  only the first polynomial coefficient is adjusted.

  References:
    [Zhaoqi Leng et al, 2022](https://arxiv.org/pdf/2204.12511.pdf)

  Args:
    logits: Unnormalized log probabilities, with shape `[..., num_classes]`.
    labels: Valid probability distributions (non-negative, sum to 1), e.g. a
      one hot encoding specifying the correct class for each input;
      must have a shape broadcastable to `[..., num_classes]`.
    epsilon: The coefficient of the first polynomial term.
      According to the paper, the following values are recommended:
      - For the ImageNet 2d image classification, epsilon = 2.0.
      - For the 2d Instance Segmentation and object detection, epsilon = -1.0.
      - It is also recommended to adjust this value based on the task, e.g. by
      using grid search.

  Returns:
    Poly loss between each prediction and the corresponding target
    distributions, with shape `[...]`.
  """
  chex.assert_type([logits, labels], float)
  # P_t is the probability mass the model assigns to the target distribution.
  probs = jax.nn.softmax(logits)
  one_minus_pt = jnp.sum(labels * (1 - probs), axis=-1)
  cross_entropy = softmax_cross_entropy(logits=logits, labels=labels)
  return cross_entropy + epsilon * one_minus_pt
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
def kl_divergence(
    log_predictions: chex.Array,
    targets: chex.Array
) -> chex.Array:
  """Computes the Kullback-Leibler divergence (relative entropy) loss.

  Measures the information gain achieved if target probability distribution
  would be used instead of predicted probability distribution.

  References:
    [Kullback, Leibler, 1951](https://www.jstor.org/stable/2236703)

  Args:
    log_predictions: Probabilities of predicted distribution with shape [...,
      dim]. Expected to be in the log-space to avoid underflow.
    targets: Probabilities of target distribution with shape [..., dim].
      Expected to be strictly positive.

  Returns:
    Kullback-Leibler divergence of predicted distribution from target
    distribution with shape [...].
  """
  chex.assert_type([log_predictions, targets], float)
  # Guard log(0): a zero target contributes 0 (the 0 log 0 = 0 convention).
  safe_log_targets = jnp.where(targets == 0, 0, jnp.log(targets))
  elementwise = targets * (safe_log_targets - log_predictions)
  return jnp.sum(elementwise, axis=-1)
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
def kl_divergence_with_log_targets(
    log_predictions: chex.Array,
    log_targets: chex.Array
) -> chex.Array:
  """Computes the Kullback-Leibler divergence (relative entropy) loss.

  Version of kl_div_loss where targets are given in log-space.

  Args:
    log_predictions: Probabilities of predicted distribution with shape
      [..., dim]. Expected to be in the log-space to avoid underflow.
    log_targets: Probabilities of target distribution with shape [..., dim].
      Expected to be in the log-space.

  Returns:
    Kullback-Leibler divergence of predicted distribution from target
    distribution with shape [...].
  """
  chex.assert_type([log_predictions, log_targets], float)
  target_probs = jnp.exp(log_targets)
  return jnp.sum(target_probs * (log_targets - log_predictions), axis=-1)
|
| 426 |
+
|
| 427 |
+
|
| 428 |
+
def convex_kl_divergence(
    log_predictions: chex.Array,
    targets: chex.Array
) -> chex.Array:
  """Computes a convex version of the Kullback-Leibler divergence loss.

  Measures the information gain achieved if target probability distribution
  would be used instead of predicted probability distribution.
  This version is jointly convex in p (targets) and q (log_predictions).

  References:
    [Kullback, Leibler, 1951](https://www.jstor.org/stable/2236703)

  Args:
    log_predictions: Probabilities of predicted distribution with shape [...,
      dim]. Expected to be in the log-space to avoid underflow.
    targets: Probabilities of target distribution with shape [..., dim].
      Expected to be strictly positive.

  Returns:
    Kullback-Leibler divergence of predicted distribution from target
    distribution with shape [...].
  """
  # Adding sum(q) - sum(p) makes the divergence jointly convex without
  # changing its value when both arguments are normalized distributions.
  plain_kl = kl_divergence(log_predictions, targets)
  correction = jnp.sum(jnp.exp(log_predictions) - targets, axis=-1)
  return plain_kl + correction
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
@functools.partial(chex.warn_only_n_pos_args_in_future, n=4)
def ctc_loss_with_forward_probs(
    logits: chex.Array,
    logit_paddings: chex.Array,
    labels: chex.Array,
    label_paddings: chex.Array,
    blank_id: int = 0,
    log_epsilon: float = -1e5
) -> tuple[chex.Array, chex.Array, chex.Array]:
  r"""Computes CTC loss and CTC forward-probabilities.

  The CTC loss is a loss function based on log-likelihoods of the model that
  introduces a special blank symbol :math:`\phi` to represent variable-length
  output sequences.

  Forward probabilities returned by this function, as auxiliary results, are
  grouped into two part: blank alpha-probability and non-blank alpha
  probability. Those are defined as follows:

  .. math::
    \alpha_{\mathrm{BLANK}}(t, n) =
    \sum_{\pi_{1:t-1}} p(\pi_t = \phi | \pi_{1:t-1}, y_{1:n-1}, \cdots), \\
    \alpha_{\mathrm{LABEL}}(t, n) =
    \sum_{\pi_{1:t-1}} p(\pi_t = y_n | \pi_{1:t-1}, y_{1:n-1}, \cdots).

  Here, :math:`\pi` denotes the alignment sequence in the reference
  [Graves et al, 2006] that is blank-inserted representations of ``labels``.
  The return values are the logarithms of the above probabilities.

  References:
    [Graves et al, 2006](https://dl.acm.org/doi/abs/10.1145/1143844.1143891)

  Args:
    logits: (B, T, K)-array containing logits of each class where B denotes
      the batch size, T denotes the max time frames in ``logits``, and K
      denotes the number of classes including a class for blanks.
    logit_paddings: (B, T)-array. Padding indicators for ``logits``. Each
      element must be either 1.0 or 0.0, and ``logitpaddings[b, t] == 1.0``
      denotes that ``logits[b, t, :]`` are padded values.
    labels: (B, N)-array containing reference integer labels where N denotes
      the max time frames in the label sequence.
    label_paddings: (B, N)-array. Padding indicators for ``labels``. Each
      element must be either 1.0 or 0.0, and ``labelpaddings[b, n] == 1.0``
      denotes that ``labels[b, n]`` is a padded label. In the current
      implementation, ``labels`` must be right-padded, i.e. each row
      ``labelpaddings[b, :]`` must be repetition of zeroes, followed by
      repetition of ones.
    blank_id: Id for blank token. ``logits[b, :, blank_id]`` are used as
      probabilities of blank symbols.
    log_epsilon: Numerically-stable approximation of log(+0).

  Returns:
    A tuple ``(loss_value, logalpha_blank, logalpha_nonblank)``. Here,
    ``loss_value`` is a (B,)-array containing the loss values for each sequence
    in the batch, ``logalpha_blank`` and ``logalpha_nonblank`` are
    (T, B, N+1)-arrays where the (t, b, n)-th element denotes
    \log \alpha_B(t, n) and \log \alpha_L(t, n), respectively, for ``b``-th
    sequence in the batch.
  """

  chex.assert_rank(logits, 3)
  chex.assert_rank(labels, 2)
  batchsize, unused_maxinputlen, num_classes = logits.shape
  batchsize_of_labels, maxlabellen = labels.shape
  chex.assert_equal(batchsize, batchsize_of_labels)
  chex.assert_equal(labels.shape, label_paddings.shape)
  chex.assert_equal(logits.shape[:2], logit_paddings.shape)

  logprobs = jax.nn.log_softmax(logits)
  # Actual (unpadded) length of each label sequence.
  labellens = maxlabellen - jnp.sum(label_paddings, axis=1).astype(jnp.int32)

  # repeat[b, n] == 1.0 when label[b, n] == label[b, n+1].
  repeat = (labels[:, :-1] == labels[:, 1:]).astype(jnp.float32)
  repeat = jnp.pad(repeat, ((0, 0), (0, 1)))

  logprobs_phi = logprobs[:, :, blank_id:blank_id + 1]  # [B, T, 1]
  logprobs_phi = jnp.transpose(logprobs_phi, (1, 0, 2))  # [T, B, 1]

  # Gather, for each frame, the log-probability of emitting each reference
  # label via a one-hot contraction.
  one_hot = jax.nn.one_hot(labels, num_classes=num_classes)  # [B, N, K]
  logprobs_emit = jnp.einsum('btk,bnk->btn', logprobs, one_hot)
  logprobs_emit = jnp.transpose(logprobs_emit, (1, 0, 2))  # [T, B, N]

  # Initial alphas: only the "zero labels consumed, in blank state" cell has
  # probability 1; everything else starts at (approximately) log 0.
  logalpha_phi_init = jnp.ones(
      (batchsize, maxlabellen + 1)) * log_epsilon  # [B, N]
  logalpha_phi_init = logalpha_phi_init.at[:, 0].set(0.0)
  logalpha_emit_init = jnp.ones((batchsize, maxlabellen)) * log_epsilon

  def update_phi_score(phi, added_score):
    # Update `phi[:, 1:]`` with adding `added_score` in log space.
    return jnp.concatenate(
        [phi[:, :1], jnp.logaddexp(phi[:, 1:], added_score)], axis=-1)

  def loop_body(prev, x):
    # One time step of the CTC forward recursion, scanned over T frames.
    prev_phi, prev_emit = prev
    # emit-to-phi epsilon transition, except if the next label is repetition
    prev_phi_orig = prev_phi
    prev_phi = update_phi_score(prev_phi, prev_emit + log_epsilon * repeat)

    logprob_emit, logprob_phi, pad = x

    # phi-to-emit transition
    next_emit = jnp.logaddexp(prev_phi[:, :-1] + logprob_emit,
                              prev_emit + logprob_emit)
    # self-loop transition
    next_phi = prev_phi + logprob_phi
    # emit-to-phi blank transition only when the next label is repetition
    next_phi = update_phi_score(
        next_phi, prev_emit + logprob_phi + log_epsilon * (1.0 - repeat))

    # Padded frames carry the previous alphas through unchanged.
    pad = pad.reshape((batchsize, 1))
    next_emit = pad * prev_emit + (1.0 - pad) * next_emit
    next_phi = pad * prev_phi_orig + (1.0 - pad) * next_phi

    return (next_phi, next_emit), (next_phi, next_emit)

  xs = (logprobs_emit, logprobs_phi, logit_paddings.transpose((1, 0)))
  _, (logalpha_phi,
      logalpha_emit) = jax.lax.scan(loop_body,
                                    (logalpha_phi_init, logalpha_emit_init), xs)

  # last row needs to be updated with the last epsilon transition
  logalpha_phi_last = update_phi_score(logalpha_phi[-1], logalpha_emit[-1])
  logalpha_phi = logalpha_phi.at[-1].set(logalpha_phi_last)

  # extract per_seq_loss: negative log-probability of the final alpha at the
  # cell matching each sequence's true label length.
  one_hot = jax.nn.one_hot(labellens, num_classes=maxlabellen + 1)  # [B, N+1]
  per_seq_loss = -jnp.einsum('bn,bn->b', logalpha_phi_last, one_hot)  # pylint:disable=invalid-unary-operand-type

  return per_seq_loss, logalpha_phi, logalpha_emit
|
| 585 |
+
|
| 586 |
+
|
| 587 |
+
@functools.partial(chex.warn_only_n_pos_args_in_future, n=4)
def ctc_loss(
    logits: chex.Array,
    logit_paddings: chex.Array,
    labels: chex.Array,
    label_paddings: chex.Array,
    blank_id: int = 0,
    log_epsilon: float = -1e5
) -> chex.Array:
  """Computes CTC loss.

  Thin wrapper around :func:`ctc_loss_with_forward_probs` that discards the
  auxiliary forward probabilities; see that function's docstring for details.

  Args:
    logits: (B, T, K)-array containing logits of each class where B denotes
      the batch size, T denotes the max time frames in ``logits``, and K
      denotes the number of classes including a class for blanks.
    logit_paddings: (B, T)-array. Padding indicators for ``logits``. Each
      element must be either 1.0 or 0.0, and ``logitpaddings[b, t] == 1.0``
      denotes that ``logits[b, t, :]`` are padded values.
    labels: (B, N)-array containing reference integer labels where N denotes
      the max time frames in the label sequence.
    label_paddings: (B, N)-array. Padding indicators for ``labels``. Each
      element must be either 1.0 or 0.0, and ``labelpaddings[b, n] == 1.0``
      denotes that ``labels[b, n]`` is a padded label. In the current
      implementation, ``labels`` must be right-padded, i.e. each row
      ``labelpaddings[b, :]`` must be repetition of zeroes, followed by
      repetition of ones.
    blank_id: Id for blank token. ``logits[b, :, blank_id]`` are used as
      probabilities of blank symbols.
    log_epsilon: Numerically-stable approximation of log(+0).

  Returns:
    (B,)-array containing loss values for each sequence in the batch.
  """
  loss_value, _, _ = ctc_loss_with_forward_probs(
      logits,
      logit_paddings,
      labels,
      label_paddings,
      blank_id=blank_id,
      log_epsilon=log_epsilon,
  )
  return loss_value
|
| 626 |
+
|
| 627 |
+
|
| 628 |
+
@functools.partial(chex.warn_only_n_pos_args_in_future, n=2)
def sigmoid_focal_loss(
    logits: chex.Array,
    labels: chex.Array,
    alpha: Optional[float] = None,
    gamma: float = 2.,
) -> chex.Array:
  """Sigmoid focal loss.

  The focal loss is a re-weighted cross entropy for unbalanced problems.
  Use this loss function if classes are not mutually exclusive.
  See `sigmoid_binary_cross_entropy` for more information.

  References:
    Lin et al. 2018. https://arxiv.org/pdf/1708.02002.pdf

  Args:
    logits: Array of floats. The predictions for each example.
      The predictions for each example.
    labels: Array of floats. Labels and logits must have
      the same shape. The label array must contain the binary
      classification labels for each element in the data set
      (0 for the out-of-class and 1 for in-class).
    alpha: (optional) Weighting factor in range (0,1) to balance
      positive vs negative examples. Default None (no weighting).
    gamma: Exponent of the modulating factor (1 - p_t).
      Balances easy vs hard examples.

  Returns:
    A loss value array with a shape identical to the logits and target
    arrays.
  """
  # Encode "no weighting" as a negative sentinel so the choice can be made
  # with a single jax.lax.cond below.
  alpha = -1 if alpha is None else alpha

  chex.assert_type([logits], float)
  labels = labels.astype(logits.dtype)
  # see also the original paper's implementation at:
  # https://github.com/facebookresearch/fvcore/blob/main/fvcore/nn/focal_loss.py
  p = jax.nn.sigmoid(logits)
  ce_loss = sigmoid_binary_cross_entropy(logits, labels)
  # p_t is the model's probability of the true class for each element.
  p_t = p * labels + (1 - p) * (1 - labels)
  # Modulating factor (1 - p_t)^gamma down-weights easy examples.
  loss = ce_loss * ((1 - p_t) ** gamma)

  weighted = lambda loss_arg: (alpha * labels
                               + (1 - alpha) * (1 - labels)
                               )*loss_arg
  not_weighted = lambda loss_arg: loss_arg

  # Branch on the sentinel: apply alpha-balancing only when alpha >= 0.
  loss = jax.lax.cond(alpha >= 0,
                      weighted,
                      not_weighted,
                      loss)

  return loss
|
testbed/google-deepmind__optax/optax/losses/_classification_test.py
ADDED
|
@@ -0,0 +1,867 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for optax.losses._classification."""
|
| 16 |
+
|
| 17 |
+
import functools
|
| 18 |
+
|
| 19 |
+
from absl.testing import absltest
|
| 20 |
+
from absl.testing import parameterized
|
| 21 |
+
import chex
|
| 22 |
+
import jax
|
| 23 |
+
import jax.numpy as jnp
|
| 24 |
+
import jax.test_util as jaxtest
|
| 25 |
+
import numpy as np
|
| 26 |
+
from optax.losses import _classification
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class SoftmaxCrossEntropyTest(parameterized.TestCase):
  """Tests for softmax_cross_entropy against hand-computed values."""

  def setUp(self):
    super().setUp()
    # Logits for two examples over three classes.
    self.ys = np.array(
        [
            [10.0, 1.0, -2.0],
            [1.0, 4.0, 0.2],
        ],
        dtype=np.float32,
    )
    # One-hot target distributions for the two examples.
    self.ts = np.array(
        [
            [0.0, 1.0, 0.0],
            [1.0, 0.0, 0.0],
        ],
        dtype=np.float32,
    )
    # Expected cross-entropy values, precomputed.
    self.exp = np.array(
        [
            9.00013,
            3.0696733,
        ],
        dtype=np.float32,
    )

  @chex.all_variants
  def test_scalar(self):
    """Tests for a single (unbatched) example."""
    np.testing.assert_allclose(
        self.variant(_classification.softmax_cross_entropy)(
            self.ys[0], self.ts[0]
        ),
        self.exp[0],
        atol=1e-4,
    )

  @chex.all_variants
  def test_batched(self):
    """Tests for a full batch."""
    np.testing.assert_allclose(
        self.variant(_classification.softmax_cross_entropy)(self.ys, self.ts),
        self.exp,
        atol=1e-4,
    )

  def test_gradient(self):
    """Tests gradient ok."""
    jaxtest.check_grads(
        _classification.softmax_cross_entropy,
        (self.ys[:2], self.ts[:2]),
        order=1,
    )
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class SafeSoftmaxCrossEntropyTest(parameterized.TestCase):
  """Tests for safe_softmax_cross_entropy, including -inf logits."""

  def setUp(self):
    super().setUp()
    # Logits; rows 3-5 contain -inf entries to exercise the 0 log 0 = 0
    # convention of the "safe" variant.
    self.ys = np.array(
        [
            [10.0, 1.0, -2.0],
            [1.0, 4.0, 0.2],
            [-np.inf, 0.0, 0.0],
            [-np.inf, 0.0, 0.0],
            [-np.inf, 0.0, -np.inf],
        ],
        dtype=np.float32,
    )
    # Target distributions; row 4 puts mass 0.4 on a -inf logit, so its
    # expected loss is inf.
    self.ts = np.array(
        [
            [0.0, 1.0, 0.0],
            [1.0, 0.0, 0.0],
            [0.0, 0.5, 0.5],
            [0.4, 0.3, 0.3],
            [0.0, 1.0, 0.0],
        ],
        dtype=np.float32,
    )
    # Expected cross-entropy values, precomputed.
    self.exp = np.array(
        [
            9.00013,
            3.0696733,
            0.693147,
            np.inf,
            0.0,
        ],
        dtype=np.float32,
    )

  @chex.all_variants
  def test_scalar(self):
    """Tests for a single (unbatched) example."""
    np.testing.assert_allclose(
        self.variant(_classification.safe_softmax_cross_entropy)(
            self.ys[0], self.ts[0]
        ),
        self.exp[0],
        atol=1e-4,
    )

  @chex.all_variants
  def test_batched(self):
    """Tests for a full batch."""
    np.testing.assert_allclose(
        self.variant(_classification.safe_softmax_cross_entropy)(
            self.ys, self.ts
        ),
        self.exp,
        atol=1e-4,
    )

  def test_gradient(self):
    """Tests gradient ok."""
    jaxtest.check_grads(
        _classification.safe_softmax_cross_entropy,
        (self.ys[:2], self.ts[:2]),
        order=1,
    )

  def test_against_plain_implementation(self):
    """Tests against plain implementation which does not handle -inf."""
    # On finite logits the safe and plain variants must agree in both value
    # and gradient.
    plain_val_and_grad = jax.value_and_grad(
        _classification.softmax_cross_entropy
    )(self.ys[0], self.ts[0])
    val_and_grad = jax.value_and_grad(
        _classification.safe_softmax_cross_entropy
    )(self.ys[0], self.ts[0])
    chex.assert_trees_all_close(plain_val_and_grad, val_and_grad, atol=1e-4)
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
class SoftmaxCrossEntropyWithIntegerLabelsTest(parameterized.TestCase):
  """Checks the integer-label variant against the one-hot formulation."""

  def setUp(self):
    super().setUp()
    self.ys = np.array([[10.0, 1.0, -2.0], [1.0, 4.0, 0.2]], dtype=np.float32)
    # Integer class indices corresponding to one-hot targets.
    self.ts = np.array([1, 0], dtype=np.int32)

  @chex.all_variants
  def test_consistent_with_softmax_cross_entropy_scalar(self):
    """Tests for a scalar."""
    exp = _classification.softmax_cross_entropy(
        self.ys[0], jax.nn.one_hot(self.ts[0], 3)
    )
    np.testing.assert_allclose(
        self.variant(_classification.softmax_cross_entropy_with_integer_labels)(
            self.ys[0], self.ts[0]
        ),
        exp,
        rtol=1e-6,
    )

  @chex.all_variants
  def test_consistent_with_softmax_cross_entropy_batched(self):
    """Tests for a full batch."""
    exp = _classification.softmax_cross_entropy(
        self.ys, jax.nn.one_hot(self.ts, 3)
    )
    np.testing.assert_allclose(
        self.variant(_classification.softmax_cross_entropy_with_integer_labels)(
            self.ys, self.ts
        ),
        exp,
        rtol=1e-6,
    )

  def test_gradient(self):
    """Tests gradient ok."""
    # Labels are integers (non-differentiable), so they are bound via
    # functools.partial and only the logits are differentiated.
    jaxtest.check_grads(
        functools.partial(
            _classification.softmax_cross_entropy_with_integer_labels,
            labels=self.ts,
        ),
        (self.ys,),
        order=1,
    )
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
class SigmoidCrossEntropyTest(parameterized.TestCase):
  """Tests sigmoid_binary_cross_entropy on extreme and neutral logits."""

  # Each case mixes one extreme logit (+-1e09) with one near-zero logit
  # (+-1e-09); `expected` is the mean loss over the pair, so a confidently
  # wrong prediction yields ~5e08 and a confidently right one ~log(2)/2.
  @parameterized.parameters(
      dict(
          preds=np.array([-1e09, -1e-09]),
          labels=np.array([1.0, 0.0]),
          expected=5e08,
      ),
      dict(
          preds=np.array([-1e09, -1e-09]),
          labels=np.array([0.0, 1.0]),
          expected=0.3465736,
      ),
      dict(
          preds=np.array([1e09, 1e-09]),
          labels=np.array([1.0, 0.0]),
          expected=0.3465736,
      ),
      dict(
          preds=np.array([1e09, 1e-09]),
          labels=np.array([0.0, 1.0]),
          expected=5e08,
      ),
      dict(
          preds=np.array([-1e09, 1e-09]),
          labels=np.array([1.0, 0.0]),
          expected=5e08,
      ),
      dict(
          preds=np.array([-1e09, 1e-09]),
          labels=np.array([0.0, 1.0]),
          expected=0.3465736,
      ),
      dict(
          preds=np.array([1e09, -1e-09]),
          labels=np.array([1.0, 0.0]),
          expected=0.3465736,
      ),
      dict(
          preds=np.array([1e09, -1e-09]),
          labels=np.array([0.0, 1.0]),
          expected=5e08,
      ),
      dict(
          preds=np.array([0.0, 0.0]),
          labels=np.array([1.0, 0.0]),
          expected=0.6931472,
      ),
      dict(
          preds=np.array([0.0, 0.0]),
          labels=np.array([0.0, 1.0]),
          expected=0.6931472,
      ),
  )
  def testSigmoidCrossEntropy(self, preds, labels, expected):
    """Checks the mean sigmoid BCE against a precomputed expected value."""
    tested = jnp.mean(
        _classification.sigmoid_binary_cross_entropy(preds, labels)
    )
    np.testing.assert_allclose(tested, expected, rtol=1e-6, atol=1e-6)
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
class PolyLossTest(parameterized.TestCase):
  """Tests for poly_loss_cross_entropy (PolyLoss, arXiv:2204.12511)."""

  def setUp(self):
    super().setUp()
    self.logits = np.array([0.14, 1.456, 2.356, -0.124, -2.47])
    self.labels = np.array([0.1, 0.15, 0.2, 0.25, 0.3])

    self.batched_logits = np.array([[4.0, 2.0, 1.0], [0.0, 5.0, 1.0]])
    self.batched_labels = np.array([[1.0, 0.0, 0.0], [0.0, 0.8, 0.2]])
    # all expected values are computed using tf version of `poly1_cross_entropy`
    # see page 10 here https://arxiv.org/pdf/2204.12511.pdf for more

  @chex.all_variants
  @parameterized.parameters(
      dict(eps=2, expected=4.5317),
      dict(eps=1, expected=3.7153),
      dict(eps=-1, expected=2.0827),
      dict(eps=0, expected=2.8990),
      dict(eps=-0.5, expected=2.4908),
      dict(eps=1.15, expected=3.8378),
      dict(eps=1.214, expected=3.8900),
      dict(eps=5.45, expected=7.3480),
  )
  def test_scalar(self, eps, expected):
    """Single-example loss for a range of epsilon values."""
    np.testing.assert_allclose(
        self.variant(_classification.poly_loss_cross_entropy)(
            self.logits, self.labels, epsilon=eps
        ),
        expected,
        atol=1e-4,
    )

  @chex.all_variants
  @parameterized.parameters(
      dict(eps=2, expected=np.array([0.4823, 1.2567])),
      dict(eps=1, expected=np.array([0.3261, 1.0407])),
      dict(eps=0, expected=np.array([0.1698, 0.8247])),
      dict(eps=-0.5, expected=np.array([0.0917, 0.7168])),
      dict(eps=1.15, expected=np.array([0.3495, 1.0731])),
      dict(eps=1.214, expected=np.array([0.3595, 1.0870])),
      dict(eps=5.45, expected=np.array([1.0211, 2.0018])),
  )
  def test_batched(self, eps, expected):
    """Batched loss for a range of epsilon values."""
    np.testing.assert_allclose(
        self.variant(_classification.poly_loss_cross_entropy)(
            self.batched_logits, self.batched_labels, epsilon=eps
        ),
        expected,
        atol=1e-4,
    )

  @chex.all_variants
  @parameterized.parameters(
      dict(
          logits=np.array(
              [[4.0, 2.0, 1.0], [0.0, 5.0, 1.0], [0.134, 1.234, 3.235]]
          ),
          labels=np.array(
              [[1.0, 0.0, 0.0], [0.0, 0.8, 0.2], [0.34, 0.33, 0.33]]
          ),
      ),
      dict(
          logits=np.array([[4.0, 2.0, 1.0], [0.0, 5.0, 1.0]]),
          labels=np.array([[1.0, 0.0, 0.0], [0.0, 0.8, 0.2]]),
      ),
      dict(
          logits=np.array(
              [[4.0, 2.0, 1.0, 0.134, 1.3515], [0.0, 5.0, 1.0, 0.5215, 5.616]]
          ),
          labels=np.array(
              [[0.5, 0.0, 0.0, 0.0, 0.5], [0.0, 0.12, 0.2, 0.56, 0.12]]
          ),
      ),
      dict(logits=np.array([1.89, 2.39]), labels=np.array([0.34, 0.66])),
      dict(logits=np.array([0.314]), labels=np.array([1.0])),
  )
  def test_equals_to_cross_entropy_when_eps0(self, logits, labels):
    """With epsilon=0 PolyLoss must reduce to plain softmax cross entropy."""
    np.testing.assert_allclose(
        self.variant(_classification.poly_loss_cross_entropy)(
            logits, labels, epsilon=0.0
        ),
        self.variant(_classification.softmax_cross_entropy)(logits, labels),
        atol=1e-4,
    )
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
class HingeTest(parameterized.TestCase):
  """Tests binary and multiclass hinge losses against local references."""

  def test_binary(self):
    """Binary hinge on a single example; labels are passed in signed form."""
    label = jnp.array(1)
    signed_label = jnp.array(2.0 * label - 1.0)  # maps {0, 1} -> {-1, +1}
    score = jnp.array(10.0)

    def reference_impl(label, logit):
      # Standard hinge: max(0, 1 - y * f(x)) with y in {-1, +1}.
      return jax.nn.relu(1 - logit * (2.0 * label - 1.0))

    expected = reference_impl(label, score)
    result = _classification.hinge_loss(score, signed_label)
    np.testing.assert_allclose(result, expected, atol=1e-4)

  def test_batched_binary(self):
    """Binary hinge on a batch; the loss broadcasts over leading dims."""
    labels = jnp.array([1, 0])
    signed_labels = jnp.array(2.0 * labels - 1.0)
    scores = jnp.array([10.0, 20.0])

    def reference_impl(label, logit):
      return jax.nn.relu(1 - logit * (2.0 * label - 1.0))

    expected = jax.vmap(reference_impl)(labels, scores)
    # no need to vmap the optax loss. leading dimensions automatically handled.
    result = _classification.hinge_loss(scores, signed_labels)
    np.testing.assert_allclose(result, expected, atol=1e-4)

  def test_multi_class(self):
    """Multiclass hinge on a single example with an integer label."""
    label = jnp.array(1)
    scores = jnp.array([10.0, 3.0])

    def reference_impl(label, scores):
      # Crammer-Singer style: max over classes of (score + margin) minus
      # the true-class score; margin 1 on all classes except the label.
      one_hot_label = jax.nn.one_hot(label, scores.shape[-1])
      return jnp.max(scores + 1.0 - one_hot_label) - scores[label]

    expected = reference_impl(label, scores)
    result = _classification.multiclass_hinge_loss(scores, label)
    np.testing.assert_allclose(result, expected, atol=1e-4)

  def test_batched_multi_class(self):
    """Multiclass hinge on a batch of examples."""
    label = jnp.array([1, 0])
    scores = jnp.array([[10.0, 3.0], [11.0, -2.0]])

    def reference_impl(label, scores):
      one_hot_label = jax.nn.one_hot(label, scores.shape[-1])
      return jnp.max(scores + 1.0 - one_hot_label) - scores[label]

    expected = jax.vmap(reference_impl)(label, scores)
    # no need to vmap the optax loss. leading dimensions automatically handled.
    result = _classification.multiclass_hinge_loss(scores, label)
    np.testing.assert_allclose(result, expected, atol=1e-4)
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
class SparsemaxTest(parameterized.TestCase):
  """Tests the binary sparsemax loss against a piecewise reference."""

  def test_binary(self):
    """Single-example binary sparsemax loss."""
    label = 1
    score = 10.0

    def reference_impl(label, logit):
      # Piecewise closed form of the binary sparsemax loss in terms of the
      # signed margin s = -(2*label - 1) * logit.
      scores = -(2 * label - 1) * logit
      if scores <= -1.0:
        return 0.0
      elif scores >= 1.0:
        return scores
      else:
        return (scores + 1.0) ** 2 / 4

    expected = reference_impl(label, score)
    result = _classification.sparsemax_loss(
        jnp.asarray(score), jnp.asarray(label)
    )
    np.testing.assert_allclose(result, expected, atol=1e-4)

  def test_batched_binary(self):
    """Batched binary sparsemax loss."""
    labels = jnp.array([1, 0])
    scores = jnp.array([10.0, 20.0])

    def reference_impl(label, logit):
      scores = -(2 * label - 1) * logit
      if scores <= -1.0:
        return 0.0
      elif scores >= 1.0:
        return scores
      else:
        return (scores + 1.0) ** 2 / 4

    expected = jnp.asarray([
        reference_impl(labels[0], scores[0]),
        reference_impl(labels[1], scores[1]),
    ])
    # in the optax loss the leading dimensions are automatically handled.
    result = _classification.sparsemax_loss(scores, labels)
    np.testing.assert_allclose(result, expected, atol=1e-4)
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
class ConvexKLDivergenceTest(parameterized.TestCase):
  """Tests convex_kl_divergence against precomputed values."""

  def setUp(self):
    super().setUp()
    # Log-probabilities P (first argument) and probabilities Q (second).
    self.log_ps = np.array([
        [-2.9957, -3.5066, -3.9120, -1.2040, -0.6931, -2.3026],
        [-1.6094, -1.6094, -1.6094, -2.3026, -1.8971, -1.8971],
    ])
    # Second row of qs is unnormalized and contains a zero entry, exercising
    # the convex (generalized) form of the divergence.
    self.qs = np.array(
        [[0.2, 0.2, 0.2, 0.1, 0.15, 0.15], [0.05, 0.03, 0.02, 0.3, 0.5, 0.0]]
    )

    # Computed convex kullback-leibler divergence of P from Q.
    self.exp = np.array([0.88757247, 0.859308])

  @chex.all_variants
  def test_scalar(self):
    """Tests for a single (unbatched) distribution pair."""
    np.testing.assert_allclose(
        self.variant(_classification.convex_kl_divergence)(
            self.log_ps[0], self.qs[0]
        ),
        self.exp[0],
        atol=1e-4,
    )

  @chex.all_variants
  def test_batched(self):
    """Tests for a full batch."""
    np.testing.assert_allclose(
        self.variant(_classification.convex_kl_divergence)(
            self.log_ps, self.qs
        ),
        self.exp,
        atol=1e-4,
    )
|
| 485 |
+
|
| 486 |
+
|
| 487 |
+
class PerceptronTest(parameterized.TestCase):
  """Tests binary and multiclass perceptron losses against references."""

  def test_binary(self):
    """Binary perceptron loss on a single example (signed label form)."""
    label = jnp.array(1)
    signed_label = jnp.array(2.0 * label - 1.0)  # maps {0, 1} -> {-1, +1}
    score = jnp.array(10.0)

    def reference_impl(label, logit) -> float:
      # Perceptron loss: max(0, -y * f(x)) with y in {-1, +1}.
      return jax.nn.relu(-logit * (2.0 * label - 1.0))

    expected = reference_impl(label, score)
    result = _classification.perceptron_loss(score, signed_label)
    np.testing.assert_allclose(result, expected, atol=1e-4)

  def test_batched_binary(self):
    """Binary perceptron loss on a batch."""
    labels = jnp.array([1, 0])
    signed_labels = jnp.array(2.0 * labels - 1.0)
    scores = jnp.array([10.0, 20.0])

    def reference_impl(label, logit) -> float:
      return jax.nn.relu(-logit * (2.0 * label - 1.0))

    expected = jax.vmap(reference_impl)(labels, scores)
    # no need to vmap the optax loss. leading dimensions automatically handled.
    result = _classification.perceptron_loss(scores, signed_labels)
    np.testing.assert_allclose(result, expected, atol=1e-4)

  def test_multi_class(self):
    """Multiclass perceptron loss on a single example."""
    label = jnp.array(1)
    scores = jnp.array([10.0, 3.0])

    def reference_impl(label, scores):
      # max over class scores minus the true-class score (zero iff the
      # label is the argmax).
      return jnp.max(scores) - scores[label]

    expected = reference_impl(label, scores)
    result = _classification.multiclass_perceptron_loss(scores, label)
    np.testing.assert_allclose(result, expected, atol=1e-4)

  def test_batched_multi_class(self):
    """Multiclass perceptron loss on a batch."""
    label = jnp.array([1, 0])
    scores = jnp.array([[10.0, 3.0], [11.0, -2.0]])

    def reference_impl(label, scores):
      return jnp.max(scores) - scores[label]

    expected = jax.vmap(reference_impl)(label, scores)
    # no need to vmap the optax loss. leading dimensions automatically handled.
    result = _classification.multiclass_perceptron_loss(scores, label)
    np.testing.assert_allclose(result, expected, atol=1e-4)
|
| 536 |
+
|
| 537 |
+
|
| 538 |
+
class KLDivergenceTest(parameterized.TestCase):
  """Tests kl_divergence (log-space predictions, probability targets)."""

  def setUp(self):
    super().setUp()
    self.log_ps = np.array([
        [-2.9957, -3.5066, -3.9120, -1.2040, -0.6931, -2.3026],
        [-1.6094, -1.6094, -1.6094, -2.3026, -1.8971, -1.8971],
    ])
    self.qs = np.array(
        [[0.2, 0.2, 0.2, 0.1, 0.15, 0.15], [0.05, 0.03, 0.02, 0.3, 0.5, 0.0]]
    )
    # Computed kullback-leibler divergence of P from Q.
    self.exp = np.array([0.8875577, 0.7592807])

  @chex.all_variants
  def test_scalar(self):
    """Tests for a single (unbatched) distribution pair."""
    np.testing.assert_allclose(
        self.variant(_classification.kl_divergence)(self.log_ps[0], self.qs[0]),
        self.exp[0],
        atol=1e-4,
    )

  @chex.all_variants
  def test_batched(self):
    """Tests for a full batch."""
    np.testing.assert_allclose(
        self.variant(_classification.kl_divergence)(self.log_ps, self.qs),
        self.exp,
        atol=1e-4,
    )
|
| 567 |
+
|
| 568 |
+
|
| 569 |
+
class KLDivergenceWithLogTargetsTest(parameterized.TestCase):
  """Tests kl_divergence_with_log_targets (both arguments in log space)."""

  def setUp(self):
    super().setUp()
    self.log_ps = np.array([
        [-2.9957, -3.5066, -3.9120, -1.2040, -0.6931, -2.3026],
        [-1.6094, -1.6094, -1.6094, -2.3026, -1.8971, -1.8971],
    ])
    # Targets are log-probabilities here, unlike KLDivergenceTest above.
    self.qs = np.array([
        [-1.6094, -1.6094, -1.6094, -2.3026, -1.8971, -1.8971],
        [-2.9957, -3.5066, -3.9120, -1.2040, -0.6931, -2.3026],
    ])
    # Computed kullback-leibler divergence of P from Q.
    self.exp = np.array([0.8875625, 0.7187435584901326])

  @chex.all_variants
  def test_scalar(self):
    """Tests for a single (unbatched) distribution pair."""
    np.testing.assert_allclose(
        self.variant(_classification.kl_divergence_with_log_targets)(
            self.log_ps[0], self.qs[0]
        ),
        self.exp[0],
        atol=1e-4,
    )

  @chex.all_variants
  def test_batched(self):
    """Tests for a full batch."""
    np.testing.assert_allclose(
        self.variant(_classification.kl_divergence_with_log_targets)(
            self.log_ps, self.qs
        ),
        self.exp,
        atol=1e-4,
    )
|
| 603 |
+
|
| 604 |
+
|
| 605 |
+
def _lengths_to_paddings(lengths: chex.Array, maxlength: int) -> chex.Array:
  """Builds a float32 padding mask: 1.0 where position >= length, else 0.0.

  Broadcasts ``lengths`` (any rank) against positions 0..maxlength-1 along a
  new trailing axis.
  """
  positions = jnp.arange(maxlength).reshape((1,) * lengths.ndim + (maxlength,))
  within_length = positions < jnp.expand_dims(lengths, axis=-1)
  return np.logical_not(within_length).astype(np.float32)
|
| 610 |
+
|
| 611 |
+
|
| 612 |
+
def _average_ctc_loss(
    logprobs: chex.Array,
    logprob_paddings: chex.Array,
    labels: chex.Array,
    label_paddings: chex.Array,
) -> chex.Array:
  """Scalar mean of the per-sequence CTC losses (used for gradient tests)."""
  per_sequence = _classification.ctc_loss(
      logprobs, logprob_paddings, labels, label_paddings
  )
  return jnp.average(per_sequence)
|
| 623 |
+
|
| 624 |
+
|
| 625 |
+
class CTCTest(parameterized.TestCase):
  """Tests for ctc_loss / ctc_loss_with_forward_probs."""

  def setUp(self):
    super().setUp()
    np.random.seed(1234)  # deterministic random fixtures
    # Accelerator backends accumulate more float error than CPU.
    self._rtol = 5e-3 if jax.default_backend() != 'cpu' else 1e-6

  @chex.all_variants
  def test_with_one_to_one_alignment(self):
    # when inputsteps and outputsteps are equal, no blank will be allowed.
    batchsize = 8
    steps = 50
    nclasses = 40
    logits = np.random.randn(batchsize, steps, nclasses)
    # Labels drawn from [1, nclasses); class 0 is reserved for blank.
    labels = np.random.uniform(1, nclasses, size=(batchsize, steps)).astype(
        np.int32
    )

    # This function only covers the cases without same-label repetition.
    # `test_repeat_with_one_to_one_alignment` below complements those cases.
    # So, redraw the samples for satisfying the non-repetition constraint.
    for n in range(labels.shape[0]):
      for t in range(1, labels.shape[1]):
        while labels[n, t] == labels[n, t - 1]:
          labels[n, t] = np.random.uniform(1, nclasses)

    results = self.variant(_classification.ctc_loss_with_forward_probs)(
        logits, np.zeros(logits.shape[:2]), labels, np.zeros(labels.shape)
    )
    (per_seq_loss, logalpha_blank, logalpha_emit) = results

    logprobs = jax.nn.log_softmax(logits)
    for b in range(batchsize):
      # With T == L the only valid path emits labels[b, t] at every frame,
      # so the loss is exactly the negated sum of those log-probs.
      p = 0.0
      for t in range(steps):
        p += logprobs[b, t, labels[b, t]]
      np.testing.assert_allclose(np.array(-p), per_seq_loss[b], rtol=self._rtol)

      # Check forward-probabilities.
      # 1. All-phi path: logalpha_blank[-1, b, 0] must be a probability of
      # the path that outputs blank symbols for all the frames.
      np.testing.assert_allclose(
          logalpha_blank[-1, b, 0], np.sum(logprobs[b, :, 0]), rtol=self._rtol
      )

      # 2. After emitting all the labels
      # the negated loss must be identical with the forward probability of
      # paths after consuming all the labels (because one-to-one alignment
      # doesn't allow extra blank symbols)
      np.testing.assert_allclose(
          logalpha_emit[-1, b, steps - 1], -per_seq_loss[b], rtol=self._rtol
      )
      # and, this forward probability must be copied to the blank forward
      # probability of the next step.
      np.testing.assert_allclose(
          logalpha_blank[-1, b, steps], -per_seq_loss[b], rtol=self._rtol
      )

  @chex.all_variants
  def test_with_one_to_one_alignment_and_paddings(self):
    batch_size = 5
    nclasses = 13
    steps = 7
    logits = np.random.normal(size=[batch_size, steps, nclasses])
    logprobs = jax.nn.log_softmax(logits)

    # Draw per-row labels without replacement so there is no repetition.
    labels = []
    for n in range(batch_size):
      row = list(range(1, nclasses))
      np.random.shuffle(row)
      labels.append(row[:steps])
    labels = np.array(labels)

    lengths = np.random.randint(3, 6, size=(batch_size,))
    paddings = _lengths_to_paddings(lengths, steps)

    actual_loss = self.variant(_classification.ctc_loss)(
        logits, paddings, labels, paddings
    )

    value_and_grad = self.variant(jax.value_and_grad(_average_ctc_loss))
    unused_avg_loss, actual_gradients = value_and_grad(
        logits, paddings, labels, paddings
    )

    for n in range(batch_size):
      expected_loss = -sum(
          logprobs[n, t, k] for t, k in enumerate(labels[n, : lengths[n]])
      )
      np.testing.assert_allclose(expected_loss, actual_loss[n], rtol=self._rtol)

      # Analytic gradient of softmax cross entropy: softmax(logits) minus
      # the one-hot targets, zeroed on padded frames and divided by the
      # batch size (because _average_ctc_loss takes the mean).
      expected_gradients = np.array(jax.nn.softmax(logits[n]))
      expected_gradients[lengths[n] :] = 0.0
      for t, k in enumerate(labels[n, : lengths[n]]):
        expected_gradients[t, k] -= 1.0
      expected_gradients /= batch_size
      np.testing.assert_allclose(
          expected_gradients, actual_gradients[n], rtol=self._rtol
      )

  @chex.all_variants
  def test_repeat_with_one_to_one_alignment(self):
    # test if it can correctly handle the same-label repetition.
    nclasses = 5
    labels = np.array([
        [1, 2, 2, 3],
        [2, 3, 4, 4],
        [1, 1, 1, 1],
        [1, 1, 2, 3],
        [1, 1, 1, 2],
    ])
    expected_alignment = [  # expected minimal alignment
        [1, 2, 0, 2, 3],
        [2, 3, 4, 0, 4],
        [1, 0, 1, 0, 1, 0, 1],
        [1, 0, 1, 2, 3],
        [1, 0, 1, 0, 1, 2],
    ]
    batch_size = len(labels)
    label_lens = np.array([4] * batch_size)
    label_steps = 6
    # Designed to have two padding elements on the right.
    labels = np.pad(labels, [(0, 0), (0, label_steps - labels.shape[1])])
    label_paddings = _lengths_to_paddings(label_lens, label_steps)

    logit_lengths = np.array([len(seq) for seq in expected_alignment])
    logit_steps = max(logit_lengths)
    logits = np.random.randn(batch_size, logit_steps, nclasses)
    logit_paddings = _lengths_to_paddings(logit_lengths, logit_steps)

    per_seq_loss = self.variant(_classification.ctc_loss)(
        logits, logit_paddings, labels, label_paddings
    )

    logprobs = jax.nn.log_softmax(logits)
    for n in range(batch_size):
      # The logit length equals the minimal alignment length, so the only
      # valid path is `expected_alignment[n]` (blanks separate repeats).
      expected_loss = -sum(
          logprobs[n, t, k] for t, k in enumerate(expected_alignment[n])
      )
      np.testing.assert_allclose(
          jnp.array(expected_loss), per_seq_loss[n], rtol=self._rtol
      )
|
| 767 |
+
|
| 768 |
+
|
| 769 |
+
class SigmoidFocalLossTest(parameterized.TestCase):
  """Tests sigmoid_focal_loss against sigmoid BCE and its scaling law."""

  def setUp(self):
    super().setUp()
    self.ys = np.array([[2.0, 0.1, -2.0], [0.3, -0.1, 1.2]], dtype=np.float32)
    self.ts = np.array([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])
    # Accelerator backends accumulate more float error than CPU.
    self._rtol = 5e-3 if jax.default_backend() != 'cpu' else 1e-6

    # Inverse sigmoid: build logits from target probabilities.
    logit = lambda x: jnp.log(x / (1.0 - x))
    self.large_ys = logit(jnp.array([0.9, 0.98, 0.3, 0.99]))
    self.small_ys = logit(jnp.array([0.1, 0.02, 0.09, 0.15]))
    self.ones_ts = jnp.array([1.0, 1.0, 1.0, 1.0])

  @chex.all_variants
  def test_focal_equals_ce(self):
    """If gamma == 0 and alpha == 0 we expect a CE loss."""
    np.testing.assert_allclose(
        self.variant(_classification.sigmoid_focal_loss)(
            self.ys, self.ts, gamma=0.0
        ),
        _classification.sigmoid_binary_cross_entropy(self.ys, self.ts),
        rtol=self._rtol,
    )

  @chex.all_variants
  def test_scale(self):
    """This test should catch problems with p_t."""
    # Focal loss must equal (1 - p_t)^gamma * BCE exactly.
    gamma = 2
    focal_loss = self.variant(_classification.sigmoid_focal_loss)(
        self.ys, self.ts, gamma=gamma
    )
    p = jax.nn.sigmoid(self.ys)
    ce_loss = _classification.sigmoid_binary_cross_entropy(self.ys, self.ts)
    p_t = p * self.ts + (1 - p) * (1 - self.ts)
    scale = (1 - p_t) ** gamma
    focal_scale = focal_loss / ce_loss
    np.testing.assert_allclose(focal_scale, scale, rtol=self._rtol)

  @chex.all_variants
  def test_large_logit_fl_less_than_ce(self):
    """If gamma == 2 and alpha == 0.5, the impact of large logits is reduced."""
    focal_loss = self.variant(_classification.sigmoid_focal_loss)(
        self.large_ys, self.ones_ts, gamma=2, alpha=0.5
    )
    ce_loss = _classification.sigmoid_binary_cross_entropy(
        self.large_ys, self.ones_ts
    )
    loss_ratio = ce_loss / focal_loss
    # CE / FL = 1 / (alpha * (1 - p)^gamma) = 2 / (1 - p)^2 here.
    expected_ratio = 2.0 / ((1.0 - jax.nn.sigmoid(self.large_ys)) ** 2)
    np.testing.assert_allclose(loss_ratio, expected_ratio, rtol=self._rtol)

  @chex.all_variants
  def test_small_logit_fl_less_than_ce(self):
    """If gamma == 2, small logits retain their weight."""
    focal_loss = self.variant(_classification.sigmoid_focal_loss)(
        self.small_ys, self.ones_ts, gamma=2
    )
    ce_loss = _classification.sigmoid_binary_cross_entropy(
        self.small_ys, self.ones_ts
    )
    loss_ratio = ce_loss / focal_loss
    expected_ratio = 1.0 / ((1.0 - jax.nn.sigmoid(self.small_ys)) ** 2)
    np.testing.assert_allclose(loss_ratio, expected_ratio, rtol=self._rtol)

  @chex.all_variants
  def test_alpha_one(self):
    """Test if re-weighting with alpha=1 is ok."""
    # With alpha=1 only positive targets contribute, i.e. BCE * targets.
    np.testing.assert_allclose(
        self.variant(_classification.sigmoid_focal_loss)(
            self.ys, self.ts, gamma=0.0, alpha=1
        ),
        _classification.sigmoid_binary_cross_entropy(self.ys, self.ts)
        * self.ts,
        rtol=self._rtol,
    )

  @chex.all_variants
  def test_ignore_positive(self):
    """If alpha == 0 positive examples do not matter."""
    focal_loss = self.variant(_classification.sigmoid_focal_loss)(
        self.ys, self.ts, alpha=0
    )
    ce_loss = _classification.sigmoid_binary_cross_entropy(self.ys, self.ts)
    assert all(ce_loss[self.ts == 1] > 0)
    assert all(focal_loss[self.ts == 1] == 0)

  @chex.all_variants
  def test_ignore_negative(self):
    """If alpha == 1 negative examples do not matter."""
    focal_loss = self.variant(_classification.sigmoid_focal_loss)(
        self.ys, self.ts, alpha=1
    )
    ce_loss = _classification.sigmoid_binary_cross_entropy(self.ys, self.ts)
    assert all(ce_loss[self.ts == 0] > 0)
    assert all(focal_loss[self.ts == 0] == 0)
|
| 864 |
+
|
| 865 |
+
|
| 866 |
+
# Run the absl test suite when executed directly.
if __name__ == '__main__':
  absltest.main()
|
testbed/google-deepmind__optax/optax/losses/_fenchel_young_test.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for optax.losses._fenchel_young."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
import chex
|
| 19 |
+
import jax
|
| 20 |
+
import jax.numpy as jnp
|
| 21 |
+
from jax.scipy.special import logsumexp
|
| 22 |
+
|
| 23 |
+
from optax.losses import _classification
|
| 24 |
+
from optax.losses import _fenchel_young
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def one_hot_argmax(inputs: jnp.ndarray) -> jnp.ndarray:
  """An argmax one-hot function for arbitrary shapes.

  Flattens `inputs`, one-hot encodes the position of its global maximum, and
  restores the original shape.
  """
  flat = jnp.reshape(inputs, (-1))
  winner_one_hot = jax.nn.one_hot(jnp.argmax(flat), flat.shape[0])
  return jnp.reshape(winner_one_hot, inputs.shape)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class FenchelYoungTest(chex.TestCase):
  """Tests for make_fenchel_young_loss."""

  @chex.all_variants
  def test_fenchel_young_reg(self):
    # Checks the behavior of the Fenchel-Young loss.
    fy_loss = self.variant(_fenchel_young.make_fenchel_young_loss(logsumexp))
    rng = jax.random.PRNGKey(0)
    rngs = jax.random.split(rng, 2)
    theta_true = jax.random.uniform(rngs[0], (8, 5))
    y_true = jax.vmap(jax.nn.softmax)(theta_true)
    theta_random = jax.random.uniform(rngs[1], (8, 5))
    y_random = jax.vmap(jax.nn.softmax)(theta_random)
    grad_random = jax.vmap(jax.grad(fy_loss))(theta_random, y_true)
    # Checks that the gradient of the loss takes the correct form:
    # grad of the logsumexp FY loss is softmax(theta) - y_true.
    chex.assert_trees_all_close(grad_random, y_random - y_true, rtol=1e-4)
    y_one_hot = jax.vmap(one_hot_argmax)(theta_true)
    int_one_hot = jnp.where(y_one_hot == 1.)[1]
    loss_one_hot = jax.vmap(fy_loss)(theta_random, y_one_hot)
    log_loss = jax.vmap(
        _classification.softmax_cross_entropy_with_integer_labels)(
            theta_random, int_one_hot)
    # Checks that the FY loss associated to logsumexp is correct.
    chex.assert_trees_all_close(loss_one_hot, log_loss, rtol=1e-4)
    # Checks that vmapping or not is equivalent.
    loss_one_hot_no_vmap = fy_loss(theta_random, y_one_hot)
    chex.assert_trees_all_close(loss_one_hot, loss_one_hot_no_vmap, rtol=1e-4)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
# Run the absl test suite when executed directly.
if __name__ == "__main__":
  absltest.main()
|
testbed/google-deepmind__optax/optax/losses/_regression_test.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for optax.losses._regression."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
from absl.testing import parameterized
|
| 19 |
+
|
| 20 |
+
import chex
|
| 21 |
+
import jax.numpy as jnp
|
| 22 |
+
import numpy as np
|
| 23 |
+
|
| 24 |
+
from optax.losses import _regression
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class SquaredErrorTest(parameterized.TestCase):
  """Unit tests for `_regression.squared_error`."""

  def setUp(self):
    super().setUp()
    self.ys = jnp.array([-2., -1., 0.5, 1.])
    self.ts = jnp.array([-1.5, 0., -1, 1.])
    # Reference values: elementwise squared residuals.
    self.exp = (self.ts - self.ys) ** 2

  @chex.all_variants
  def test_scalar(self):
    loss_fn = self.variant(_regression.squared_error)
    np.testing.assert_allclose(loss_fn(self.ys[0], self.ts[0]), self.exp[0])

  @chex.all_variants
  def test_batched(self):
    loss_fn = self.variant(_regression.squared_error)
    np.testing.assert_allclose(loss_fn(self.ys, self.ts), self.exp)

  @chex.all_variants
  def test_shape_mismatch(self):
    loss_fn = self.variant(_regression.squared_error)
    bad_targets = jnp.expand_dims(self.ts, axis=-1)
    with self.assertRaises(AssertionError):
      _ = loss_fn(self.ys, bad_targets)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class L2LossTest(parameterized.TestCase):
  """Unit tests for `_regression.l2_loss` (half the squared error)."""

  def setUp(self):
    super().setUp()
    self.ys = jnp.array([-2., -1., 0.5, 1.])
    self.ts = jnp.array([-1.5, 0., -1, 1.])
    # Reference values: half the elementwise squared residuals.
    self.exp = 0.5 * (self.ts - self.ys) ** 2

  @chex.all_variants
  def test_scalar(self):
    loss_fn = self.variant(_regression.l2_loss)
    np.testing.assert_allclose(loss_fn(self.ys[0], self.ts[0]), self.exp[0])

  @chex.all_variants
  def test_batched(self):
    loss_fn = self.variant(_regression.l2_loss)
    np.testing.assert_allclose(loss_fn(self.ys, self.ts), self.exp)

  @chex.all_variants
  def test_shape_mismatch(self):
    loss_fn = self.variant(_regression.l2_loss)
    bad_targets = jnp.expand_dims(self.ts, axis=-1)
    with self.assertRaises(AssertionError):
      _ = loss_fn(self.ys, bad_targets)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
class HuberLossTest(parameterized.TestCase):
  """Unit tests for `_regression.huber_loss`."""

  def setUp(self):
    super().setUp()
    self.ys = np.array([-2.0, 0.5, 0., 0.5, 2.0, 4.0, 132.])
    self.ts = np.array([0.0, -0.5, 0., 1., 1.0, 2.0, 0.3])
    # Reference outputs computed by hand for delta=1.0.
    self.exp = np.array([1.5, 0.5, 0., 0.125, 0.5, 1.5, 131.2])

  @chex.all_variants
  def test_scalar(self):
    loss_fn = self.variant(_regression.huber_loss)
    np.testing.assert_allclose(
        loss_fn(self.ys[0], self.ts[0], delta=1.0), self.exp[0])

  @chex.all_variants
  def test_batched(self):
    loss_fn = self.variant(_regression.huber_loss)
    np.testing.assert_allclose(
        loss_fn(self.ys, self.ts, delta=1.0), self.exp)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
# TODO(b/188419459): add test for grad and second order grad.
class LogCoshTest(parameterized.TestCase):
  """Unit tests for `_regression.log_cosh`."""

  def setUp(self):
    super().setUp()
    # Large magnitudes exercise the overflow-safe formulation.
    self.ys = jnp.array([500, -2., -1., 0.5, 1.])
    self.ts = jnp.array([-200, -1.5, 0., -1, 1.])
    # Reference values from tensorflow.keras.losses.log_cosh v2.4.1.
    self.exp = jnp.array([699.3068, 0.12011445, 0.4337809, 0.85544014, 0.])
    self.exp_ys_only = jnp.array(
        [499.30685, 1.3250027, 0.4337809, 0.12011451, 0.43378082])

  @chex.all_variants
  def test_scalar(self):
    loss_fn = self.variant(_regression.log_cosh)
    out = loss_fn(self.ys[0], self.ts[0])
    np.testing.assert_allclose(out, self.exp[0], atol=1e-5)

  @chex.all_variants
  def test_batched(self):
    loss_fn = self.variant(_regression.log_cosh)
    out = loss_fn(self.ys, self.ts)
    np.testing.assert_allclose(out, self.exp, atol=1e-5)

  @chex.all_variants
  def test_scalar_predictions_only(self):
    loss_fn = self.variant(_regression.log_cosh)
    out = loss_fn(self.ys[0])
    np.testing.assert_allclose(out, self.exp_ys_only[0], atol=1e-5)

  @chex.all_variants
  def test_batched_predictions_only(self):
    loss_fn = self.variant(_regression.log_cosh)
    out = loss_fn(self.ys)
    np.testing.assert_allclose(out, self.exp_ys_only, atol=1e-5)
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
class CosineDistanceTest(parameterized.TestCase):
  """Unit tests for `cosine_distance` and `cosine_similarity`."""

  def setUp(self):
    super().setUp()
    self.ys = np.array([[10., 1., -2.], [1., 4., 0.2]], dtype=np.float32)
    self.ts = np.array([[0., 1.2, 0.2], [1., -0.3, 0.]], dtype=np.float32)
    # Reference distances computed with scipy 1.20.
    self.exp = np.array([0.9358251989, 1.0464068465], dtype=np.float32)

  @chex.all_variants
  def test_scalar_distance(self):
    """Tests distance on a single pair of vectors."""
    dist_fn = self.variant(_regression.cosine_distance)
    np.testing.assert_allclose(
        dist_fn(self.ys[0], self.ts[0]), self.exp[0], atol=1e-4)

  @chex.all_variants
  def test_scalar_similarity(self):
    """Tests similarity on a single pair of vectors."""
    sim_fn = self.variant(_regression.cosine_similarity)
    np.testing.assert_allclose(
        sim_fn(self.ys[0], self.ts[0]), 1. - self.exp[0], atol=1e-4)

  @chex.all_variants
  def test_batched_distance(self):
    """Tests distance for a full batch."""
    dist_fn = self.variant(_regression.cosine_distance)
    np.testing.assert_allclose(
        dist_fn(self.ys, self.ts), self.exp, atol=1e-4)

  @chex.all_variants
  def test_batched_similarity(self):
    """Tests similarity for a full batch."""
    sim_fn = self.variant(_regression.cosine_similarity)
    np.testing.assert_allclose(
        sim_fn(self.ys, self.ts), 1. - self.exp, atol=1e-4)
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
# Allows running this test file directly, e.g. `python _regression_test.py`.
if __name__ == '__main__':
  absltest.main()
|
testbed/google-deepmind__optax/optax/monte_carlo/__init__.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Utilities for efficient monte carlo gradient estimation."""
|
| 16 |
+
|
| 17 |
+
from optax.monte_carlo.control_variates import control_delta_method
|
| 18 |
+
from optax.monte_carlo.control_variates import control_variates_jacobians
|
| 19 |
+
from optax.monte_carlo.control_variates import moving_avg_baseline
|
| 20 |
+
from optax.monte_carlo.stochastic_gradient_estimators import measure_valued_jacobians
|
| 21 |
+
from optax.monte_carlo.stochastic_gradient_estimators import pathwise_jacobians
|
| 22 |
+
from optax.monte_carlo.stochastic_gradient_estimators import score_function_jacobians
|
testbed/google-deepmind__optax/optax/monte_carlo/control_variates.py
ADDED
|
@@ -0,0 +1,423 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
r"""Implementation of control variates.
|
| 16 |
+
|
| 17 |
+
We are interested in computing the gradient using control variates:
|
| 18 |
+
\nabla_{\theta} E_{p(x; \theta)} f(x)
|
| 19 |
+
= \nabla_{\theta} [E_{p(x; \theta)} f(x) - h(x; \theta) + E_{p(x; \theta)}]
|
| 20 |
+
= \nabla_{\theta} [E_{p(x; \theta)} f(x) - h(x; \theta)]
|
| 21 |
+
+ \nabla_{\theta} E_{p(x; \theta)}]
|
| 22 |
+
= \nabla_{\theta} [E_{p(x; \theta)} f(x) - h(x; \theta)]
|
| 23 |
+
+ \nabla_{\theta} E_{p(x; \theta)}]
|
| 24 |
+
= \nabla_{\theta} \int {p(x; \theta)} (f(x) - h(x; \theta)) dx
|
| 25 |
+
+ \nabla_{\theta} E_{p(x; \theta)}]
|
| 26 |
+
= \int \nabla_{\theta} {p(x; \theta)} (f(x) - h(x; \theta)) dx
|
| 27 |
+
+ [E_{p(x; \theta)} \nabla_{\theta} (f(x) - h(x; \theta))
|
| 28 |
+
+ \nabla_{\theta} E_{p(x; \theta)}]
|
| 29 |
+
= \int \nabla_{\theta} {p(x; \theta)} (f(x) - h(x; \theta)) dx
|
| 30 |
+
- [E_{p(x; \theta)} \nabla_{\theta} h(x; \theta)
|
| 31 |
+
+ \nabla_{\theta} E_{p(x; \theta)}]
|
| 32 |
+
|
| 33 |
+
The above computation is performed in `control_variates_jacobians`.
|
| 34 |
+
|
| 35 |
+
When adding a new control variate, one does not need to implement the jacobian
|
| 36 |
+
computation, but instead has to implement the forward computation.
|
| 37 |
+
|
| 38 |
+
Each control variate implemented has to satisfy the following API:
|
| 39 |
+
* control_variate(function)
|
| 40 |
+
This returns a tuple of three functions:
|
| 41 |
+
* The first element of the tuple is a function which returns the
|
| 42 |
+
control variate value for a set of samples. It takes in as
|
| 43 |
+
arguments the parameters used to construct the distribution,
|
| 44 |
+
the distributional samples, and the state of the control variate
|
| 45 |
+
(if any). The return value of this function will have shape
|
| 46 |
+
`num_samples`, where `num_samples` is the number of samples
|
| 47 |
+
provided as input.
|
| 48 |
+
* The second is a function returns the expected value of the control
|
| 49 |
+
variate. The input arguments of this function are the parameters
|
| 50 |
+
of the distribution and the state of the control variate.
|
| 51 |
+
* The third is a function which updates the state of the control
|
| 52 |
+
variate, and returns the updated states.
|
| 53 |
+
|
| 54 |
+
For examples, see `control_delta_method` and `moving_avg_baseline`.
|
| 55 |
+
"""
|
| 56 |
+
from typing import Any, Callable, Sequence
|
| 57 |
+
|
| 58 |
+
import chex
|
| 59 |
+
import jax
|
| 60 |
+
import jax.numpy as jnp
|
| 61 |
+
|
| 62 |
+
from optax._src import base
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
# Arbitrary per-control-variate state (e.g. a moving-average accumulator);
# stateless control variates use `None`.
CvState = Any
# Evaluates the control variate at one sample: (params, sample, state).
ComputeCv = Callable[[base.Params, chex.Array, CvState], chex.Array]
# Closed-form expected value of the control variate: (params, state).
CvExpectedValue = Callable[[base.Params, CvState], CvState]
# Updates and returns the control-variate state from a batch of samples.
UpdateCvState = Callable[[base.Params, chex.Array, CvState], CvState]
# A control variate is the triple (compute, expected_value, update_state).
ControlVariate = tuple[ComputeCv, CvExpectedValue, UpdateCvState]
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def control_delta_method(
    function: Callable[[chex.Array], float]) -> ControlVariate:
  """The control delta covariate method.

  Control variate obtained by performing a second order Taylor expansion
  on the cost function f at the mean of the input distribution.

  Only implemented for Gaussian random variables.

  For details, see: https://icml.cc/2012/papers/687.pdf

  Args:
    function: The function for which to compute the control variate.
      The function takes in one argument (a sample from the distribution) and
      returns a floating point value.

  Returns:
    A tuple of three functions, to compute the control variate, the
    expected value of the control variate, and to update the control variate
    state.
  """

  def delta(
      params: base.Params,
      sample: chex.Array,
      state: CvState = None) -> chex.Array:
    """Second order expansion of `function` at the mean of the input dist."""
    del state  # This control variate is stateless.
    mean_dist = params[0]
    centered_sample = sample - mean_dist
    # Function is a function of samples. Here, we use the mean as the input
    # since we do a Taylor expansion of function around the mean.
    grads = jax.grad(function)(mean_dist)
    hessians = jax.hessian(function)(mean_dist)
    assert hessians.ndim == 2
    # f(mu) + (x - mu)^T g + 0.5 (x - mu)^T H (x - mu).
    control_variate = function(mean_dist)
    control_variate += jnp.dot(centered_sample, grads)
    control_variate += jnp.dot(
        jnp.dot(centered_sample, hessians), centered_sample) / 2.
    return control_variate

  def expected_value_delta(
      params: base.Params, state: CvState) -> jax.Array:
    """Expected value of second order expansion of `function` at dist mean."""
    del state  # This control variate is stateless.
    mean_dist = params[0]
    # NOTE(review): params[1] appears to hold per-dimension log standard
    # deviations (exp then square yields the variance) — confirm with callers.
    var_dist = jnp.square(jnp.exp(params[1]))
    hessians = jax.hessian(function)(mean_dist)

    assert hessians.ndim == 2
    hess_diags = jnp.diag(hessians)
    assert hess_diags.ndim == 1

    # Trace (Hessian * Sigma) and we use that Sigma is diagonal.
    expected_second_order_term = jnp.sum(var_dist * hess_diags) / 2.

    expected_control_variate = function(mean_dist)
    expected_control_variate += expected_second_order_term
    return expected_control_variate

  def update_state(
      params: base.Params,
      samples: chex.Array,
      state: CvState = None) -> CvState:
    """No state kept, so no operation is done."""
    del params, samples
    return state

  return delta, expected_value_delta, update_state
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def moving_avg_baseline(
    function: Callable[[chex.Array], float],
    decay: float = 0.99,
    zero_debias: bool = True,
    use_decay_early_training_heuristic: bool = True) -> ControlVariate:
  """A moving average baseline.

  It has no effect on the pathwise or measure valued estimator.

  The control-variate state is a pair `(value, step_count)` where `value` is
  the current (possibly zero-debiased) moving average of `function`.

  Args:
    function: The function for which to compute the control variate.
      The function takes in one argument (a sample from the distribution) and
      returns a floating point value.
    decay: The decay rate for the moving average.
    zero_debias: Whether or not to use zero debiasing for the moving average.
    use_decay_early_training_heuristic: Whether or not to use a heuristic which
      overrides the decay value early in training based on
      min(decay, (1.0 + i) / (10.0 + i)). This stabilises training and was
      adapted from the Tensorflow codebase.

  Returns:
    A tuple of three functions, to compute the control variate, the
    expected value of the control variate, and to update the control variate
    state.
  """
  def moving_avg(
      params: base.Params,
      samples: chex.Array,
      state: CvState = None) -> CvState:
    """Return the moving average."""
    del params, samples  # The baseline is constant wrt params and samples.
    return state[0]

  def expected_value_moving_avg(
      params: base.Params, state: CvState) -> chex.Array:
    """Return the moving average."""
    del params
    return state[0]

  def update_state(
      params: base.Params,
      samples: chex.Array,
      state: CvState = None) -> CvState:
    """Update the moving average."""
    del params
    # state is (current average, iteration counter).
    value, i = state

    if use_decay_early_training_heuristic:
      iteration_decay = jnp.minimum(decay, (1.0 + i) / (10.0 + i))
    else:
      iteration_decay = decay

    updated_value = iteration_decay * value
    updated_value += (1 - iteration_decay) * jnp.mean(
        jax.vmap(function)(samples))

    if zero_debias:
      # Adam-style zero-debiasing: divide by 1 - decay^(i+1).
      updated_value /= (jnp.ones([]) - jnp.power(iteration_decay, i + 1))

    # stop_gradient: the baseline must not contribute gradients of its own.
    return (jax.lax.stop_gradient(updated_value), i + 1)

  return moving_avg, expected_value_moving_avg, update_state
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def _map(cv, params, samples, state):
|
| 208 |
+
return jax.vmap(lambda x: cv(params, x, state))(samples)
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
def control_variates_jacobians(
    function: Callable[[chex.Array], float],
    control_variate_from_function: Callable[
        [Callable[[chex.Array], float]], ControlVariate
    ],
    grad_estimator: Callable[..., jnp.ndarray],
    params: base.Params,
    dist_builder: Callable[..., Any],
    rng: chex.PRNGKey,
    num_samples: int,
    control_variate_state: CvState = None,
    estimate_cv_coeffs: bool = False,
    estimate_cv_coeffs_num_samples: int = 20,
) -> tuple[Sequence[chex.Array], CvState]:
  r"""Obtain jacobians using control variates.

  We will compute each term individually. The first term will use stochastic
  gradient estimation. The second term will be computed using Monte
  Carlo estimation and automatic differentiation to compute
  \nabla_{\theta} h(x; \theta). The third term will be computed using
  automatic differentiation, as we restrict ourselves to control variates
  which compute this expectation in closed form.

  This function updates the state of the control variate (once), before
  computing the control variate coefficients.

  Args:
    function: Function f(x) for which to estimate grads_{params} E_dist f(x).
      The function takes in one argument (a sample from the distribution) and
      returns a floating point value.
    control_variate_from_function: The control variate to use to reduce
      variance. See `control_delta_method` and `moving_avg_baseline` examples.
    grad_estimator: The gradient estimator to be used to compute the gradients.
      Note that not all control variates will reduce variance for all
      estimators. For example, the `moving_avg_baseline` will make no
      difference to the measure valued or pathwise estimators.
    params: A tuple of jnp arrays.
      The parameters for which to construct the distribution and for which we
      want to compute the jacobians.
    dist_builder: a constructor which builds a distribution given the input
      parameters specified by params. `dist_builder(params)` should return a
      valid distribution.
    rng: a PRNGKey key.
    num_samples: Int, the number of samples used to compute the grads.
    control_variate_state: The control variate state. This is used for control
      variates which keep states (such as the moving average baselines).
    estimate_cv_coeffs: Boolean. Whether or not to estimate the optimal control
      variate coefficient via `estimate_control_variate_coefficients`.
    estimate_cv_coeffs_num_samples: The number of samples to use to estimate
      the optimal coefficient. These need to be new samples to ensure that the
      objective is unbiased.

  Returns:
    A tuple of size two:

    * A tuple of size `params`, each element is `num_samples x param.shape`
      jacobian vector containing the estimates of the gradients obtained
      for each sample.
      The mean of this vector is the gradient wrt to parameters that can be
      used for learning. The entire jacobian vector can be used to assess
      estimator variance.
    * The updated CV state.
  """
  control_variate = control_variate_from_function(function)
  stochastic_cv, expected_value_cv, update_state_cv = control_variate
  # Data dimensionality is taken from the leading axis of the first leaf;
  # the per-parameter shape checks below assume every leaf is (data_dim,).
  data_dim = jax.tree_util.tree_leaves(params)[0].shape[0]
  if estimate_cv_coeffs:
    cv_coeffs = estimate_control_variate_coefficients(
        function, control_variate_from_function, grad_estimator, params,
        dist_builder, rng, estimate_cv_coeffs_num_samples,
        control_variate_state)
  else:
    # Default coefficient of 1.0 applies the control variate at full strength.
    cv_coeffs = [1.0] * len(params)

  # \int \nabla_{\theta} {p(x; \theta)} (f(x) - h(x; \theta)) dx
  function_jacobians = grad_estimator(
      function, params, dist_builder, rng, num_samples)

  # Chain rule since CVs can also depend on parameters - for example, for the
  # pathwise gradient estimator they have in order to have an effect on
  # gradient.
  # The rng has to be the same as passed to the grad_estimator above so that we
  # obtain the same samples.
  samples = dist_builder(*params).sample((num_samples,), seed=rng)
  # If the CV has state, update it.
  control_variate_state = update_state_cv(
      params, samples, control_variate_state)

  def samples_fn(x):
    # stop_gradient(params): only the sample pathway contributes here.
    return stochastic_cv(
        jax.lax.stop_gradient(params), x, control_variate_state)

  cv_jacobians = grad_estimator(
      samples_fn, params, dist_builder, rng, num_samples)

  # The gradients of the stochastic covariate with respect to the parameters.
  def param_fn(x):
    # stop_gradient(samples): only the explicit params pathway contributes.
    return jnp.mean(_map(
        stochastic_cv, x,
        jax.lax.stop_gradient(samples), control_variate_state))

  # [E_{p(x; \theta)} \nabla_{\theta} h(x; \theta)
  cv_param_grads = jax.grad(param_fn)(params)
  # The gradients of the closed form expectation of the control variate
  # with respect to the parameters: # \nabla_{\theta} E_{p(x; \theta)}].
  expected_value_grads = jax.grad(
      lambda x: expected_value_cv(x, control_variate_state))(params)

  jacobians = []
  for param_index, param in enumerate(jax.tree_util.tree_leaves(params)):
    chex.assert_shape(function_jacobians[param_index], (num_samples, data_dim))
    chex.assert_shape(cv_jacobians[param_index], (num_samples, data_dim))
    chex.assert_shape(cv_param_grads[param_index], (data_dim,))
    chex.assert_shape(expected_value_grads[param_index], (data_dim,))

    cv_coeff = cv_coeffs[param_index]
    # \int \nabla_{\theta} {p(x; \theta)} (f(x) - h(x; \theta)) dx
    param_jacobians = function_jacobians[param_index]
    param_jacobians -= cv_coeff * cv_jacobians[param_index]
    # - [E_{p(x; \theta)} \nabla_{\theta} h(x; \theta)
    param_jacobians -= cv_coeff * cv_param_grads[param_index]
    # \nabla_{\theta} E_{p(x; \theta)}]
    param_jacobians += cv_coeff * expected_value_grads[param_index]

    chex.assert_shape(param_jacobians, (num_samples,) + param.shape)
    jacobians.append(param_jacobians)

  return jacobians, control_variate_state
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
def estimate_control_variate_coefficients(
    function: Callable[[chex.Array], float],
    control_variate_from_function: Callable[
        [Callable[[chex.Array], float]], ControlVariate
    ],
    grad_estimator: Callable[..., jnp.ndarray],
    params: base.Params,
    dist_builder: Callable[..., Any],
    rng: chex.PRNGKey,
    num_samples: int,
    control_variate_state: CvState = None,
    eps: float = 1e-3,
) -> Sequence[float]:
  r"""Estimates the control variate coefficients for the given parameters.

  For each variable `var_k`, the coefficient is given by:
    \sum_k cov(df/d var_k, d cv/d var_k) / (\sum var(d cv/d var_k) + eps)

  Where var_k is the k'th element of the parameters in `params`.
  The covariance and variance calculations are done from samples obtained
  from the distribution obtained by calling `dist_builder` on the input
  `params`.

  This function does not update the state of the control variate.

  Args:
    function: Function f(x) for which to estimate grads_{params} E_dist f(x).
      The function takes in one argument (a sample from the distribution) and
      returns a floating point value.
    control_variate_from_function: The control variate to use to reduce
      variance. See `control_delta_method` and `moving_avg_baseline` examples.
    grad_estimator: The gradient estimator to be used to compute the gradients.
      Note that not all control variates will reduce variance for all
      estimators. For example, the `moving_avg_baseline` will make no
      difference to the measure valued or pathwise estimators.
    params: A tuple of jnp arrays.
      The parameters for which to construct the distribution and for which we
      want to compute the jacobians.
    dist_builder: a constructor which builds a distribution given the input
      parameters specified by params. `dist_builder(params)` should return a
      valid distribution.
    rng: a PRNGKey key.
    num_samples: Int, the number of samples used to compute the grads.
    control_variate_state: The control variate state. This is used for control
      variates which keep states (such as the moving average baselines).
    eps: A small constant used to avoid numerical issues. Float.

  Returns:
    A list of control variate coefficients (each a scalar), for each parameter
    in `params`.
  """
  # Resample to avoid biased gradients.
  cv_rng, _ = jax.random.split(rng)
  del rng  # Avoid using rng in this function.
  stochastic_cv, _, _ = control_variate_from_function(function)

  # Samples have to be the same so we use the same rng.
  cv_jacobians = grad_estimator(
      lambda x: stochastic_cv(params, x, control_variate_state),
      params, dist_builder, cv_rng, num_samples)
  function_jacobians = grad_estimator(
      function, params, dist_builder, cv_rng, num_samples)

  def compute_coeff(param_cv_jacs, param_f_jacs):
    """Optimal scalar coefficient for one parameter's jacobians."""
    assert param_f_jacs.ndim == 2
    assert param_cv_jacs.ndim == 2

    # Sample means along the num_samples axis.
    mean_f = jnp.mean(param_f_jacs, axis=0)
    mean_cv = jnp.mean(param_cv_jacs, axis=0)

    # Per-dimension covariance between function and CV jacobians.
    cov = jnp.mean((param_f_jacs - mean_f) * (param_cv_jacs - mean_cv), axis=0)

    assert cov.ndim == 1

    # Compute the coefficients which minimize variance.
    # Since we want to minimize the variances across parameter dimensions,
    # the optimal coefficients are given by the sum of covariances per
    # dimensions over the sum of variances per dimension.
    cv_coeff = jnp.sum(cov) / (jnp.sum(jnp.var(param_cv_jacs, axis=0)) + eps)
    # stop_gradient: the coefficient is treated as a constant downstream.
    return jax.lax.stop_gradient(cv_coeff)

  return [compute_coeff(cv_jacobians[i], function_jacobians[i])
          for i in range(len(params))]
|
testbed/google-deepmind__optax/optax/monte_carlo/control_variates_test.py
ADDED
|
@@ -0,0 +1,597 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for `control_variates.py`."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
from absl.testing import parameterized
|
| 19 |
+
|
| 20 |
+
import chex
|
| 21 |
+
import jax
|
| 22 |
+
import jax.numpy as jnp
|
| 23 |
+
import numpy as np
|
| 24 |
+
|
| 25 |
+
from optax._src import utils
|
| 26 |
+
from optax.monte_carlo import control_variates
|
| 27 |
+
from optax.monte_carlo import stochastic_gradient_estimators as sge
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# Set seed for deterministic sampling.
|
| 31 |
+
np.random.seed(42)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def _assert_equal(actual, expected, rtol=1e-2, atol=1e-2):
  """Asserts that `actual` and `expected` match in shape and in value."""
  # `np.testing.assert_allclose` does not validate shapes, so check them first.
  chex.assert_equal_shape((actual, expected))

  # Scalars can be handed to assert_allclose directly.
  if not actual.shape:
    np.testing.assert_allclose(
        np.asarray(actual), np.asarray(expected), rtol, atol)
    return

  # Work around https://github.com/numpy/numpy/issues/13801: positions where
  # `expected` is exactly zero are compared against `atol` by hand.
  zero_positions = np.argwhere(expected == 0)
  if not np.all(np.abs(actual[zero_positions]) <= atol):
    raise AssertionError(f'Larger than {atol} diff in {actual[zero_positions]}')

  nonzero_positions = np.argwhere(expected != 0)
  np.testing.assert_allclose(
      np.asarray(actual)[nonzero_positions],
      expected[nonzero_positions], rtol, atol)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def _map(cv, params, samples, state=None):
|
| 57 |
+
return jax.vmap(lambda x: cv(params, x, state))(samples)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def _map_variant(variant):
  """Wraps `_map` in a chex test variant; the cv callable stays static."""
  # `static_argnums=0` marks the control variate callable as non-traceable.
  return variant(_map, static_argnums=0)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def _cv_jac_variant(variant):
  """Wraps `control_variates_jacobians` in a chex test variant.

  The static argnums mark all callable/flag arguments (function, cv builder,
  grad estimator, dist builder, num_samples, cv coeff flag, eps) so only the
  array arguments are traced.
  """
  return variant(
      control_variates.control_variates_jacobians,
      static_argnums=(0, 1, 2, 4, 6, 7, 8))
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class DeltaControlVariateTest(chex.TestCase):
  """Tests for the second-order Taylor (delta method) control variate."""

  @chex.all_variants
  @parameterized.parameters([(1.0, 0.5)])
  def testQuadraticFunction(self, effective_mean, effective_log_scale):
    """For a quadratic f the delta-method expansion is exact."""
    data_dims = 20
    num_samples = 10**6
    rng = jax.random.PRNGKey(1)

    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)
    params = [mean, log_scale]

    dist = utils.multi_normal(*params)
    dist_samples = dist.sample((num_samples,), rng)
    function = lambda x: jnp.sum(x**2)

    cv, expected_cv, _ = control_variates.control_delta_method(function)
    # Average of the per-sample control variate values.
    avg_cv = jnp.mean(_map_variant(self.variant)(cv, params, dist_samples))
    # Monte Carlo estimate of E[f(x)] from the same samples.
    expected_cv_value = jnp.sum(dist_samples**2) / num_samples

    # This should be an analytical computation, the result needs to be
    # accurate.
    _assert_equal(avg_cv, expected_cv_value, rtol=1e-1, atol=1e-3)
    _assert_equal(expected_cv(params, None), expected_cv_value, rtol=0.02)

  @chex.all_variants
  @parameterized.parameters([(1.0, 1.0)])
  def testPolinomialFunction(self, effective_mean, effective_log_scale):
    """A degree-5 polynomial: expansion is approximate but close on average."""
    data_dims = 10
    num_samples = 10**3

    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)
    params = [mean, log_scale]

    dist = utils.multi_normal(*params)
    rng = jax.random.PRNGKey(1)
    dist_samples = dist.sample((num_samples,), rng)
    function = lambda x: jnp.sum(x**5)

    cv, expected_cv, _ = control_variates.control_delta_method(function)
    avg_cv = jnp.mean(_map_variant(self.variant)(cv, params, dist_samples))

    # Check that the average value of the control variate is close to the
    # expected value.
    _assert_equal(avg_cv, expected_cv(params, None), rtol=1e-1, atol=1e-3)

  @chex.all_variants
  def testNonPolynomialFunction(self):
    """A non-polynomial f where the expected value is known in closed form."""
    data_dims = 10
    num_samples = 10**3

    mean = jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = jnp.ones(shape=(data_dims), dtype=jnp.float32)
    params = [mean, log_scale]

    rng = jax.random.PRNGKey(1)
    dist = utils.multi_normal(*params)
    dist_samples = dist.sample((num_samples,), rng)
    function = lambda x: jnp.sum(jnp.log(x**2))

    cv, expected_cv, _ = control_variates.control_delta_method(function)
    avg_cv = jnp.mean(_map_variant(self.variant)(cv, params, dist_samples))

    # Check that the average value of the control variate is close to the
    # expected value.
    _assert_equal(avg_cv, expected_cv(params, None), rtol=1e-1, atol=1e-3)

    # Second order expansion is log(\mu**2) + 1/2 * \sigma**2 (-2 / \mu**2)
    expected_cv_val = - np.exp(1.) ** 2 * data_dims
    _assert_equal(
        expected_cv(params, None), expected_cv_val, rtol=1e-1, atol=1e-3)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class MovingAverageBaselineTest(chex.TestCase):
  """Tests for the moving-average baseline control variate."""

  @chex.all_variants
  @parameterized.parameters(
      [(1.0, 0.5, 0.9),
       (1.0, 0.5, 0.99)])
  def testLinearFunction(
      self, effective_mean, effective_log_scale, decay):
    """Baseline value and its EMA update, without the early-training heuristic."""
    weights = jnp.array([1., 2., 3.], dtype=jnp.float32)
    num_samples = 10**4
    data_dims = len(weights)

    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)

    params = [mean, log_scale]
    function = lambda x: jnp.sum(weights * x)

    rng = jax.random.PRNGKey(1)
    dist = utils.multi_normal(*params)
    dist_samples = dist.sample((num_samples,), rng)

    cv, expected_cv, update_state = control_variates.moving_avg_baseline(
        function, decay=decay, zero_debias=False,
        use_decay_early_training_heuristic=False)

    # The baseline is constant in the sample, so its per-sample values and
    # expected value both equal the stored state.
    state_1 = jnp.array(1.)
    avg_cv = jnp.mean(_map_variant(self.variant)(
        cv, params, dist_samples, (state_1, 0)))
    _assert_equal(avg_cv, state_1)
    _assert_equal(expected_cv(params, (state_1, 0)), state_1)

    state_2 = jnp.array(2.)
    avg_cv = jnp.mean(
        _map_variant(self.variant)(cv, params, dist_samples, (state_2, 0)))
    _assert_equal(avg_cv, state_2)
    _assert_equal(expected_cv(params, (state_2, 0)), state_2)

    # The state update is an exponential moving average of the function value.
    update_state_1 = update_state(params, dist_samples, (state_1, 0))[0]
    _assert_equal(
        update_state_1,
        decay * state_1 + (1 - decay) * function(mean))

    update_state_2 = update_state(params, dist_samples, (state_2, 0))[0]
    _assert_equal(
        update_state_2,
        decay * state_2 + (1 - decay) * function(mean))

  @chex.all_variants
  @parameterized.parameters(
      [(1.0, 0.5, 0.9),
       (1.0, 0.5, 0.99)])
  def testLinearFunctionWithHeuristic(
      self, effective_mean, effective_log_scale, decay):
    """With the heuristic, the effective decay is i/(i+1) early in training."""
    weights = jnp.array([1., 2., 3.], dtype=jnp.float32)
    num_samples = 10**5
    data_dims = len(weights)

    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)

    params = [mean, log_scale]
    function = lambda x: jnp.sum(weights * x)

    rng = jax.random.PRNGKey(1)
    dist = utils.multi_normal(*params)
    dist_samples = dist.sample((num_samples,), rng)

    cv, expected_cv, update_state = control_variates.moving_avg_baseline(
        function, decay=decay, zero_debias=False,
        use_decay_early_training_heuristic=True)

    state_1 = jnp.array(1.)
    avg_cv = jnp.mean(_map_variant(self.variant)(
        cv, params, dist_samples, (state_1, 0)))
    _assert_equal(avg_cv, state_1)
    _assert_equal(expected_cv(params, (state_1, 0)), state_1)

    state_2 = jnp.array(2.)
    avg_cv = jnp.mean(
        _map_variant(self.variant)(cv, params, dist_samples, (state_2, 0)))
    _assert_equal(avg_cv, state_2)
    _assert_equal(expected_cv(params, (state_2, 0)), state_2)

    # At step 0 the heuristic decay is min(decay, 1/10) = 0.1; at step 1 it is
    # min(decay, 2/11).
    first_step_decay = 0.1
    update_state_1 = update_state(params, dist_samples, (state_1, 0))[0]
    _assert_equal(
        update_state_1,
        first_step_decay * state_1 + (1 - first_step_decay) * function(mean))

    second_step_decay = 2. / 11
    update_state_2 = update_state(params, dist_samples, (state_2, 1))[0]
    _assert_equal(
        update_state_2,
        second_step_decay * state_2 + (1 - second_step_decay) * function(mean))

  @parameterized.parameters(
      [(1.0, 0.5, 0.9),
       (1.0, 0.5, 0.99)])
  def testLinearFunctionZeroDebias(
      self, effective_mean, effective_log_scale, decay):
    """Zero-debiasing makes the first update equal the raw function value."""
    weights = jnp.array([1., 2., 3.], dtype=jnp.float32)
    num_samples = 10**5
    data_dims = len(weights)

    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)

    params = [mean, log_scale]
    function = lambda x: jnp.sum(weights * x)

    rng = jax.random.PRNGKey(1)
    dist = utils.multi_normal(*params)
    dist_samples = dist.sample((num_samples,), rng)

    update_state = control_variates.moving_avg_baseline(
        function, decay=decay, zero_debias=False,
        use_decay_early_training_heuristic=False)[-1]

    update_state_zero_debias = control_variates.moving_avg_baseline(
        function, decay=decay, zero_debias=True,
        use_decay_early_training_heuristic=False)[-1]

    # Without zero-debiasing, starting from 0 the first EMA step shrinks the
    # function value by (1 - decay).
    updated_state = update_state(params, dist_samples, (jnp.array(0.), 0))[0]
    _assert_equal(updated_state, (1 - decay) * function(mean))

    # With zero-debiasing the bias toward the zero initialization is removed.
    updated_state_zero_debias = update_state_zero_debias(
        params, dist_samples, (jnp.array(0.), 0))[0]
    _assert_equal(
        updated_state_zero_debias, function(mean))
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
class DeltaMethodAnalyticalExpectedGrads(chex.TestCase):
  """Tests for grads approximations.

  Each test compares Monte Carlo gradient estimates (with a delta-method
  control variate, optionally with estimated cv coefficients) against
  gradients of E_{N(mu, sigma)} f(x) derived analytically from Gaussian
  moments.
  """

  @chex.all_variants
  @parameterized.named_parameters(
      chex.params_product([
          ('_score_function_jacobians', 1.0, 1.0, sge.score_function_jacobians),
          ('_pathwise_jacobians', 1.0, 1.0, sge.pathwise_jacobians),
          ('_measure_valued_jacobians', 1.0, 1.0, sge.measure_valued_jacobians),
      ], [
          ('estimate_cv_coeffs', True),
          ('no_estimate_cv_coeffs', False),
      ],
      named=True))
  def testQuadraticFunction(self, effective_mean, effective_log_scale,
                            grad_estimator, estimate_cv_coeffs):
    """f(x) = sum(x**2): E[f] = mu**2 + sigma**2 per dimension."""
    data_dims = 3
    num_samples = 10**3

    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)

    params = [mean, log_scale]
    function = lambda x: jnp.sum(x**2)
    rng = jax.random.PRNGKey(1)

    jacobians = _cv_jac_variant(self.variant)(
        function,
        control_variates.control_delta_method,
        grad_estimator,
        params,
        utils.multi_normal,  # dist_builder
        rng,
        num_samples,
        None,  # No cv state.
        estimate_cv_coeffs)[0]

    # d/dmu E[x**2] = 2 mu; d/d(log sigma) sigma**2 = 2 sigma**2.
    expected_mean_grads = 2 * effective_mean * np.ones(
        data_dims, dtype=np.float32)
    expected_log_scale_grads = 2 * np.exp(2 * effective_log_scale) * np.ones(
        data_dims, dtype=np.float32)

    mean_jacobians = jacobians[0]
    chex.assert_shape(mean_jacobians, (num_samples, data_dims))
    mean_grads_from_jacobian = jnp.mean(mean_jacobians, axis=0)

    log_scale_jacobians = jacobians[1]
    chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
    log_scale_grads_from_jacobian = jnp.mean(log_scale_jacobians, axis=0)

    _assert_equal(mean_grads_from_jacobian, expected_mean_grads,
                  rtol=1e-1, atol=1e-3)
    _assert_equal(log_scale_grads_from_jacobian, expected_log_scale_grads,
                  rtol=1e-1, atol=1e-3)

  @chex.all_variants
  @parameterized.named_parameters(
      chex.params_product([
          ('_score_function_jacobians', 1.0, 1.0, sge.score_function_jacobians),
          ('_pathwise_jacobians', 1.0, 1.0, sge.pathwise_jacobians),
          ('_measure_valued_jacobians', 1.0, 1.0, sge.measure_valued_jacobians),
      ], [
          ('estimate_cv_coeffs', True),
          ('no_estimate_cv_coeffs', False),
      ],
      named=True))
  def testCubicFunction(
      self, effective_mean, effective_log_scale, grad_estimator,
      estimate_cv_coeffs):
    """f(x) = sum(x**3): uses the third Gaussian moment."""
    data_dims = 1
    num_samples = 10**5

    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)

    params = [mean, log_scale]
    function = lambda x: jnp.sum(x**3)
    rng = jax.random.PRNGKey(1)

    jacobians = _cv_jac_variant(self.variant)(
        function,
        control_variates.control_delta_method,
        grad_estimator,
        params,
        utils.multi_normal,
        rng,
        num_samples,
        None,  # No cv state.
        estimate_cv_coeffs)[0]

    # The third order uncentered moment of the Gaussian distribution is
    # mu**3 + 3 mu * sigma**2. We use that to compute the expected value
    # of the gradients. Note: for the log scale we need use the chain rule.
    expected_mean_grads = (
        3 * effective_mean**2 + 3 * np.exp(effective_log_scale)**2)
    expected_mean_grads *= np.ones(data_dims, dtype=np.float32)
    expected_log_scale_grads = (
        6 * effective_mean * np.exp(effective_log_scale) ** 2)
    expected_log_scale_grads *= np.ones(data_dims, dtype=np.float32)

    mean_jacobians = jacobians[0]
    chex.assert_shape(mean_jacobians, (num_samples, data_dims))
    mean_grads_from_jacobian = jnp.mean(mean_jacobians, axis=0)

    log_scale_jacobians = jacobians[1]
    chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
    log_scale_grads_from_jacobian = jnp.mean(log_scale_jacobians, axis=0)

    _assert_equal(mean_grads_from_jacobian, expected_mean_grads,
                  rtol=1e-1, atol=1e-3)

    _assert_equal(log_scale_grads_from_jacobian, expected_log_scale_grads,
                  rtol=1e-1, atol=1e-3)

  @chex.all_variants
  @parameterized.named_parameters(
      chex.params_product([
          ('_score_function_jacobians', 1.0, 1.0, sge.score_function_jacobians),
          ('_pathwise_jacobians', 1.0, 1.0, sge.pathwise_jacobians),
          ('_measure_valued_jacobians', 1.0, 1.0, sge.measure_valued_jacobians),
      ], [
          ('estimate_cv_coeffs', True),
          ('no_estimate_cv_coeffs', False),
      ],
      named=True))
  def testForthPowerFunction(
      self, effective_mean, effective_log_scale, grad_estimator,
      estimate_cv_coeffs):
    """f(x) = sum(x**4): uses the fourth Gaussian moment."""
    data_dims = 1
    num_samples = 10**5

    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)

    params = [mean, log_scale]
    function = lambda x: jnp.sum(x**4)
    rng = jax.random.PRNGKey(1)

    jacobians = _cv_jac_variant(self.variant)(
        function,
        control_variates.control_delta_method,
        grad_estimator,
        params,
        utils.multi_normal,
        rng,
        num_samples,
        None,  # No cv state
        estimate_cv_coeffs)[0]
    # The fourth order uncentered moment of the Gaussian distribution is
    # mu**4 + 6 mu**2 sigma**2 + 3 sigma**4. We use that to compute the
    # expected value of the gradients.
    # Note: for the log scale we need use the chain rule.
    # d/dmu E[x**4] = 4 mu**3 + 12 mu sigma**2. (The coefficient of the
    # mu**3 term was previously 3, which is mathematically wrong; the error
    # was masked because the sigma**2 term dominates within rtol=1e-1.)
    expected_mean_grads = (
        4 * effective_mean**3
        + 12 * effective_mean * np.exp(effective_log_scale)**2)
    expected_mean_grads *= np.ones(data_dims, dtype=np.float32)
    expected_log_scale_grads = 12 * (
        effective_mean**2 * np.exp(effective_log_scale) +
        np.exp(effective_log_scale) ** 3) * np.exp(effective_log_scale)
    expected_log_scale_grads *= np.ones(data_dims, dtype=np.float32)

    mean_jacobians = jacobians[0]
    chex.assert_shape(mean_jacobians, (num_samples, data_dims))
    mean_grads_from_jacobian = jnp.mean(mean_jacobians, axis=0)

    log_scale_jacobians = jacobians[1]
    chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
    log_scale_grads_from_jacobian = jnp.mean(log_scale_jacobians, axis=0)

    _assert_equal(mean_grads_from_jacobian, expected_mean_grads,
                  rtol=1e-1, atol=1e-3)

    _assert_equal(log_scale_grads_from_jacobian, expected_log_scale_grads,
                  rtol=1e-1, atol=1e-3)
|
| 459 |
+
|
| 460 |
+
|
| 461 |
+
class ConsistencyWithStandardEstimators(chex.TestCase):
  """Tests for consistency between estimators.

  Gradients estimated with a control variate must agree (in expectation) with
  the same estimator run without one — the control variate only changes
  variance, not bias.
  """

  @chex.all_variants
  @parameterized.named_parameters(
      chex.params_product([
          ('_score_function_jacobians', 1, 1, sge.score_function_jacobians),
          ('_pathwise_jacobians', 1, 1, sge.pathwise_jacobians),
          ('_measure_valued_jacobians', 1, 1, sge.measure_valued_jacobians),
      ], [
          (
              'control_delta_method',
              10**5,
              control_variates.control_delta_method
          ),
          ('moving_avg_baseline', 10**6, control_variates.moving_avg_baseline),
      ],
      named=True))
  def testWeightedLinearFunction(self, effective_mean, effective_log_scale,
                                 grad_estimator, num_samples,
                                 control_variate_from_function):
    """Check that the gradients are consistent between estimators."""
    weights = jnp.array([1., 2., 3.], dtype=jnp.float32)
    data_dims = len(weights)

    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)

    params = [mean, log_scale]
    function = lambda x: jnp.sum(weights * x)
    rng = jax.random.PRNGKey(1)
    cv_rng, ge_rng = jax.random.split(rng)

    jacobians = _cv_jac_variant(self.variant)(
        function,
        control_variate_from_function,
        grad_estimator,
        params,
        utils.multi_normal,  # dist_builder
        cv_rng,  # rng
        num_samples,
        (0., 0),  # control_variate_state
        False)[0]

    mean_jacobians = jacobians[0]
    chex.assert_shape(mean_jacobians, (num_samples, data_dims))
    mean_grads = jnp.mean(mean_jacobians, axis=0)

    log_scale_jacobians = jacobians[1]
    chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
    log_scale_grads = jnp.mean(log_scale_jacobians, axis=0)

    # We use a different random number generator for the gradient estimator
    # without the control variate.
    no_cv_jacobians = grad_estimator(
        function, [mean, log_scale],
        utils.multi_normal, ge_rng, num_samples=num_samples)

    no_cv_mean_jacobians = no_cv_jacobians[0]
    chex.assert_shape(no_cv_mean_jacobians, (num_samples, data_dims))
    no_cv_mean_grads = jnp.mean(no_cv_mean_jacobians, axis=0)

    no_cv_log_scale_jacobians = no_cv_jacobians[1]
    chex.assert_shape(no_cv_log_scale_jacobians, (num_samples, data_dims))
    no_cv_log_scale_grads = jnp.mean(no_cv_log_scale_jacobians, axis=0)

    _assert_equal(mean_grads, no_cv_mean_grads, rtol=1e-1, atol=5e-2)
    _assert_equal(log_scale_grads, no_cv_log_scale_grads, rtol=1, atol=5e-2)

  @chex.all_variants
  @parameterized.named_parameters(
      chex.params_product([
          ('_score_function_jacobians', 1, 1, sge.score_function_jacobians,
           10**5),
          ('_pathwise_jacobians', 1, 1, sge.pathwise_jacobians, 10**5),
          ('_measure_valued_jacobians', 1, 1, sge.measure_valued_jacobians,
           10**5),
      ], [
          ('control_delta_method', control_variates.control_delta_method),
          ('moving_avg_baseline', control_variates.moving_avg_baseline),
      ],
      named=True))
  def testNonPolynomialFunction(
      self, effective_mean, effective_log_scale,
      grad_estimator, num_samples, control_variate_from_function):
    """Check that the gradients are consistent between estimators."""
    data_dims = 3

    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)

    params = [mean, log_scale]
    function = lambda x: jnp.log(jnp.sum(x**2))
    rng = jax.random.PRNGKey(1)
    cv_rng, ge_rng = jax.random.split(rng)

    jacobians = _cv_jac_variant(self.variant)(
        function,
        control_variate_from_function,
        grad_estimator,
        params,
        utils.multi_normal,
        cv_rng,
        num_samples,
        (0., 0),  # control_variate_state
        False)[0]

    mean_jacobians = jacobians[0]
    chex.assert_shape(mean_jacobians, (num_samples, data_dims))
    mean_grads = jnp.mean(mean_jacobians, axis=0)

    log_scale_jacobians = jacobians[1]
    chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
    log_scale_grads = jnp.mean(log_scale_jacobians, axis=0)

    # We use a different random number generator for the gradient estimator
    # without the control variate.
    no_cv_jacobians = grad_estimator(
        function, [mean, log_scale],
        utils.multi_normal, ge_rng, num_samples=num_samples)

    no_cv_mean_jacobians = no_cv_jacobians[0]
    chex.assert_shape(no_cv_mean_jacobians, (num_samples, data_dims))
    no_cv_mean_grads = jnp.mean(no_cv_mean_jacobians, axis=0)

    no_cv_log_scale_jacobians = no_cv_jacobians[1]
    chex.assert_shape(no_cv_log_scale_jacobians, (num_samples, data_dims))
    no_cv_log_scale_grads = jnp.mean(no_cv_log_scale_jacobians, axis=0)

    _assert_equal(mean_grads, no_cv_mean_grads, rtol=1e-1, atol=5e-2)
    _assert_equal(log_scale_grads, no_cv_log_scale_grads, rtol=1e-1, atol=5e-2)
|
| 594 |
+
|
| 595 |
+
|
| 596 |
+
# Run the test suite via absl's test runner when executed as a script.
if __name__ == '__main__':
  absltest.main()
|
testbed/google-deepmind__optax/optax/monte_carlo/stochastic_gradient_estimators.py
ADDED
|
@@ -0,0 +1,317 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
r"""Stochastic Monte Carlo gradient estimators.
|
| 16 |
+
|
| 17 |
+
Utility functions to approximate gradients of the form using Monte Carlo
|
| 18 |
+
estimation:
|
| 19 |
+
\nabla_{\theta} E_{p(x; \theta)} f(x)
|
| 20 |
+
|
| 21 |
+
Here f is assumed to have no dependence on the parameters theta - if f has
|
| 22 |
+
dependence on theta, the functions below need to be called with `stop_grad(f)`
|
| 23 |
+
and the chain rule needs to be applied outside these functions in order
|
| 24 |
+
to obtain unbiased gradient.
|
| 25 |
+
|
| 26 |
+
For more details, see:
|
| 27 |
+
S. Mohamed, M. Rosca, M. Figurnov, A Mnih.
|
| 28 |
+
Monte Carlo Gradient Estimation in Machine Learning. JMLR, 2020.
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
import math
|
| 32 |
+
from typing import Any, Callable, Sequence
|
| 33 |
+
|
| 34 |
+
import chex
|
| 35 |
+
import jax
|
| 36 |
+
import jax.numpy as jnp
|
| 37 |
+
import numpy as np
|
| 38 |
+
from optax._src import base
|
| 39 |
+
from optax._src import utils
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def score_function_jacobians(
    function: Callable[[chex.Array], float],
    params: base.Params,
    dist_builder: Callable[..., Any],
    rng: chex.PRNGKey,
    num_samples: int) -> Sequence[chex.Array]:
  r"""Score function (REINFORCE) gradient estimation.

  Approximates:
    \nabla_{\theta} E_{p(x; \theta)} f(x)
  via the score-function identity:
    E_{p(x; \theta)} f(x) \nabla_{\theta} \log p(x; \theta)

  Requires p to be differentiable with respect to theta. Works for both
  continuous and discrete random variables, with no requirements on f.

  Args:
    function: Function f(x) for which to estimate grads_{params} E_dist f(x).
      Takes one argument (a single sample from the distribution) and returns
      a floating point value.
    params: A tuple of jnp arrays parameterizing the distribution.
    dist_builder: A constructor such that `dist_builder(*params)` returns a
      valid distribution.
    rng: A PRNGKey.
    num_samples: Int, the number of samples used to compute the grads.

  Returns:
    A tuple of size `params`; each element is a `num_samples x param.shape`
    jacobian holding the per-sample gradient estimates. The mean over the
    first axis is the gradient usable for learning; the full jacobian can be
    used to assess estimator variance.
  """
  def _surrogate(p):
    dist = dist_builder(*p)
    # The samples themselves must not carry gradients: only the log-prob
    # term is differentiated in the score-function estimator.
    draws = jax.lax.stop_gradient(dist.sample((num_samples,), seed=rng))
    # vmap over the sample axis so `function` need not be vectorized itself.
    per_sample_surrogate = lambda s: function(s) * dist.log_prob(s)
    return jax.vmap(per_sample_surrogate)(draws)

  return jax.jacfwd(_surrogate)(params)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def pathwise_jacobians(
    function: Callable[[chex.Array], float],
    params: base.Params,
    dist_builder: Callable[..., Any],
    rng: chex.PRNGKey,
    num_samples: int) -> Sequence[chex.Array]:
  r"""Pathwise (reparameterization) gradient estimation.

  Approximates:
    \nabla_{\theta} E_{p(x; \theta)} f(x)
  via:
    E_{p(\epsilon)} \nabla_{\theta} f(g(\epsilon, \theta))
  where x = g(\epsilon, \theta) and g depends on the distribution p.

  Requires p to be reparametrizable with the reparametrization implemented
  by the distribution's `sample` method. Applicable to continuous random
  variables; f needs to be differentiable.

  Args:
    function: Function f(x) for which to estimate grads_{params} E_dist f(x).
      Takes one argument (a single sample from the distribution) and returns
      a floating point value.
    params: A tuple of jnp arrays parameterizing the distribution.
    dist_builder: A constructor such that `dist_builder(*params)` returns a
      valid distribution.
    rng: A PRNGKey.
    num_samples: Int, the number of samples used to compute the grads.

  Returns:
    A tuple of size `params`; each element is a `num_samples x param.shape`
    jacobian holding the per-sample gradient estimates. The mean over the
    first axis is the gradient usable for learning; the full jacobian can be
    used to assess estimator variance.
  """
  def _surrogate(p):
    dist = dist_builder(*p)
    draws = dist.sample((num_samples,), seed=rng)
    # vmap over the sample axis so `function` need not be vectorized itself.
    return jax.vmap(function)(draws)

  return jax.jacfwd(_surrogate)(params)
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def measure_valued_jacobians(
    function: Callable[[chex.Array], float],
    params: base.Params,
    dist_builder: Callable[..., Any],
    rng: chex.PRNGKey,
    num_samples: int,
    coupling: bool = True) -> Sequence[chex.Array]:
  r"""Measure valued gradient estimation.

  Approximates:
    \nabla_{\theta} E_{p(x; \theta)} f(x)
  via:
    1./ c (E_{p1(x; \theta)} f(x) - E_{p2(x; \theta)} f(x))
  where p1 and p2 are measures which depend on p.

  Currently only supports computing gradients of expectations of Gaussian RVs.

  Args:
    function: Function f(x) for which to estimate grads_{params} E_dist f(x).
      Takes one argument (a single sample from the distribution) and returns
      a floating point value.
    params: A tuple of jnp arrays parameterizing the distribution.
    dist_builder: A constructor such that `dist_builder(*params)` returns a
      valid distribution. Must be `utils.multi_normal`.
    rng: A PRNGKey.
    num_samples: Int, the number of samples used to compute the grads.
    coupling: Whether to couple the positive and negative samples.
      Recommended: True, as this reduces variance.

  Returns:
    A tuple of size `params`; each element is a `num_samples x param.shape`
    jacobian holding the per-sample gradient estimates. The mean over the
    first axis is the gradient usable for learning; the full jacobian can be
    used to assess estimator variance.

  Raises:
    ValueError: If `dist_builder` is not `utils.multi_normal`.
  """
  if dist_builder is not utils.multi_normal:
    raise ValueError(
        'Unsupported distribution builder for measure_valued_jacobians!')
  dist = dist_builder(*params)

  mean_jacobians = measure_valued_estimation_mean(
      function, dist, rng, num_samples, coupling=coupling)
  std_jacobians = measure_valued_estimation_std(
      function, dist, rng, num_samples, coupling=coupling)
  # The std estimator differentiates w.r.t. the scale; apply the chain rule
  # d/d(log_scale) = scale * d/d(scale) since the parameter is log-scale.
  return [mean_jacobians, jnp.exp(dist.log_scale) * std_jacobians]
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def measure_valued_estimation_mean(
    function: Callable[[chex.Array], float],
    dist: Any,
    rng: chex.PRNGKey,
    num_samples: int,
    coupling: bool = True) -> chex.Array:
  """Measure valued grads of a Gaussian expectation of `function` wrt the mean.

  Args:
    function: Function f(x) for which to estimate grads_{mean} E_dist f(x).
      Takes one argument (a single sample from the distribution) and returns
      a floating point value.
    dist: A distribution on which we can call `sample`; must expose `params`
      as a `(mean, log_std)` pair.
    rng: A PRNGKey.
    num_samples: Int, the number of samples used to compute the grads.
    coupling: Whether to couple the positive and negative samples.
      Recommended: True, as this reduces variance.

  Returns:
    A `num_samples x D` array of per-sample gradient estimates. The mean over
    the first axis can be used to update the mean parameter; the full array
    can be used to assess estimator variance.
  """
  mean, log_std = dist.params
  scale = jnp.exp(log_std)

  draws = dist.sample((num_samples,), seed=rng)
  rng_pos, rng_neg = jax.random.split(rng)

  # The mean decomposition of the Gaussian uses Weibull perturbations.
  pos_noise = jax.random.weibull_min(
      rng_pos, scale=math.sqrt(2.), concentration=2., shape=draws.shape)
  if coupling:
    # Couple by reusing the same perturbation for both measures.
    neg_noise = pos_noise
  else:
    neg_noise = jax.random.weibull_min(
        rng_neg,
        scale=math.sqrt(2.),
        concentration=2.,
        shape=draws.shape)

  # Perturb one coordinate at a time: duplicate the D dimension to get an
  # N x D x D batch, then overwrite the diagonals with the perturbed values.
  # NOTE: you can sample base samples here if you use the same rng.
  tiled_draws = utils.tile_second_to_last_dim(draws)
  positive = utils.set_diags(tiled_draws, mean + scale * pos_noise)  # N x D
  negative = utils.set_diags(tiled_draws, mean - scale * neg_noise)  # N x D

  # Normalizing constant of the decomposition for the mean parameter.
  c = np.sqrt(2 * np.pi) * scale  # D
  # Inner vmap runs over the perturbed-coordinate axis, outer over samples,
  # so `function` only ever sees a single (unbatched) sample and the result
  # is N x D (batch by dimension). The division broadcasts over N.
  batched_function = jax.vmap(jax.vmap(function, 1, 0))
  grads = (batched_function(positive) - batched_function(negative)) / c

  chex.assert_shape(grads, (num_samples,) + scale.shape)
  return grads
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def measure_valued_estimation_std(
    function: Callable[[chex.Array], float],
    dist: Any,
    rng: chex.PRNGKey,
    num_samples: int,
    coupling: bool = True) -> chex.Array:
  """Measure valued grads of a Gaussian expectation of `function` wrt the std.

  Args:
    function: Function f(x) for which to estimate grads_{std} E_dist f(x).
      Takes one argument (a single sample from the distribution) and returns
      a floating point value.
    dist: A distribution on which we can call `sample`; must expose `params`
      as a `(mean, log_std)` pair.
    rng: A PRNGKey.
    num_samples: Int, the number of samples used to compute the grads.
    coupling: Whether to couple the positive and negative samples.
      Recommended: True, as this reduces variance.

  Returns:
    A `num_samples x D` array of per-sample gradient estimates. The mean over
    the first axis can be used to update the scale parameter; the full array
    can be used to assess estimator variance.
  """
  mean, log_std = dist.params
  scale = jnp.exp(log_std)

  draws = dist.sample((num_samples,), seed=rng)
  rng_pos, rng_neg = jax.random.split(rng)

  # The only difference from the mean-gradient case is the sampling measure:
  # here a double-sided Maxwell for the positive measure.
  pos_noise = jax.random.double_sided_maxwell(
      rng_pos, loc=0.0, scale=1.0, shape=draws.shape)
  if coupling:
    # Couple the negative sample to the positive one via a uniform rescaling.
    neg_noise = jax.random.uniform(rng_neg, draws.shape) * pos_noise
  else:
    neg_noise = jax.random.normal(rng_neg, draws.shape)

  # For the scale, both perturbations are added (not subtracted) to the mean.
  # Duplicate the D dimension to N x D x D and overwrite the diagonals.
  # NOTE: you can sample base samples here if you use the same rng.
  tiled_draws = utils.tile_second_to_last_dim(draws)
  positive = utils.set_diags(tiled_draws, mean + scale * pos_noise)  # N x D
  negative = utils.set_diags(tiled_draws, mean + scale * neg_noise)  # N x D

  # The normalizing constant differs from the mean case.
  c = scale  # D
  # Inner vmap runs over the perturbed-coordinate axis, outer over samples,
  # so `function` only ever sees a single (unbatched) sample and the result
  # is N x D (batch by dimension). The division broadcasts over N.
  batched_function = jax.vmap(jax.vmap(function, 1, 0))
  grads = (batched_function(positive) - batched_function(negative)) / c

  chex.assert_shape(grads, (num_samples,) + scale.shape)
  return grads
|
| 317 |
+
|
testbed/google-deepmind__optax/optax/monte_carlo/stochastic_gradient_estimators_test.py
ADDED
|
@@ -0,0 +1,371 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for `stochastic_gradient_estimators.py`."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
from absl.testing import parameterized
|
| 19 |
+
|
| 20 |
+
import chex
|
| 21 |
+
import jax
|
| 22 |
+
import jax.numpy as jnp
|
| 23 |
+
import numpy as np
|
| 24 |
+
|
| 25 |
+
from optax._src import utils
|
| 26 |
+
from optax.monte_carlo import stochastic_gradient_estimators as sge
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
# Set seed for deterministic sampling.
np.random.seed(42)


# Per-estimator sample counts for the unweighted test functions.
# Counts differ per estimator — presumably reflecting their different
# variances relative to the tolerances used in the tests below (TODO confirm).
_estimator_to_num_samples = {
    sge.score_function_jacobians: 5 * 10**5,
    sge.measure_valued_jacobians: 10**5,
    sge.pathwise_jacobians: 5 * 10**4,
}

# Per-estimator sample counts for the weighted test functions; the
# score-function estimator gets an order of magnitude more samples here.
_weighted_estimator_to_num_samples = {
    sge.score_function_jacobians: 5 * 10**6,
    sge.measure_valued_jacobians: 5 * 10**5,
    sge.pathwise_jacobians: 5 * 10**4,
}
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def _ones(dims):
|
| 47 |
+
return jnp.ones(shape=(dims), dtype=jnp.float32)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def _assert_equal(actual, expected, rtol=1e-2, atol=1e-2):
  """Asserts that arrays are equal."""
  # `np.testing.assert_allclose` does not verify shapes, so do it explicitly.
  chex.assert_equal_shape((actual, expected))

  # Relative tolerance is meaningless where `expected` is exactly zero
  # (https://github.com/numpy/numpy/issues/13801), so those entries are
  # checked against the absolute tolerance only.
  zero_indices = np.argwhere(expected == 0)
  if not np.all(np.abs(actual[zero_indices]) <= atol):
    raise AssertionError(f'Larger than {atol} diff in {actual[zero_indices]}')

  # Remaining entries are compared with the usual mixed tolerance check.
  non_zero_indices = np.argwhere(expected != 0)
  np.testing.assert_allclose(
      np.asarray(actual)[non_zero_indices],
      expected[non_zero_indices], rtol, atol)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def _estimator_variant(variant, estimator):
  """Wraps `estimator` in a chex test variant.

  Arguments 0, 2 and 4 of the estimators (function, dist_builder,
  num_samples) are not jax arrays and are therefore marked static.
  """
  return variant(estimator, static_argnums=(0, 2, 4))
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def _measure_valued_variant(variant):
  """Wraps `sge.measure_valued_jacobians` in a chex test variant.

  In addition to the arguments static for all estimators (0, 2, 4), the
  trailing `coupling` flag (argument 5) is also static.
  """
  return variant(
      sge.measure_valued_jacobians,
      static_argnums=(0, 2, 4, 5))
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class GradientEstimatorsTest(chex.TestCase):
  """Checks the Monte Carlo estimators against analytic Gaussian gradients.

  Each test builds a diagonal Gaussian from (mean, log_scale), estimates the
  jacobians of E[f(x)] with one of the estimators, averages over samples and
  compares against the closed-form gradient for the chosen `f`.
  """

  @chex.all_variants
  @parameterized.named_parameters(
      chex.params_product([
          ('_score_function_jacobians', sge.score_function_jacobians),
          ('_pathwise_jacobians', sge.pathwise_jacobians),
          ('_measure_valued_jacobians', sge.measure_valued_jacobians),
      ], [
          ('0.1', 0.1),
          ('0.5', 0.5),
          ('0.9', 0.9),
      ],
      named=True))
  def testConstantFunction(self, estimator, constant):
    """A constant f has zero gradient wrt both mean and log-scale."""
    data_dims = 3
    num_samples = _estimator_to_num_samples[estimator]

    effective_mean = 1.5
    mean = effective_mean * _ones(data_dims)

    effective_log_scale = 0.0
    log_scale = effective_log_scale * _ones(data_dims)
    rng = jax.random.PRNGKey(1)

    jacobians = _estimator_variant(self.variant, estimator)(
        lambda x: jnp.array(constant), [mean, log_scale],
        utils.multi_normal, rng, num_samples)

    # Average over the number of samples.
    mean_jacobians = jacobians[0]
    chex.assert_shape(mean_jacobians, (num_samples, data_dims))
    mean_grads = np.mean(mean_jacobians, axis=0)
    expected_mean_grads = np.zeros(data_dims, dtype=np.float32)

    log_scale_jacobians = jacobians[1]
    chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
    log_scale_grads = np.mean(log_scale_jacobians, axis=0)
    expected_log_scale_grads = np.zeros(data_dims, dtype=np.float32)

    _assert_equal(mean_grads, expected_mean_grads, atol=5e-3)
    _assert_equal(log_scale_grads, expected_log_scale_grads, atol=5e-3)

  @chex.all_variants
  @parameterized.named_parameters(
      chex.params_product([
          ('_score_function_jacobians', sge.score_function_jacobians),
          ('_pathwise_jacobians', sge.pathwise_jacobians),
          ('_measure_valued_jacobians', sge.measure_valued_jacobians),
      ], [
          # Fixed: test-case id '0.7_0.0)' contained a stray closing paren.
          ('0.5_-1.', 0.5, -1.),
          ('0.7_0.0', 0.7, 0.0),
          ('0.8_0.1', 0.8, 0.1),
      ],
      named=True))
  def testLinearFunction(self, estimator, effective_mean, effective_log_scale):
    """f(x) = sum(x): mean grad is ones, log-scale grad is zeros."""
    data_dims = 3
    num_samples = _estimator_to_num_samples[estimator]
    rng = jax.random.PRNGKey(1)

    mean = effective_mean * _ones(data_dims)
    log_scale = effective_log_scale * _ones(data_dims)

    jacobians = _estimator_variant(self.variant, estimator)(
        np.sum, [mean, log_scale],
        utils.multi_normal, rng, num_samples)

    mean_jacobians = jacobians[0]
    chex.assert_shape(mean_jacobians, (num_samples, data_dims))
    mean_grads = np.mean(mean_jacobians, axis=0)
    expected_mean_grads = np.ones(data_dims, dtype=np.float32)

    log_scale_jacobians = jacobians[1]
    chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
    log_scale_grads = np.mean(log_scale_jacobians, axis=0)
    expected_log_scale_grads = np.zeros(data_dims, dtype=np.float32)

    _assert_equal(mean_grads, expected_mean_grads)
    _assert_equal(log_scale_grads, expected_log_scale_grads)

  @chex.all_variants
  @parameterized.named_parameters(
      chex.params_product([
          ('_score_function_jacobians', sge.score_function_jacobians),
          ('_pathwise_jacobians', sge.pathwise_jacobians),
          ('_measure_valued_jacobians', sge.measure_valued_jacobians),
      ], [
          ('1.0_0.3', 1.0, 0.3),
      ],
      named=True))
  def testQuadraticFunction(
      self, estimator, effective_mean, effective_log_scale):
    """f(x) = sum(x^2)/2: grads are mean and exp(2*log_scale) per dim."""
    data_dims = 3
    num_samples = _estimator_to_num_samples[estimator]
    rng = jax.random.PRNGKey(1)

    mean = effective_mean * _ones(data_dims)
    log_scale = effective_log_scale * _ones(data_dims)

    jacobians = _estimator_variant(self.variant, estimator)(
        lambda x: np.sum(x**2) / 2, [mean, log_scale],
        utils.multi_normal, rng, num_samples)

    mean_jacobians = jacobians[0]
    chex.assert_shape(mean_jacobians, (num_samples, data_dims))
    mean_grads = np.mean(mean_jacobians, axis=0)
    expected_mean_grads = effective_mean * np.ones(
        data_dims, dtype=np.float32)

    log_scale_jacobians = jacobians[1]
    chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
    log_scale_grads = np.mean(log_scale_jacobians, axis=0)
    expected_log_scale_grads = np.exp(2 * effective_log_scale) * np.ones(
        data_dims, dtype=np.float32)

    _assert_equal(mean_grads, expected_mean_grads, atol=5e-2)
    _assert_equal(log_scale_grads, expected_log_scale_grads, atol=5e-2)

  @chex.all_variants
  @parameterized.named_parameters(
      chex.params_product([
          ('_score_function_jacobians', sge.score_function_jacobians),
          ('_pathwise_jacobians', sge.pathwise_jacobians),
          ('_measure_valued_jacobians', sge.measure_valued_jacobians),
      ], [
          ('case_1', [1.0, 2.0, 3.], [-1., 0.3, -2.], [1., 1., 1.]),
          ('case_2', [1.0, 2.0, 3.], [-1., 0.3, -2.], [4., 2., 3.]),
          ('case_3', [1.0, 2.0, 3.], [0.1, 0.2, 0.1], [10., 5., 1.]),
      ],
      named=True))
  def testWeightedLinear(
      self, estimator, effective_mean, effective_log_scale, weights):
    """f(x) = sum(w*x): mean grad is w, log-scale grad is zeros."""
    num_samples = _weighted_estimator_to_num_samples[estimator]
    rng = jax.random.PRNGKey(1)

    mean = jnp.array(effective_mean)
    log_scale = jnp.array(effective_log_scale)
    weights = jnp.array(weights)

    data_dims = len(effective_mean)

    function = lambda x: jnp.sum(x * weights)
    jacobians = _estimator_variant(self.variant, estimator)(
        function, [mean, log_scale],
        utils.multi_normal, rng, num_samples)

    mean_jacobians = jacobians[0]
    chex.assert_shape(mean_jacobians, (num_samples, data_dims))
    mean_grads = np.mean(mean_jacobians, axis=0)

    log_scale_jacobians = jacobians[1]
    chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
    log_scale_grads = np.mean(log_scale_jacobians, axis=0)

    expected_mean_grads = weights
    expected_log_scale_grads = np.zeros(data_dims, dtype=np.float32)

    _assert_equal(mean_grads, expected_mean_grads, atol=5e-2)
    _assert_equal(log_scale_grads, expected_log_scale_grads, atol=5e-2)

  @chex.all_variants
  @parameterized.named_parameters(
      chex.params_product([
          ('_score_function_jacobians', sge.score_function_jacobians),
          ('_pathwise_jacobians', sge.pathwise_jacobians),
          ('_measure_valued_jacobians', sge.measure_valued_jacobians),
      ], [
          ('case_1', [1.0, 2.0, 3.], [-1., 0.3, -2.], [1., 1., 1.]),
          ('case_2', [1.0, 2.0, 3.], [-1., 0.3, -2.], [4., 2., 3.]),
          ('case_3', [1.0, 2.0, 3.], [0.1, 0.2, 0.1], [3., 5., 1.]),
      ],
      named=True))
  def testWeightedQuadratic(
      self, estimator, effective_mean, effective_log_scale, weights):
    """f(x) = sum(w*x)^2: compares against the analytic Gaussian grads."""
    num_samples = _weighted_estimator_to_num_samples[estimator]
    rng = jax.random.PRNGKey(1)

    mean = jnp.array(effective_mean, dtype=jnp.float32)
    log_scale = jnp.array(effective_log_scale, dtype=jnp.float32)
    weights = jnp.array(weights, dtype=jnp.float32)

    data_dims = len(effective_mean)

    function = lambda x: jnp.sum(x * weights) ** 2
    jacobians = _estimator_variant(self.variant, estimator)(
        function, [mean, log_scale],
        utils.multi_normal, rng, num_samples)

    mean_jacobians = jacobians[0]
    chex.assert_shape(mean_jacobians, (num_samples, data_dims))
    mean_grads = np.mean(mean_jacobians, axis=0)

    log_scale_jacobians = jacobians[1]
    chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
    log_scale_grads = np.mean(log_scale_jacobians, axis=0)

    expected_mean_grads = 2 * weights * np.sum(weights * mean)
    effective_scale = np.exp(log_scale)
    expected_scale_grads = 2 * weights ** 2 * effective_scale
    # Chain rule: d/d(log_scale) = scale * d/d(scale).
    expected_log_scale_grads = expected_scale_grads * effective_scale

    _assert_equal(mean_grads, expected_mean_grads, atol=1e-1, rtol=1e-1)
    _assert_equal(
        log_scale_grads, expected_log_scale_grads, atol=1e-1, rtol=1e-1)

  @chex.all_variants
  @parameterized.named_parameters(
      chex.params_product(
          [
              ('_sum_cos_x', [1.0], [1.0], lambda x: jnp.sum(jnp.cos(x))),
              # Need to ensure that the mean is not too close to 0.
              ('_sum_log_x', [10.0], [0.0], lambda x: jnp.sum(jnp.log(x))),
              ('_sum_cos_2x', [1.0, 2.0], [1.0, -2
                                          ], lambda x: jnp.sum(jnp.cos(2 * x))),
              ('_cos_sum_2x', [1.0, 2.0], [1.0, -2
                                          ], lambda x: jnp.cos(jnp.sum(2 * x))),
          ],
          [
              ('coupling', True),
              ('nocoupling', False),
          ],
          named=True))
  def testNonPolynomialFunctionConsistencyWithPathwise(self, effective_mean,
                                                       effective_log_scale,
                                                       function, coupling):
    """Checks measure-valued grads agree with pathwise grads for smooth f."""
    num_samples = 10**5
    rng = jax.random.PRNGKey(1)
    measure_rng, pathwise_rng = jax.random.split(rng)

    mean = jnp.array(effective_mean, dtype=jnp.float32)
    log_scale = jnp.array(effective_log_scale, dtype=jnp.float32)
    data_dims = len(effective_mean)

    measure_valued_jacobians = _measure_valued_variant(self.variant)(
        function, [mean, log_scale],
        utils.multi_normal, measure_rng, num_samples, coupling)

    measure_valued_mean_jacobians = measure_valued_jacobians[0]
    chex.assert_shape(measure_valued_mean_jacobians, (num_samples, data_dims))
    measure_valued_mean_grads = np.mean(measure_valued_mean_jacobians, axis=0)

    measure_valued_log_scale_jacobians = measure_valued_jacobians[1]
    chex.assert_shape(
        measure_valued_log_scale_jacobians, (num_samples, data_dims))
    measure_valued_log_scale_grads = np.mean(
        measure_valued_log_scale_jacobians, axis=0)

    pathwise_jacobians = _estimator_variant(
        self.variant, sge.pathwise_jacobians)(function, [mean, log_scale],
                                              utils.multi_normal, pathwise_rng,
                                              num_samples)

    pathwise_mean_jacobians = pathwise_jacobians[0]
    chex.assert_shape(pathwise_mean_jacobians, (num_samples, data_dims))
    pathwise_mean_grads = np.mean(pathwise_mean_jacobians, axis=0)

    pathwise_log_scale_jacobians = pathwise_jacobians[1]
    chex.assert_shape(pathwise_log_scale_jacobians, (num_samples, data_dims))
    pathwise_log_scale_grads = np.mean(pathwise_log_scale_jacobians, axis=0)

    # Loose tolerances: two stochastic estimators are compared to each other.
    _assert_equal(
        pathwise_mean_grads, measure_valued_mean_grads, rtol=5e-1, atol=1e-1)
    _assert_equal(
        pathwise_log_scale_grads, measure_valued_log_scale_grads,
        rtol=5e-1, atol=1e-1)
|
| 342 |
+
|
| 343 |
+
class MeasuredValuedEstimatorsTest(chex.TestCase):
  """Error-handling tests for the measure valued estimators."""

  @chex.all_variants
  @parameterized.parameters([True, False])
  def testRaisesErrorForNonGaussian(self, coupling):
    """A distribution builder other than `utils.multi_normal` must raise."""
    num_samples = 10**5
    rng = jax.random.PRNGKey(1)

    function = lambda x: jnp.sum(x) ** 2

    mean = jnp.array(0, dtype=jnp.float32)
    log_scale = jnp.array(0., dtype=jnp.float32)

    class TestDist():
      # Minimal stand-in distribution. It is never actually sampled: the
      # builder identity check in `measure_valued_jacobians` raises first.

      def __init__(self, params):
        self._params = params

      def sample(self, n):
        return np.zeros(n)

    with self.assertRaises(ValueError):
      _measure_valued_variant(self.variant)(
          function, [mean, log_scale],
          TestDist, rng, num_samples, coupling)
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
# Run the test suite when executed as a script.
if __name__ == '__main__':
  absltest.main()
|
testbed/google-deepmind__optax/optax/optax_test.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for optax."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
|
| 19 |
+
import optax
|
| 20 |
+
from optax import transforms
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class OptaxTest(absltest.TestCase):
|
| 24 |
+
"""Test optax can be imported correctly."""
|
| 25 |
+
|
| 26 |
+
def test_import(self):
|
| 27 |
+
self.assertTrue(hasattr(optax, 'GradientTransformation'))
|
| 28 |
+
self.assertTrue(hasattr(transforms, 'partition'))
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
if __name__ == '__main__':
|
| 32 |
+
absltest.main()
|
testbed/google-deepmind__optax/optax/projections/__init__.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
|
| 16 |
+
"""The projections sub-package."""
|
| 17 |
+
|
| 18 |
+
from optax.projections._projections import projection_box
|
| 19 |
+
from optax.projections._projections import projection_hypercube
|
| 20 |
+
from optax.projections._projections import projection_non_negative
|
| 21 |
+
from optax.projections._projections import projection_simplex
|
testbed/google-deepmind__optax/optax/projections/_projections.py
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
|
| 16 |
+
"""Euclidean projections."""
|
| 17 |
+
|
| 18 |
+
from typing import Any
|
| 19 |
+
|
| 20 |
+
import chex
|
| 21 |
+
|
| 22 |
+
import jax
|
| 23 |
+
from jax import flatten_util
|
| 24 |
+
from jax import tree_util as jtu
|
| 25 |
+
import jax.numpy as jnp
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def projection_non_negative(pytree: Any) -> Any:
|
| 29 |
+
r"""Projection onto the non-negative orthant.
|
| 30 |
+
|
| 31 |
+
.. math::
|
| 32 |
+
|
| 33 |
+
\underset{p}{\text{argmin}} ~ ||x - p||_2^2 \quad
|
| 34 |
+
\textrm{subject to} \quad p \ge 0
|
| 35 |
+
|
| 36 |
+
where :math:`x` is the input pytree.
|
| 37 |
+
|
| 38 |
+
Args:
|
| 39 |
+
pytree: pytree to project.
|
| 40 |
+
Returns:
|
| 41 |
+
projected pytree, with the same structure as ``pytree``.
|
| 42 |
+
"""
|
| 43 |
+
return jtu.tree_map(jax.nn.relu, pytree)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def _clip_safe(leaf, lower, upper):
|
| 47 |
+
return jnp.clip(jnp.asarray(leaf), lower, upper)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def projection_box(pytree: Any, lower: Any, upper: Any) -> Any:
|
| 51 |
+
r"""Projection onto box constraints.
|
| 52 |
+
|
| 53 |
+
.. math::
|
| 54 |
+
|
| 55 |
+
\underset{p}{\text{argmin}} ~ ||x - p||_2^2 \quad \textrm{subject to} \quad
|
| 56 |
+
\text{lower} \le p \le \text{upper}
|
| 57 |
+
|
| 58 |
+
where :math:`x` is the input pytree.
|
| 59 |
+
|
| 60 |
+
Args:
|
| 61 |
+
pytree: pytree to project.
|
| 62 |
+
lower: lower bound, a scalar or pytree with the same structure as
|
| 63 |
+
``pytree``.
|
| 64 |
+
upper: upper bound, a scalar or pytree with the same structure as
|
| 65 |
+
``pytree``.
|
| 66 |
+
Returns:
|
| 67 |
+
projected pytree, with the same structure as ``pytree``.
|
| 68 |
+
"""
|
| 69 |
+
return jtu.tree_map(_clip_safe, pytree, lower, upper)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def projection_hypercube(pytree: Any, scale: Any = 1.0) -> Any:
|
| 73 |
+
r"""Projection onto the (unit) hypercube.
|
| 74 |
+
|
| 75 |
+
.. math::
|
| 76 |
+
|
| 77 |
+
\underset{p}{\text{argmin}} ~ ||x - p||_2^2 \quad \textrm{subject to} \quad
|
| 78 |
+
0 \le p \le \text{scale}
|
| 79 |
+
|
| 80 |
+
where :math:`x` is the input pytree.
|
| 81 |
+
|
| 82 |
+
By default, we project to the unit hypercube (`scale=1.0`).
|
| 83 |
+
|
| 84 |
+
This is a convenience wrapper around
|
| 85 |
+
:func:`projection_box <optax.projections.projection_box>`.
|
| 86 |
+
|
| 87 |
+
Args:
|
| 88 |
+
pytree: pytree to project.
|
| 89 |
+
scale: scale of the hypercube, a scalar or a pytree (default: 1.0).
|
| 90 |
+
Returns:
|
| 91 |
+
projected pytree, with the same structure as ``pytree``.
|
| 92 |
+
"""
|
| 93 |
+
return projection_box(pytree, lower=0.0, upper=scale)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
@jax.custom_jvp
|
| 97 |
+
def _projection_unit_simplex(values: chex.Array) -> chex.Array:
|
| 98 |
+
"""Projection onto the unit simplex."""
|
| 99 |
+
s = 1.0
|
| 100 |
+
n_features = values.shape[0]
|
| 101 |
+
u = jnp.sort(values)[::-1]
|
| 102 |
+
cumsum_u = jnp.cumsum(u)
|
| 103 |
+
ind = jnp.arange(n_features) + 1
|
| 104 |
+
cond = s / ind + (u - cumsum_u / ind) > 0
|
| 105 |
+
idx = jnp.count_nonzero(cond)
|
| 106 |
+
return jax.nn.relu(s / idx + (values - cumsum_u[idx - 1] / idx))
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
@_projection_unit_simplex.defjvp
|
| 110 |
+
def _projection_unit_simplex_jvp(
|
| 111 |
+
primals: list[chex.Array], tangents: list[chex.Array]
|
| 112 |
+
) -> tuple[chex.Array, chex.Array]:
|
| 113 |
+
values, = primals
|
| 114 |
+
values_dot, = tangents
|
| 115 |
+
primal_out = _projection_unit_simplex(values)
|
| 116 |
+
supp = primal_out > 0
|
| 117 |
+
card = jnp.count_nonzero(supp)
|
| 118 |
+
tangent_out = supp * values_dot - (jnp.dot(supp, values_dot) / card) * supp
|
| 119 |
+
return primal_out, tangent_out
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def projection_simplex(pytree: Any,
|
| 123 |
+
scale: chex.Numeric = 1.0) -> Any:
|
| 124 |
+
r"""Projection onto a simplex.
|
| 125 |
+
|
| 126 |
+
This function solves the following constrained optimization problem,
|
| 127 |
+
where ``p`` is the input pytree.
|
| 128 |
+
|
| 129 |
+
.. math::
|
| 130 |
+
|
| 131 |
+
\underset{p}{\text{argmin}} ~ ||x - p||_2^2 \quad \textrm{subject to} \quad
|
| 132 |
+
p \ge 0, p^\top 1 = \text{scale}
|
| 133 |
+
|
| 134 |
+
By default, the projection is onto the probability simplex (unit simplex).
|
| 135 |
+
|
| 136 |
+
Args:
|
| 137 |
+
pytree: pytree to project.
|
| 138 |
+
scale: value the projected pytree should sum to (default: 1.0).
|
| 139 |
+
Returns:
|
| 140 |
+
projected pytree, a pytree with the same structure as ``pytree``.
|
| 141 |
+
|
| 142 |
+
.. versionadded:: 0.2.3
|
| 143 |
+
|
| 144 |
+
Example:
|
| 145 |
+
|
| 146 |
+
Here is an example using a pytree::
|
| 147 |
+
|
| 148 |
+
>>> import jax.numpy as jnp
|
| 149 |
+
>>> from optax import tree_utils, projections
|
| 150 |
+
>>> pytree = {"w": jnp.array([2.5, 3.2]), "b": 0.5}
|
| 151 |
+
>>> tree_utils.tree_sum(pytree)
|
| 152 |
+
6.2
|
| 153 |
+
>>> new_pytree = projections.projection_simplex(pytree)
|
| 154 |
+
>>> tree_utils.tree_sum(new_pytree)
|
| 155 |
+
1.0000002
|
| 156 |
+
"""
|
| 157 |
+
if scale is None:
|
| 158 |
+
scale = 1.0
|
| 159 |
+
|
| 160 |
+
values, unravel_fn = flatten_util.ravel_pytree(pytree)
|
| 161 |
+
new_values = scale * _projection_unit_simplex(values / scale)
|
| 162 |
+
|
| 163 |
+
return unravel_fn(new_values)
|
testbed/google-deepmind__optax/optax/projections/_projections_test.py
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
|
| 16 |
+
"""Tests for optax.projections."""
|
| 17 |
+
|
| 18 |
+
from absl.testing import absltest
|
| 19 |
+
from absl.testing import parameterized
|
| 20 |
+
import chex
|
| 21 |
+
import jax
|
| 22 |
+
import jax.numpy as jnp
|
| 23 |
+
import numpy as np
|
| 24 |
+
from optax import projections as proj
|
| 25 |
+
import optax.tree_utils as otu
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def projection_simplex_jacobian(projection):
|
| 29 |
+
"""Theoretical expression for the Jacobian of projection_simplex."""
|
| 30 |
+
support = (projection > 0).astype(jnp.int32)
|
| 31 |
+
cardinality = jnp.count_nonzero(support)
|
| 32 |
+
return jnp.diag(support) - jnp.outer(support, support) / cardinality
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class ProjectionsTest(parameterized.TestCase):
|
| 36 |
+
|
| 37 |
+
def test_projection_non_negative(self):
|
| 38 |
+
with self.subTest('with an array'):
|
| 39 |
+
x = jnp.array([-1.0, 2.0, 3.0])
|
| 40 |
+
expected = jnp.array([0, 2.0, 3.0])
|
| 41 |
+
np.testing.assert_array_equal(proj.projection_non_negative(x), expected)
|
| 42 |
+
|
| 43 |
+
with self.subTest('with a tuple'):
|
| 44 |
+
np.testing.assert_array_equal(
|
| 45 |
+
proj.projection_non_negative((x, x)), (expected, expected)
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
with self.subTest('with nested pytree'):
|
| 49 |
+
tree_x = (-1.0, {'k1': 1.0, 'k2': (1.0, 1.0)}, 1.0)
|
| 50 |
+
tree_expected = (0.0, {'k1': 1.0, 'k2': (1.0, 1.0)}, 1.0)
|
| 51 |
+
chex.assert_trees_all_equal(
|
| 52 |
+
proj.projection_non_negative(tree_x), tree_expected
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
def test_projection_box(self):
|
| 56 |
+
with self.subTest('lower and upper are scalars'):
|
| 57 |
+
lower, upper = 0.0, 2.0
|
| 58 |
+
x = jnp.array([-1.0, 2.0, 3.0])
|
| 59 |
+
expected = jnp.array([0, 2.0, 2.0])
|
| 60 |
+
np.testing.assert_array_equal(
|
| 61 |
+
proj.projection_box(x, lower, upper), expected
|
| 62 |
+
)
|
| 63 |
+
|
| 64 |
+
with self.subTest('lower and upper values are arrays'):
|
| 65 |
+
lower_arr = jnp.ones(len(x)) * lower
|
| 66 |
+
upper_arr = jnp.ones(len(x)) * upper
|
| 67 |
+
np.testing.assert_array_equal(
|
| 68 |
+
proj.projection_box(x, lower_arr, upper_arr), expected
|
| 69 |
+
)
|
| 70 |
+
|
| 71 |
+
with self.subTest('lower and upper are tuples of arrays'):
|
| 72 |
+
lower_tuple = (lower, lower)
|
| 73 |
+
upper_tuple = (upper, upper)
|
| 74 |
+
chex.assert_trees_all_equal(
|
| 75 |
+
proj.projection_box((x, x), lower_tuple, upper_tuple),
|
| 76 |
+
(expected, expected),
|
| 77 |
+
)
|
| 78 |
+
|
| 79 |
+
with self.subTest('lower and upper are pytrees'):
|
| 80 |
+
tree = (-1.0, {'k1': 2.0, 'k2': (2.0, 3.0)}, 3.0)
|
| 81 |
+
expected = (0.0, {'k1': 2.0, 'k2': (2.0, 2.0)}, 2.0)
|
| 82 |
+
lower_tree = (0.0, {'k1': 0.0, 'k2': (0.0, 0.0)}, 0.0)
|
| 83 |
+
upper_tree = (2.0, {'k1': 2.0, 'k2': (2.0, 2.0)}, 2.0)
|
| 84 |
+
chex.assert_trees_all_equal(
|
| 85 |
+
proj.projection_box(tree, lower_tree, upper_tree), expected
|
| 86 |
+
)
|
| 87 |
+
|
| 88 |
+
def test_projection_hypercube(self):
|
| 89 |
+
x = jnp.array([-1.0, 2.0, 0.5])
|
| 90 |
+
|
| 91 |
+
with self.subTest('with default scale'):
|
| 92 |
+
expected = jnp.array([0, 1.0, 0.5])
|
| 93 |
+
np.testing.assert_array_equal(proj.projection_hypercube(x), expected)
|
| 94 |
+
|
| 95 |
+
with self.subTest('with scalar scale'):
|
| 96 |
+
expected = jnp.array([0, 0.8, 0.5])
|
| 97 |
+
np.testing.assert_array_equal(proj.projection_hypercube(x, 0.8), expected)
|
| 98 |
+
|
| 99 |
+
with self.subTest('with array scales'):
|
| 100 |
+
scales = jnp.ones(len(x)) * 0.8
|
| 101 |
+
np.testing.assert_array_equal(
|
| 102 |
+
proj.projection_hypercube(x, scales), expected
|
| 103 |
+
)
|
| 104 |
+
|
| 105 |
+
@parameterized.parameters(1.0, 0.8)
|
| 106 |
+
def test_projection_simplex_array(self, scale):
|
| 107 |
+
rng = np.random.RandomState(0)
|
| 108 |
+
x = rng.randn(50).astype(np.float32)
|
| 109 |
+
p = proj.projection_simplex(x, scale)
|
| 110 |
+
|
| 111 |
+
np.testing.assert_almost_equal(jnp.sum(p), scale, decimal=4)
|
| 112 |
+
self.assertTrue(jnp.all(0 <= p))
|
| 113 |
+
self.assertTrue(jnp.all(p <= scale))
|
| 114 |
+
|
| 115 |
+
@parameterized.parameters(1.0, 0.8)
|
| 116 |
+
def test_projection_simplex_pytree(self, scale):
|
| 117 |
+
pytree = {'w': jnp.array([2.5, 3.2]), 'b': 0.5}
|
| 118 |
+
new_pytree = proj.projection_simplex(pytree, scale)
|
| 119 |
+
np.testing.assert_almost_equal(otu.tree_sum(new_pytree), scale, decimal=4)
|
| 120 |
+
|
| 121 |
+
@parameterized.parameters(1.0, 0.8)
|
| 122 |
+
def test_projection_simplex_edge_case(self, scale):
|
| 123 |
+
p = proj.projection_simplex(jnp.array([0.0, 0.0, -jnp.inf]), scale)
|
| 124 |
+
np.testing.assert_array_almost_equal(
|
| 125 |
+
p, jnp.array([scale / 2, scale / 2, 0.0])
|
| 126 |
+
)
|
| 127 |
+
|
| 128 |
+
def test_projection_simplex_jacobian(self):
|
| 129 |
+
rng = np.random.RandomState(0)
|
| 130 |
+
|
| 131 |
+
x = rng.rand(5).astype(np.float32)
|
| 132 |
+
v = rng.randn(5).astype(np.float32)
|
| 133 |
+
|
| 134 |
+
jac_rev = jax.jacrev(proj.projection_simplex)(x)
|
| 135 |
+
jac_fwd = jax.jacfwd(proj.projection_simplex)(x)
|
| 136 |
+
|
| 137 |
+
with self.subTest('Check against theoretical expression'):
|
| 138 |
+
p = proj.projection_simplex(x)
|
| 139 |
+
jac_true = projection_simplex_jacobian(p)
|
| 140 |
+
|
| 141 |
+
np.testing.assert_array_almost_equal(jac_true, jac_fwd)
|
| 142 |
+
np.testing.assert_array_almost_equal(jac_true, jac_rev)
|
| 143 |
+
|
| 144 |
+
with self.subTest('Check against finite difference'):
|
| 145 |
+
jvp = jax.jvp(proj.projection_simplex, (x,), (v,))[1]
|
| 146 |
+
eps = 1e-4
|
| 147 |
+
jvp_finite_diff = (proj.projection_simplex(x + eps * v) -
|
| 148 |
+
proj.projection_simplex(x - eps * v)) / (2 * eps)
|
| 149 |
+
np.testing.assert_array_almost_equal(jvp, jvp_finite_diff, decimal=3)
|
| 150 |
+
|
| 151 |
+
with self.subTest('Check vector-Jacobian product'):
|
| 152 |
+
(vjp,) = jax.vjp(proj.projection_simplex, x)[1](v)
|
| 153 |
+
np.testing.assert_array_almost_equal(vjp, jnp.dot(v, jac_true))
|
| 154 |
+
|
| 155 |
+
with self.subTest('Check Jacobian-vector product'):
|
| 156 |
+
jvp = jax.jvp(proj.projection_simplex, (x,), (v,))[1]
|
| 157 |
+
np.testing.assert_array_almost_equal(jvp, jnp.dot(jac_true, v))
|
| 158 |
+
|
| 159 |
+
@parameterized.parameters(1.0, 0.8)
|
| 160 |
+
def test_projection_simplex_vmap(self, scale):
|
| 161 |
+
rng = np.random.RandomState(0)
|
| 162 |
+
x = rng.randn(3, 50).astype(np.float32)
|
| 163 |
+
scales = jnp.full(len(x), scale)
|
| 164 |
+
|
| 165 |
+
p = jax.vmap(proj.projection_simplex)(x, scales)
|
| 166 |
+
np.testing.assert_array_almost_equal(jnp.sum(p, axis=1), scales)
|
| 167 |
+
np.testing.assert_array_equal(True, 0 <= p)
|
| 168 |
+
np.testing.assert_array_equal(True, p <= scale)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
if __name__ == '__main__':
|
| 172 |
+
absltest.main()
|
testbed/google-deepmind__optax/optax/schedules/_inject_test.py
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for `inject.py`."""
|
| 16 |
+
|
| 17 |
+
import functools
|
| 18 |
+
from typing import NamedTuple
|
| 19 |
+
|
| 20 |
+
from absl.testing import absltest
|
| 21 |
+
from absl.testing import parameterized
|
| 22 |
+
import chex
|
| 23 |
+
import jax
|
| 24 |
+
import jax.numpy as jnp
|
| 25 |
+
import numpy as np
|
| 26 |
+
from optax._src import base
|
| 27 |
+
from optax._src import clipping
|
| 28 |
+
from optax._src import transform
|
| 29 |
+
from optax._src import wrappers
|
| 30 |
+
from optax.schedules import _inject
|
| 31 |
+
from optax.schedules import _schedule
|
| 32 |
+
from optax.tree_utils import _state_utils
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class ExampleState(NamedTuple):
|
| 36 |
+
total: chex.Numeric
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class ExampleStatefulSchedule(base.StatefulSchedule):
|
| 40 |
+
|
| 41 |
+
def init(self) -> ExampleState:
|
| 42 |
+
return ExampleState(total=jnp.zeros([], dtype=jnp.int32))
|
| 43 |
+
|
| 44 |
+
def update(self, state: ExampleState, **extra_args) -> ExampleState:
|
| 45 |
+
total = state.total + extra_args['addendum']
|
| 46 |
+
return ExampleState(total=total)
|
| 47 |
+
|
| 48 |
+
def __call__(self, state: ExampleState, **extra_args) -> chex.Numeric:
|
| 49 |
+
return state.total
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class InjectHyperparamsTest(chex.TestCase):
|
| 53 |
+
"""Tests for the inject_hyperparams wrapper."""
|
| 54 |
+
|
| 55 |
+
@chex.all_variants
|
| 56 |
+
def test_updates(self):
|
| 57 |
+
optim = _inject.inject_hyperparams(transform.scale)( # stateless
|
| 58 |
+
step_size=_schedule.piecewise_constant_schedule(
|
| 59 |
+
3.0, {1: 5, 7: 2, 12: 1.5}))
|
| 60 |
+
|
| 61 |
+
params = [jnp.zeros([], dtype=jnp.float32)]
|
| 62 |
+
state = self.variant(optim.init)(params)
|
| 63 |
+
|
| 64 |
+
# A no-op change, to verify that tree map works.
|
| 65 |
+
state = _state_utils.tree_map_params(optim, lambda v: v, state)
|
| 66 |
+
|
| 67 |
+
update_fn = self.variant(optim.update)
|
| 68 |
+
expected_step_size = [3.0]*2 + [15.0]*6 + [30.0]*5 + [45.0]*3
|
| 69 |
+
|
| 70 |
+
grads = [jnp.ones([], dtype=jnp.float32)]
|
| 71 |
+
for i in range(15):
|
| 72 |
+
updates, state = update_fn(grads, state, params=params)
|
| 73 |
+
np.testing.assert_almost_equal(updates[0], expected_step_size[i+1])
|
| 74 |
+
|
| 75 |
+
@chex.all_variants
|
| 76 |
+
def test_hyperparams_state(self):
|
| 77 |
+
optim = _inject.inject_hyperparams(transform.trace)( # stateful
|
| 78 |
+
decay=_schedule.piecewise_constant_schedule(
|
| 79 |
+
0.8, {3: 0.5, 9: 1.25}),
|
| 80 |
+
nesterov=True)
|
| 81 |
+
|
| 82 |
+
params = [jnp.zeros([2, 3]) for _ in range(3)]
|
| 83 |
+
state = self.variant(optim.init)(params)
|
| 84 |
+
update_fn = self.variant(optim.update)
|
| 85 |
+
|
| 86 |
+
expected_mom = [0.8]*4 + [0.4]*6 + [0.5]*2
|
| 87 |
+
grads = jax.tree_util.tree_map(jnp.ones_like, params)
|
| 88 |
+
for i in range(12):
|
| 89 |
+
np.testing.assert_almost_equal(state.hyperparams['decay'],
|
| 90 |
+
expected_mom[i])
|
| 91 |
+
_, state = update_fn(grads, state)
|
| 92 |
+
|
| 93 |
+
np.testing.assert_almost_equal(state.hyperparams['decay'],
|
| 94 |
+
expected_mom[-1])
|
| 95 |
+
|
| 96 |
+
@chex.all_variants
|
| 97 |
+
def test_constant_hyperparams(self):
|
| 98 |
+
optim = _inject.inject_hyperparams(transform.scale_by_adam)(b1=0., b2=0.)
|
| 99 |
+
|
| 100 |
+
params = [jnp.zeros([2, 3]) for _ in range(3)]
|
| 101 |
+
state = self.variant(optim.init)(params)
|
| 102 |
+
update_fn = self.variant(optim.update)
|
| 103 |
+
|
| 104 |
+
grads = jax.tree_util.tree_map(jnp.ones_like, params)
|
| 105 |
+
for _ in range(5):
|
| 106 |
+
updates, state = update_fn(grads, state, params)
|
| 107 |
+
np.testing.assert_almost_equal(state.hyperparams['b1'], 0.0)
|
| 108 |
+
np.testing.assert_almost_equal(state.hyperparams['b2'], 0.0)
|
| 109 |
+
np.testing.assert_almost_equal(state.hyperparams['eps'], 1e-8)
|
| 110 |
+
np.testing.assert_almost_equal(state.hyperparams['eps_root'], 0.0)
|
| 111 |
+
assert 'eps' in state.hyperparams
|
| 112 |
+
chex.assert_trees_all_close(updates, grads)
|
| 113 |
+
|
| 114 |
+
@chex.all_variants
|
| 115 |
+
def test_overriding_hyperparam(self):
|
| 116 |
+
optim = _inject.inject_hyperparams(clipping.clip_by_global_norm)(0.1)
|
| 117 |
+
params = jnp.zeros((3, 5, 7))
|
| 118 |
+
state = self.variant(optim.init)(params)
|
| 119 |
+
update_fn = self.variant(optim.update)
|
| 120 |
+
|
| 121 |
+
grads = jnp.ones_like(params)
|
| 122 |
+
for i in range(5):
|
| 123 |
+
state.hyperparams['max_norm'] = i
|
| 124 |
+
updates, state = update_fn(grads, state)
|
| 125 |
+
assert np.isclose(jnp.linalg.norm(updates.ravel()), i)
|
| 126 |
+
|
| 127 |
+
@chex.all_variants
|
| 128 |
+
@parameterized.named_parameters(('string', 'mask'), ('list', ['mask']))
|
| 129 |
+
def test_static_args(self, static_args):
|
| 130 |
+
@functools.partial(_inject.inject_hyperparams, static_args=static_args)
|
| 131 |
+
def custom_optim(learning_rate, mask):
|
| 132 |
+
return wrappers.masked(transform.scale(-learning_rate), mask)
|
| 133 |
+
|
| 134 |
+
optim = custom_optim(
|
| 135 |
+
0.1, functools.partial(jax.tree_util.tree_map, lambda x: x.ndim > 1))
|
| 136 |
+
params = [jnp.ones((1, 2)), jnp.ones(2), jnp.ones((1, 1, 1))]
|
| 137 |
+
grads = params
|
| 138 |
+
state = self.variant(optim.init)(params)
|
| 139 |
+
updates, state = self.variant(optim.update)(grads, state)
|
| 140 |
+
expected_updates = jax.tree_util.tree_map(
|
| 141 |
+
lambda x: -0.1 * x if x.ndim > 1 else x, grads)
|
| 142 |
+
|
| 143 |
+
assert set(state.hyperparams.keys()) == {'learning_rate'}, state.hyperparams
|
| 144 |
+
chex.assert_trees_all_close(updates, expected_updates)
|
| 145 |
+
|
| 146 |
+
@chex.all_variants
|
| 147 |
+
@parameterized.named_parameters(('one_arg', 'b1'), ('two_arg', ['b1', 'b2']))
|
| 148 |
+
def test_numeric_static_args(self, static_args):
|
| 149 |
+
optim = _inject.inject_hyperparams(
|
| 150 |
+
transform.scale_by_adam, static_args=static_args)(b1=0.9, b2=0.95)
|
| 151 |
+
|
| 152 |
+
params = [jnp.ones((1, 2)), jnp.ones(2), jnp.ones((1, 1, 1))]
|
| 153 |
+
grads = params
|
| 154 |
+
state = self.variant(optim.init)(params)
|
| 155 |
+
_, state = self.variant(optim.update)(grads, state)
|
| 156 |
+
|
| 157 |
+
assert not set(state.hyperparams.keys()).intersection(set(static_args))
|
| 158 |
+
|
| 159 |
+
@chex.all_variants
|
| 160 |
+
@parameterized.named_parameters(
|
| 161 |
+
('bf16hyp f32param bf16grad', jnp.bfloat16, jnp.float32, jnp.bfloat16),
|
| 162 |
+
('bf16hyp f32param f32_grads', jnp.bfloat16, jnp.float32, jnp.float32),
|
| 163 |
+
('f32hyp bf16param bf16grad', jnp.float32, jnp.bfloat16, jnp.bfloat16),
|
| 164 |
+
('f32hyp f32param bf16grad', jnp.float32, jnp.float32, jnp.bfloat16),
|
| 165 |
+
('f32hyp bf16param f32grad', jnp.float32, jnp.bfloat16, jnp.float32),
|
| 166 |
+
)
|
| 167 |
+
def test_hyperparam_dtypes(self,
|
| 168 |
+
hyperparam_dtype,
|
| 169 |
+
param_dtype,
|
| 170 |
+
grad_dtype):
|
| 171 |
+
"""Tests that hyperparam dtype override works as desired."""
|
| 172 |
+
optim = _inject.inject_hyperparams(
|
| 173 |
+
transform.scale_by_adam,
|
| 174 |
+
hyperparam_dtype=hyperparam_dtype)(b1=0.9, b2=0.95)
|
| 175 |
+
|
| 176 |
+
params = [jnp.ones((1, 2), dtype=param_dtype),
|
| 177 |
+
jnp.ones(2, dtype=param_dtype),
|
| 178 |
+
jnp.ones((1, 1, 1), dtype=param_dtype)]
|
| 179 |
+
grads = jax.tree_util.tree_map(lambda x: x.astype(grad_dtype), params)
|
| 180 |
+
state = self.variant(optim.init)(params)
|
| 181 |
+
# Check that the hyperparams are overridden
|
| 182 |
+
self.assertEqual(state.hyperparams['b1'].dtype, hyperparam_dtype)
|
| 183 |
+
self.assertEqual(state.hyperparams['b2'].dtype, hyperparam_dtype)
|
| 184 |
+
|
| 185 |
+
_, state = self.variant(optim.update)(grads, state)
|
| 186 |
+
|
| 187 |
+
self.assertEqual(state.hyperparams['b1'].dtype, hyperparam_dtype)
|
| 188 |
+
self.assertEqual(state.hyperparams['b2'].dtype, hyperparam_dtype)
|
| 189 |
+
|
| 190 |
+
@parameterized.named_parameters(('string', 'lr'), ('list', ['lr']))
|
| 191 |
+
def test_static_args_error(self, static_args):
|
| 192 |
+
with self.assertRaises(ValueError):
|
| 193 |
+
_inject.inject_hyperparams(transform.scale, static_args=static_args)
|
| 194 |
+
|
| 195 |
+
@chex.all_variants
|
| 196 |
+
def test_inject_hyperparams_starts_with_step_count_zero(self):
|
| 197 |
+
"""Checks that inject_hyperparams uses step count 0 in the first update."""
|
| 198 |
+
# See also: https://github.com/deepmind/optax/issues/415.
|
| 199 |
+
opt = _inject.inject_hyperparams(transform.scale)(lambda count: count)
|
| 200 |
+
params = jnp.zeros(3)
|
| 201 |
+
grads = jnp.array([-1, 0, 1])
|
| 202 |
+
updates, _ = self.variant(opt.update)(grads, opt.init(params))
|
| 203 |
+
np.testing.assert_array_equal(updates, np.zeros(3))
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
class StatefulTest(chex.TestCase):
|
| 207 |
+
|
| 208 |
+
def test_wrap_stateless_schedule(self):
|
| 209 |
+
my_schedule = _schedule.linear_schedule(1., 1., 10)
|
| 210 |
+
my_wrapped_schedule = _inject.WrappedSchedule(my_schedule)
|
| 211 |
+
|
| 212 |
+
count = jnp.zeros([], dtype=jnp.int32)
|
| 213 |
+
state = my_wrapped_schedule.init()
|
| 214 |
+
np.testing.assert_allclose(count, state, atol=0.0)
|
| 215 |
+
|
| 216 |
+
for _ in range(8):
|
| 217 |
+
np.testing.assert_allclose(
|
| 218 |
+
my_schedule(count), my_wrapped_schedule(state), atol=0.0)
|
| 219 |
+
count = count + 1
|
| 220 |
+
extra_args = dict(loss=jnp.ones([], dtype=jnp.float32))
|
| 221 |
+
state = my_wrapped_schedule.update(state, **extra_args)
|
| 222 |
+
np.testing.assert_allclose(count, state, atol=0.0)
|
| 223 |
+
|
| 224 |
+
@chex.all_variants
|
| 225 |
+
def test_inject_stateful_hyperparams(self):
|
| 226 |
+
grads = (
|
| 227 |
+
jnp.ones((3,), dtype=jnp.float32),
|
| 228 |
+
jnp.ones((2,), dtype=jnp.float32),)
|
| 229 |
+
params = grads
|
| 230 |
+
|
| 231 |
+
my_stateful_schedule = ExampleStatefulSchedule()
|
| 232 |
+
tx = _inject.inject_hyperparams(
|
| 233 |
+
transform.scale)(step_size=my_stateful_schedule)
|
| 234 |
+
state = self.variant(tx.init)(params)
|
| 235 |
+
|
| 236 |
+
extra_args = dict(addendum=0.3 * jnp.ones((), dtype=jnp.float32))
|
| 237 |
+
_, state = self.variant(tx.update)(
|
| 238 |
+
grads, state, params=params, **extra_args)
|
| 239 |
+
_, state = self.variant(tx.update)(
|
| 240 |
+
grads, state, params=params, **extra_args)
|
| 241 |
+
|
| 242 |
+
lr = state.hyperparams['step_size']
|
| 243 |
+
total = state.hyperparams_states['step_size']
|
| 244 |
+
|
| 245 |
+
np.testing.assert_allclose(lr, extra_args['addendum'], atol=0.0)
|
| 246 |
+
np.testing.assert_allclose(total, 2 * extra_args['addendum'], atol=0.0)
|
| 247 |
+
|
| 248 |
+
if __name__ == '__main__':
|
| 249 |
+
absltest.main()
|
testbed/google-deepmind__optax/optax/schedules/_join.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Utilities to join schedules."""
|
| 16 |
+
|
| 17 |
+
from typing import Sequence
|
| 18 |
+
|
| 19 |
+
import chex
|
| 20 |
+
import jax.numpy as jnp
|
| 21 |
+
|
| 22 |
+
from optax._src import base
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def join_schedules(
    schedules: Sequence[base.Schedule],
    boundaries: Sequence[int]
) -> base.Schedule:
  """Sequentially applies multiple schedules.

  Args:
    schedules: A list of callables (expected to be optax schedules). Each
      schedule will receive a step count indicating the number of steps since
      the previous boundary transition.
    boundaries: A list of integers (of length one less than schedules) that
      indicate when to transition between schedules.
  Returns:
    schedule: A function that maps step counts to values.
  """
  def schedule(step: chex.Numeric) -> chex.Numeric:
    # Start from the first schedule; each later schedule overrides the value
    # once its boundary is reached, counting steps from that boundary.
    value = schedules[0](step)
    for start, active_schedule in zip(boundaries, schedules[1:]):
      value = jnp.where(step < start, value, active_schedule(step - start))
    return value

  return schedule
|
testbed/google-deepmind__optax/optax/schedules/_join_test.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for `join.py`."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
|
| 21 |
+
from optax.schedules import _join
|
| 22 |
+
from optax.schedules import _schedule
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class JoinTest(absltest.TestCase):
  """Tests for joining schedules at fixed boundaries."""

  def test_join_schedules(self):
    """A piecewise-constant join must switch values exactly at boundaries."""
    joined = _join.join_schedules(
        schedules=[
            _schedule.constant_schedule(1.),
            _schedule.constant_schedule(2.),
            _schedule.constant_schedule(1.)],
        boundaries=[3, 6])
    # Steps 0-2 use the first schedule, 3-5 the second, and 6 the third.
    expected_values = [1., 1., 1., 2., 2., 2., 1.]
    for step, expected in enumerate(expected_values):
      np.testing.assert_allclose(expected, joined(step), atol=0.0)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
# Allows running this test file directly, e.g. `python _join_test.py`.
if __name__ == "__main__":
  absltest.main()
|
testbed/google-deepmind__optax/optax/second_order/__init__.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""The second order optimisation sub-package."""
|
| 16 |
+
|
| 17 |
+
from optax.second_order._fisher import fisher_diag
|
| 18 |
+
from optax.second_order._hessian import hessian_diag
|
| 19 |
+
from optax.second_order._hessian import hvp
|
testbed/google-deepmind__optax/optax/second_order/_base.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Base types for the second order sub-package."""
|
| 16 |
+
|
| 17 |
+
import abc
|
| 18 |
+
from typing import Any, Protocol
|
| 19 |
+
|
| 20 |
+
import jax
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class LossFn(Protocol):
  """Structural type for a loss function to be optimized.

  A conforming callable maps `(params, inputs, targets)` to a `jax.Array`
  loss value.
  """

  @abc.abstractmethod
  def __call__(
      self, params: Any, inputs: jax.Array, targets: jax.Array
  ) -> jax.Array:
    """Evaluates the loss at `params` on a batch of (inputs, targets)."""
|
testbed/google-deepmind__optax/optax/second_order/_hessian.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Functions for computing diagonals of the Hessian wrt to a set of parameters.
|
| 16 |
+
|
| 17 |
+
Computing the Hessian for neural networks is typically intractible due to the
|
| 18 |
+
quadratic memory requirements. Solving for the diagonal can be done cheaply,
|
| 19 |
+
with sub-quadratic memory requirements.
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
from typing import Any
|
| 23 |
+
|
| 24 |
+
import jax
|
| 25 |
+
from jax import flatten_util
|
| 26 |
+
import jax.numpy as jnp
|
| 27 |
+
|
| 28 |
+
from optax.second_order import _base
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _ravel(p: Any) -> jax.Array:
|
| 32 |
+
return flatten_util.ravel_pytree(p)[0]
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def hvp(
    loss: _base.LossFn,
    v: jax.Array,
    params: Any,
    inputs: jax.Array,
    targets: jax.Array,
) -> jax.Array:
  """Performs an efficient vector-Hessian (of `loss`) product.

  The product is computed forward-over-reverse (a jvp of the gradient
  function), so the full Hessian is never materialized.

  Args:
    loss: the loss function.
    v: a vector of size `ravel(params)`.
    params: model parameters.
    inputs: inputs at which `loss` is evaluated.
    targets: targets at which `loss` is evaluated.

  Returns:
    An Array corresponding to the product of `v` and the Hessian of `loss`
    evaluated at `(params, inputs, targets)`.
  """
  # Map the flat vector `v` back to the pytree structure of `params`.
  unravel_fn = flatten_util.ravel_pytree(params)[1]

  def loss_at(p):
    return loss(p, inputs, targets)

  return jax.jvp(jax.grad(loss_at), [params], [unravel_fn(v)])[1]
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def hessian_diag(
    loss: _base.LossFn,
    params: Any,
    inputs: jax.Array,
    targets: jax.Array,
) -> jax.Array:
  """Computes the diagonal hessian of `loss` at (`inputs`, `targets`).

  Each diagonal entry is recovered as the dot product of a standard basis
  vector with its Hessian-vector product, vmapped over the full basis, so
  the dense Hessian is never formed.

  Args:
    loss: the loss function.
    params: model parameters.
    inputs: inputs at which `loss` is evaluated.
    targets: targets at which `loss` is evaluated.

  Returns:
    A DeviceArray corresponding to the product to the Hessian of `loss`
    evaluated at `(params, inputs, targets)`.
  """
  basis = jnp.eye(_ravel(params).size)

  def diag_entry(v):
    return jnp.vdot(v, _ravel(hvp(loss, v, params, inputs, targets)))

  return jax.vmap(diag_entry)(basis)
|
testbed/google-deepmind__optax/optax/second_order/_hessian_test.py
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for `hessian.py`."""
|
| 16 |
+
|
| 17 |
+
import functools
|
| 18 |
+
|
| 19 |
+
from absl.testing import absltest
|
| 20 |
+
|
| 21 |
+
import chex
|
| 22 |
+
from flax import linen as nn
|
| 23 |
+
import jax
|
| 24 |
+
import jax.numpy as jnp
|
| 25 |
+
import numpy as np
|
| 26 |
+
|
| 27 |
+
from optax.second_order import _hessian
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
NUM_CLASSES = 2
|
| 31 |
+
NUM_SAMPLES = 3
|
| 32 |
+
NUM_FEATURES = 4
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class HessianTest(chex.TestCase):
  """Compares `_hessian.hessian_diag` against a dense JAX Hessian diagonal."""

  def setUp(self):
    super().setUp()

    self.data = np.random.rand(NUM_SAMPLES, NUM_FEATURES)
    self.labels = np.random.randint(NUM_CLASSES, size=NUM_SAMPLES)

    class MLP(nn.Module):
      """A simple multilayer perceptron model for image classification."""

      @nn.compact
      def __call__(self, x):
        # Flattens images in the batch.
        x = x.reshape((x.shape[0], -1))
        x = nn.Dense(features=5)(x)
        x = nn.relu(x)
        return nn.Dense(features=NUM_CLASSES)(x)

    model = MLP()
    init_variables = model.init({'params': jax.random.PRNGKey(0)}, self.data)
    self.parameters = init_variables['params']

    def loss(params, inputs, targets):
      log_probs = model.apply({'params': params}, inputs)
      return -jnp.mean(jax.nn.one_hot(targets, NUM_CLASSES) * log_probs)

    self.loss_fn = loss

    def dense_hessian_diag(loss_fun, params, inputs, targets):
      """Ground-truth diagonal obtained from the full JAX Hessian."""
      flat_params, unravel_fn = jax.flatten_util.ravel_pytree(params)

      def loss_of_flat(flat):
        return loss_fun(unravel_fn(flat), inputs, targets)

      return jnp.diag(jax.hessian(loss_of_flat)(flat_params))

    self.hessian_diag = dense_hessian_diag(
        self.loss_fn, self.parameters, self.data, self.labels)

  @chex.all_variants
  def test_hessian_diag(self):
    """The HVP-based diagonal must match the dense ground-truth diagonal."""
    hessian_diag_fn = self.variant(
        functools.partial(_hessian.hessian_diag, self.loss_fn))
    actual = hessian_diag_fn(self.parameters, self.data, self.labels)
    np.testing.assert_array_almost_equal(self.hessian_diag, actual, 5)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
# Allows running this test file directly, e.g. `python _hessian_test.py`.
if __name__ == '__main__':
  absltest.main()
|
testbed/google-deepmind__optax/optax/transforms/__init__.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""The transforms sub-package."""
|
| 16 |
+
|
| 17 |
+
from optax.transforms._accumulation import ema
|
| 18 |
+
from optax.transforms._accumulation import EmaState
|
| 19 |
+
from optax.transforms._accumulation import MultiSteps
|
| 20 |
+
from optax.transforms._accumulation import MultiStepsState
|
| 21 |
+
from optax.transforms._accumulation import ShouldSkipUpdateFunction
|
| 22 |
+
from optax.transforms._accumulation import skip_large_updates
|
| 23 |
+
from optax.transforms._accumulation import skip_not_finite
|
| 24 |
+
from optax.transforms._accumulation import trace
|
| 25 |
+
from optax.transforms._accumulation import TraceState
|
| 26 |
+
from optax.transforms._adding import add_decayed_weights
|
| 27 |
+
from optax.transforms._adding import add_noise
|
| 28 |
+
from optax.transforms._adding import AddNoiseState
|
| 29 |
+
from optax.transforms._clipping import adaptive_grad_clip
|
| 30 |
+
from optax.transforms._clipping import clip
|
| 31 |
+
from optax.transforms._clipping import clip_by_block_rms
|
| 32 |
+
from optax.transforms._clipping import clip_by_global_norm
|
| 33 |
+
from optax.transforms._clipping import per_example_global_norm_clip
|
| 34 |
+
from optax.transforms._clipping import per_example_layer_norm_clip
|
| 35 |
+
from optax.transforms._clipping import unitwise_clip
|
| 36 |
+
from optax.transforms._clipping import unitwise_norm
|
| 37 |
+
from optax.transforms._combining import chain
|
| 38 |
+
from optax.transforms._combining import named_chain
|
| 39 |
+
from optax.transforms._combining import partition
|
| 40 |
+
from optax.transforms._combining import PartitionState
|
| 41 |
+
from optax.transforms._conditionality import apply_if_finite
|
| 42 |
+
from optax.transforms._conditionality import ApplyIfFiniteState
|
| 43 |
+
from optax.transforms._conditionality import conditionally_mask
|
| 44 |
+
from optax.transforms._conditionality import conditionally_transform
|
| 45 |
+
from optax.transforms._conditionality import ConditionallyMaskState
|
| 46 |
+
from optax.transforms._conditionality import ConditionallyTransformState
|
| 47 |
+
from optax.transforms._conditionality import ConditionFn
|
| 48 |
+
from optax.transforms._constraining import keep_params_nonnegative
|
| 49 |
+
from optax.transforms._constraining import NonNegativeParamsState
|
| 50 |
+
from optax.transforms._constraining import zero_nans
|
| 51 |
+
from optax.transforms._constraining import ZeroNansState
|
| 52 |
+
from optax.transforms._layouts import flatten
|
| 53 |
+
from optax.transforms._masking import masked
|
| 54 |
+
from optax.transforms._masking import MaskedNode
|
| 55 |
+
from optax.transforms._masking import MaskedState
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
# Names re-exported as the public API of `optax.transforms`; also controls
# what `from optax.transforms import *` brings into scope.
__all__ = (
    "adaptive_grad_clip",
    "add_decayed_weights",
    "add_noise",
    "AddNoiseState",
    "apply_if_finite",
    "ApplyIfFiniteState",
    "chain",
    "clip_by_block_rms",
    "clip_by_global_norm",
    "clip",
    "conditionally_mask",
    "ConditionallyMaskState",
    "conditionally_transform",
    "ConditionallyTransformState",
    "ema",
    "EmaState",
    "flatten",
    "keep_params_nonnegative",
    "masked",
    "MaskedState",
    "MultiSteps",
    "MultiStepsState",
    "named_chain",
    "NonNegativeParamsState",
    "partition",
    "PartitionState",
    "ShouldSkipUpdateFunction",
    "skip_large_updates",
    "skip_not_finite",
    "trace",
    "TraceState",
    "zero_nans",
    "ZeroNansState",
)
|
testbed/google-deepmind__optax/optax/transforms/_accumulation.py
ADDED
|
@@ -0,0 +1,393 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Gradient transformations for accumulating gradients across updates."""
|
| 16 |
+
|
| 17 |
+
from typing import Any, Callable, NamedTuple, Optional, Protocol, Union
|
| 18 |
+
|
| 19 |
+
import chex
|
| 20 |
+
from jax import lax
|
| 21 |
+
from jax import tree_util as jtu
|
| 22 |
+
import jax.numpy as jnp
|
| 23 |
+
|
| 24 |
+
from optax import tree_utils as otu
|
| 25 |
+
|
| 26 |
+
from optax._src import base
|
| 27 |
+
from optax._src import numerics
|
| 28 |
+
from optax._src import utils
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class TraceState(NamedTuple):
  """Holds an aggregation of past updates."""
  # Pytree with the same structure as the params, holding the decayed
  # sum (momentum trace) of past updates.
  trace: base.Params
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def trace(
    decay: float,
    nesterov: bool = False,
    accumulator_dtype: Optional[Any] = None,
) -> base.GradientTransformation:
  """Computes a trace (momentum-style accumulation) of past updates.

  Note: `trace` and `ema` have very similar but distinct updates;
  `trace = decay * trace + t`, while `ema = decay * ema + (1-decay) * t`.
  Both are frequently found in the optimization literature.

  Args:
    decay: Decay rate for the trace of past updates.
    nesterov: Whether to use Nesterov momentum.
    accumulator_dtype: Optional `dtype` to be used for the accumulator; if
      `None` then the `dtype` is inferred from `params` and `updates`.

  Returns:
    A `GradientTransformation` object.
  """
  accumulator_dtype = utils.canonicalize_dtype(accumulator_dtype)

  def init_fn(params):
    zero_trace = otu.tree_zeros_like(params, dtype=accumulator_dtype)
    return TraceState(trace=zero_trace)

  def update_fn(updates, state, params=None):
    del params
    accumulate = lambda g, t: g + decay * t
    new_trace = jtu.tree_map(accumulate, updates, state.trace)
    if nesterov:
      # Nesterov momentum applies the decayed trace a second time.
      updates = jtu.tree_map(accumulate, updates, new_trace)
    else:
      updates = new_trace
    new_trace = otu.tree_cast(new_trace, accumulator_dtype)
    return updates, TraceState(trace=new_trace)

  return base.GradientTransformation(init_fn, update_fn)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class EmaState(NamedTuple):
  """Holds an exponential moving average of past updates."""
  count: chex.Array  # shape=(), dtype=jnp.int32.
  # Pytree of averaged past updates, same structure as the params.
  ema: base.Params
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def ema(
    decay: float,
    debias: bool = True,
    accumulator_dtype: Optional[Any] = None
) -> base.GradientTransformation:
  """Computes an exponential moving average (EMA) of incoming updates.

  Unlike `trace`, which accumulates `decay * trace + t`, the EMA uses the
  convex combination `decay * ema + (1 - decay) * t`. Both forms appear
  frequently in the optimization literature.

  Args:
    decay: Decay rate of the moving average.
    debias: If True, corrects for the zero-initialization bias of the average.
    accumulator_dtype: Optional `dtype` for the accumulator; when `None` the
      `dtype` is inferred from `params` and `updates`.

  Returns:
    A `GradientTransformation` object.
  """
  accumulator_dtype = utils.canonicalize_dtype(accumulator_dtype)

  def init_fn(params):
    return EmaState(
        count=jnp.zeros([], jnp.int32),
        ema=otu.tree_zeros_like(params, dtype=accumulator_dtype))

  def update_fn(updates, state, params=None):
    del params
    new_ema = otu.tree_update_moment(updates, state.ema, decay, order=1)
    count_inc = utils.safe_int32_increment(state.count)
    if debias:
      # Undo the bias towards zero introduced by zero-initializing the EMA.
      updates = otu.tree_bias_correction(new_ema, decay, count_inc)
    else:
      updates = new_ema
    state_ema = otu.tree_cast(new_ema, accumulator_dtype)
    return updates, EmaState(count=count_inc, ema=state_ema)

  return base.GradientTransformation(init_fn, update_fn)
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
class ShouldSkipUpdateFunction(Protocol):
  """Callable deciding whether a `MultiSteps` update should be skipped."""

  def __call__(
      self,
      updates: base.Updates,
      gradient_step: chex.Array,
      params: Optional[base.Params]
  ) -> tuple[chex.Array, chex.ArrayTree]:
    """Returns true to indicate that updates should be skipped in a multi-step.

    Args:
      updates: The updates that the gradient transformation has proposed.
      gradient_step: The current gradient step (see
        `MultiStepsState.gradient_step`). This can be used for example to reject
        large gradients with an annealed maximum allowed gradient norm.
      params: If known, the current params of the function being transformed.

    Returns:
      A tuple:
      * First element is an array with a single bool indicating whether or not
        the updates should be applied.
      * Second element is an arbitrary py-tree that will be stored in
        `MultiStepsState.skip_state`. Debugging info can be put here.
    """
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def skip_not_finite(
    updates: base.Updates,
    gradient_step: chex.Array,
    params: Optional[base.Params]
) -> tuple[chex.Array, chex.ArrayTree]:
  """Returns True iff any of the `updates` contains an inf or a NaN.

  Args:
    updates: see `ShouldSkipUpdateFunction`.
    gradient_step: see `ShouldSkipUpdateFunction`.
    params: see `ShouldSkipUpdateFunction`.

  Returns:
    A tuple:
    * First element is a scalar array of type bool.
    * Second element is a dictionary with keys:
      - `should_skip`: True iff `updates` contains an inf or a NaN.
      - `num_not_finite`: total number of inf and NaN found in `updates`.
  """
  del gradient_step, params
  # Count non-finite entries per leaf, then sum across the whole tree.
  not_finite_counts = [
      jnp.sum(jnp.logical_not(jnp.isfinite(leaf)))
      for leaf in jtu.tree_leaves(updates)]
  num_not_finite = jnp.sum(jnp.array(not_finite_counts))
  should_skip = num_not_finite > 0
  return should_skip, dict(should_skip=should_skip,
                           num_not_finite=num_not_finite)
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def skip_large_updates(
    updates: base.Updates,
    gradient_step: chex.Array,
    params: Optional[base.Params],
    max_squared_norm: float
) -> tuple[chex.Array, chex.ArrayTree]:
  """Returns True if the global norm square of `updates` is small enough.

  Args:
    updates: see `ShouldSkipUpdateFunction`.
    gradient_step: see `ShouldSkipUpdateFunction`.
    params: see `ShouldSkipUpdateFunction`.
    max_squared_norm: max square norm that can be accepted in updates.

  Returns:
    A tuple:
    * First element is a scalar array of type bool.
    * Second element is a dictionary with keys:
      - `should_skip`: iff ||updates||^2 is greater than `max_squared_norm`.
      - `norm_squared`: overall norm square of the `updates`.
  """
  del gradient_step, params
  leaf_sq_sums = jnp.array(
      [jnp.sum(jnp.square(leaf)) for leaf in jtu.tree_leaves(updates)])
  norm_sq = jnp.sum(leaf_sq_sums)
  # Negated `<` (rather than `>=`) so a NaN norm also triggers a skip.
  should_skip = jnp.logical_not(norm_sq < max_squared_norm)
  return should_skip, dict(should_skip=should_skip, norm_squared=norm_sq)
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
class MultiStepsState(NamedTuple):
  """State of the `GradientTransformation` returned by `MultiSteps`.

  Attributes:
    mini_step: current mini-step counter. At an update, this either increases by
      1 or is reset to 0.
    gradient_step: gradient step counter. This only increases after enough
      mini-steps have been accumulated.
    inner_opt_state: the state of the wrapped optimiser.
    acc_grads: accumulated gradients over multiple mini-steps.
    skip_state: an arbitrary py tree. This is only relevant when passing
      a `should_skip_update_fn` to `MultiSteps`.
  """
  mini_step: chex.Array  # scalar int32; wraps modulo the mini-step schedule
  gradient_step: chex.Array  # scalar int32
  inner_opt_state: Any
  acc_grads: Any
  # Defaults to an empty tuple, matching the default `should_skip_update_fn`.
  skip_state: chex.ArrayTree = ()
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
class MultiSteps:
  """An optimizer wrapper to accumulate gradients over multiple steps.

  This wrapper collects together the updates passed to its ``update`` function
  over consecutive steps until a given number of scheduled steps is reached.
  In each of these intermediate steps, the returned value from the optimizer is
  a tree of zeros of the same shape of the updates passed as input.

  Once the scheduled number of intermediate 'mini-steps' has been reached, the
  gradients accumulated to the current time will be passed to the wrapped
  optimizer's update function, (with the inner optimizer's state being updated
  appropriately) and then returned to the caller. The wrapper's accumulated
  gradients are then set back to zero and the process starts again.

  The number of mini-steps per gradient update is controlled by a function, and
  can vary over training, this also allows varying batch size over training.
  """

  def __init__(
      self,
      opt: base.GradientTransformation,
      every_k_schedule: Union[int, Callable[[chex.Array], chex.Array]],
      use_grad_mean: bool = True,
      should_skip_update_fn: Optional[ShouldSkipUpdateFunction] = None):
    # pylint: disable=line-too-long
    """Initialiser.

    Args:
      opt: the wrapped optimizer.
      every_k_schedule: an int or a function.

        * As a function, it returns how many mini-steps should be accumulated
          in a single gradient step. Its only argument is the current
          gradient step count. By varying the returned value, users can vary the
          overall training batch size.
        * If an ``int``, this is the constant number of mini-steps per gradient
          update.
      use_grad_mean: if ``True`` (the default), gradients accumulated over
        multiple mini-steps are averaged. Otherwise, they are summed.
      should_skip_update_fn: if provided, this function is used to decide when
        to accept or reject the updates from a mini-step. When a mini-step is
        rejected, the inner state of `MultiSteps` is not updated. In other
        words, it is as if this mini-step never happened. For example:

        * to ignore updates containing inf or NaN, do
          ``should_skip_update_fn=skip_not_finite``;
        * to ignore updates with a norm square larger than 42, do:
          ``should_skip_update_fn=functools.partial(skip_large_updates, max_squared_norm=42.)``

        Note that the optimizer's state :class:`optax.MultiStepsState` contains
        a keyword argument ``skip_state`` in which debugging and monitoring
        information returned by ``should_skip_update_fn`` is written.
    """
    # pylint: enable=line-too-long
    # Wrap `opt` so its update accepts (and may ignore) `**extra_args`.
    self._opt = base.with_extra_args_support(opt)

    # Normalise `every_k_schedule` to a callable of the gradient step count.
    if isinstance(every_k_schedule, int):
      self._every_k_schedule = lambda step: every_k_schedule
    else:
      self._every_k_schedule = every_k_schedule
    self._use_grad_mean = use_grad_mean

    if self._use_grad_mean:
      # Use Welford algorithm for numerically stable aggregation of mean.
      self._acc_update = (
          lambda grad, acc, *, n_acc: acc + (grad - acc) / (n_acc + 1))
    else:
      # Plain summation when averaging is disabled.
      self._acc_update = lambda grad, acc, *, n_acc: grad + acc

    if should_skip_update_fn is None:

      # Default skip function: never skip, and carry an empty skip state.
      def should_skip_update_fn(*unused_args, **unused_kwargs):
        return jnp.array(False, dtype=jnp.bool_), ()

    self._should_skip_update_fn = should_skip_update_fn

  @property
  def inner_opt(self):
    """The wrapped optimizer (with extra-args support added)."""
    return self._opt

  def init(self, params: Any) -> MultiStepsState:
    """Builds and returns initial `MultiStepsState`."""
    updates = otu.tree_zeros_like(params)
    gradient_step = jnp.zeros([], dtype=jnp.int32)
    # Call the skip fn once on zero updates so the initial state carries a
    # `skip_state` with the same structure as the one produced by `update`.
    _, skip_state = self._should_skip_update_fn(updates, gradient_step, params)
    init_state = MultiStepsState(
        mini_step=jnp.zeros([], dtype=jnp.int32),
        gradient_step=gradient_step,
        inner_opt_state=self._opt.init(params),
        acc_grads=updates,
        skip_state=skip_state)
    return init_state

  def update(self,
             updates: base.Updates,
             state: MultiStepsState,
             params: Optional[base.Params] = None,
             **extra_args: Any,
             ) -> tuple[base.Updates, MultiStepsState]:
    """Accumulates gradients and proposes non-zero updates every `k_steps`."""
    k_steps = self._every_k_schedule(state.gradient_step)
    should_skip_update, skip_state = self._should_skip_update_fn(
        updates, state.gradient_step, params)
    # `lax.cond` below requires a scalar boolean predicate; fail loudly on a
    # malformed `should_skip_update_fn` return value.
    if (should_skip_update.dtype, should_skip_update.shape) != (jnp.bool_, ()):
      raise ValueError(
          'The `should_skip_update_fn` function should return a boolean scalar '
          f'array, but it returned an array of dtype {should_skip_update.dtype}'
          f' and shape {should_skip_update.shape}'
      )

    # Note: we do not enclose variables to allow JAX to re-use memory buffers.
    def _do_update(updates, state, params):
      # Fold this mini-step's updates into the running accumulator
      # (mean via Welford, or sum, depending on `use_grad_mean`).
      acc_grads = jtu.tree_map(
          lambda upd, acc: self._acc_update(upd, acc, n_acc=state.mini_step),
          updates,
          state.acc_grads,
      )

      # The inner update is computed unconditionally; whether its results are
      # kept is decided below via `emit`.
      final_updates, new_inner_state = self._opt.update(
          acc_grads, state.inner_opt_state, params=params, **extra_args
      )

      # `emit` is 1 on the final mini-step of the current window, else 0.
      emit = state.mini_step == (k_steps - 1)
      new_state = MultiStepsState(
          mini_step=numerics.safe_int32_increment(state.mini_step) % k_steps,
          # Increment the gradient step only when emitting.
          gradient_step=emit
          * numerics.safe_int32_increment(state.gradient_step)
          + (1 - emit) * state.gradient_step,
          # Keep the old inner state unless this mini-step emits.
          inner_opt_state=jtu.tree_map(
              lambda st, nst: jnp.where(emit, nst, st),
              state.inner_opt_state,
              new_inner_state,
          ),
          # Reset the accumulator to zero on emit.
          acc_grads=jtu.tree_map(
              lambda ga: (1 - emit) * ga, acc_grads
          ),
          skip_state=skip_state,
      )

      # Non-emitting mini-steps return zeros of the same structure.
      final_updates = jtu.tree_map(
          lambda ga: emit * ga, final_updates
      )
      return final_updates, new_state

    def _skip_update(updates, state, params):
      del updates, params
      # A skipped mini-step leaves all counters and accumulators untouched;
      # only `skip_state` (diagnostics) is refreshed.
      multi_state_when_skip = MultiStepsState(
          mini_step=state.mini_step,
          gradient_step=state.gradient_step,
          inner_opt_state=state.inner_opt_state,
          acc_grads=state.acc_grads,
          skip_state=skip_state,
      )
      zero_updates = otu.tree_zeros_like(state.acc_grads)
      return zero_updates, multi_state_when_skip

    new_updates, new_state = lax.cond(
        should_skip_update, _skip_update, _do_update, *(updates, state, params)
    )
    return new_updates, new_state

  def has_updated(
      self, state: Union[MultiStepsState, chex.ArrayTree]) -> chex.Array:
    """Returns True iff the latest `update` call applied a gradient step.

    True when the mini-step counter has just wrapped to 0 after at least one
    completed gradient step.
    """
    # Use `getattr` to bypass pytype checks.
    return jnp.logical_and(
        getattr(state, 'mini_step') == 0, getattr(state, 'gradient_step') > 0
    )

  def gradient_transformation(self) -> base.GradientTransformation:
    """Exposes this wrapper as a standard `(init, update)` transformation."""
    return base.GradientTransformation(init=self.init, update=self.update)
|
testbed/google-deepmind__optax/optax/transforms/_accumulation_test.py
ADDED
|
@@ -0,0 +1,298 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for optax.transforms._accumulation."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
import chex
|
| 19 |
+
import flax
|
| 20 |
+
import jax
|
| 21 |
+
import jax.numpy as jnp
|
| 22 |
+
import numpy as np
|
| 23 |
+
|
| 24 |
+
from optax._src import alias
|
| 25 |
+
from optax._src import combine
|
| 26 |
+
from optax._src import transform
|
| 27 |
+
from optax._src import update
|
| 28 |
+
from optax.transforms import _accumulation
|
| 29 |
+
from optax.transforms import _constraining
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class AccumulationTest(chex.TestCase):
  """Tests for `ema`, the skip helpers and `MultiSteps` in `_accumulation`."""

  @chex.all_variants
  def test_ema(self):
    values = jnp.array([5.0, 7.0])
    decay = 0.9
    d = decay

    ema = _accumulation.ema(decay=decay, debias=False)
    state = ema.init(values[0])  # init to zeroes

    transform_fn = self.variant(ema.update)
    mean, state = transform_fn(values[0], state)
    # Without debiasing the first output is (1 - d) * x0.
    np.testing.assert_allclose(mean, (1-d) * values[0], atol=1e-4)

    mean, _ = transform_fn(values[1], state)
    np.testing.assert_allclose(
        mean,
        (1 - d) * (values[1] + d * values[0]), atol=1e-2)

  @chex.all_variants
  def test_ema_debias(self):
    values = jnp.array([5.0, 7.0])
    decay = 0.9
    d = decay

    ema = _accumulation.ema(decay=decay)
    state = ema.init(values[0])

    transform_fn = self.variant(ema.update)
    mean, state = transform_fn(values[0], state)
    # With debiasing the first output equals the first input.
    np.testing.assert_allclose(mean, values[0], atol=1e-4)

    mean, state = transform_fn(values[1], state)
    np.testing.assert_allclose(
        mean,
        ((1 - d) * values[1] + d * (1 - d) * values[0]) / (1 - d**2),
        atol=1e-2)
    # The state must not be debiased.
    np.testing.assert_allclose(
        state.ema,
        (1 - d) * values[1] + d * (1 - d) * values[0],
        atol=1e-2)

  def test_skip_not_finite(self):
    step = jnp.zeros([], dtype=jnp.int32)

    with self.subTest('test_pos_inf'):
      should_skip, skip_state = _accumulation.skip_not_finite(
          [jnp.array(float('inf')), jnp.zeros([])], step, None)
      self.assertTrue(bool(should_skip))
      self.assertTrue(bool(skip_state['should_skip']))
      self.assertEqual(int(skip_state['num_not_finite']), 1)

    with self.subTest('test_neg_inf'):
      should_skip, skip_state = _accumulation.skip_not_finite(
          [jnp.array(-float('inf')), jnp.zeros([])], step, None)
      self.assertTrue(bool(should_skip))
      self.assertTrue(bool(skip_state['should_skip']))
      self.assertEqual(int(skip_state['num_not_finite']), 1)

    with self.subTest('test_nan'):
      should_skip, skip_state = _accumulation.skip_not_finite(
          [jnp.array(float('nan')), jnp.zeros([])], step, None)
      self.assertTrue(bool(should_skip))
      self.assertTrue(bool(skip_state['should_skip']))
      self.assertEqual(int(skip_state['num_not_finite']), 1)

    with self.subTest('test_finite'):
      should_skip, skip_state = _accumulation.skip_not_finite(
          [jnp.array(11.), jnp.zeros([])], step, None)
      self.assertFalse(bool(should_skip))
      self.assertFalse(bool(skip_state['should_skip']))
      self.assertEqual(int(skip_state['num_not_finite']), 0)

  def test_skip_large_updates(self):
    step = jnp.zeros([], dtype=jnp.int32)

    with self.subTest('test_inf'):
      should_skip, skip_state = _accumulation.skip_large_updates(
          [jnp.array(float('inf')), jnp.zeros([])], step, None, 100.)
      self.assertTrue(bool(should_skip))
      self.assertTrue(bool(skip_state['should_skip']))
      self.assertEqual(float(skip_state['norm_squared']), float('inf'))

    with self.subTest('test_nan'):
      should_skip, skip_state = _accumulation.skip_large_updates(
          [jnp.array(float('nan')), jnp.zeros([])], step, None, 100.)
      self.assertTrue(bool(should_skip))
      self.assertTrue(bool(skip_state['should_skip']))
      # Recall that NaN != NaN.
      norm_squared = float(skip_state['norm_squared'])
      self.assertNotEqual(norm_squared, norm_squared)

    with self.subTest('test_large'):
      # 11**2 = 121 > 100, so the update must be skipped.
      should_skip, skip_state = _accumulation.skip_large_updates(
          [jnp.array(11.), jnp.zeros([])], step, None, 100.)
      self.assertTrue(bool(should_skip))
      self.assertTrue(bool(skip_state['should_skip']))
      self.assertEqual(float(skip_state['norm_squared']), 121.)

    with self.subTest('test_small'):
      should_skip, skip_state = _accumulation.skip_large_updates(
          [jnp.zeros([]), jnp.zeros([])], step, None, 100.)
      self.assertFalse(bool(should_skip))
      self.assertFalse(bool(skip_state['should_skip']))
      self.assertEqual(float(skip_state['norm_squared']), 0.)

  @chex.variants(with_jit=True, without_jit=True, with_pmap=True)
  def test_multi_steps(self):
    batch_size = 32
    x_size = 7
    # Parameters should be updated only every `k_steps` optimisation steps.
    k_steps = 4
    data = jnp.ones([batch_size, x_size])

    class Loss(flax.linen.Module):
      @flax.linen.compact
      def __call__(self, x):
        return jnp.sum(flax.linen.Dense(10)(x)**2)

    loss = Loss()

    params = loss.init({'params': jax.random.PRNGKey(0)}, data)['params']

    def loss_apply(params, data):
      return loss.apply({'params': params}, data)

    ms_opt = _accumulation.MultiSteps(
        # Use a non-trivial inner optimiser:
        # * it has a state,
        # * it requires the params for the update.
        combine.chain(transform.scale_by_adam(),
                      transform.add_decayed_weights(1e-2),
                      transform.scale(-1e-4)), k_steps)

    opt_init, opt_update = ms_opt.gradient_transformation()

    # Put the training in one function, to check that the update is indeed
    # jittable.
    def train_step(data, opt_state, params):
      grad = jax.grad(loss_apply)(params, data)
      updates, opt_state = opt_update(grad, opt_state, params)
      return updates, opt_state

    opt_state = opt_init(params)

    prev_loss = loss_apply(params, data)
    for idx in range(5 * k_steps):
      updates, opt_state = self.variant(train_step)(data, opt_state, params)
      new_params = update.apply_updates(params, updates)
      new_loss = loss_apply(new_params, data)
      if idx % k_steps < k_steps - 1:
        # The parameters should not have changed and the loss should be
        # constant.
        jax.tree_util.tree_map(
            np.testing.assert_array_equal, new_params, params)
        np.testing.assert_equal(new_loss, prev_loss)
        self.assertFalse(ms_opt.has_updated(opt_state))
      else:
        # This is a step where parameters should actually have been updated, and
        # the loss should accordingly go down.
        np.testing.assert_array_less(new_loss, prev_loss)
        prev_loss = new_loss
        self.assertTrue(ms_opt.has_updated(opt_state))
      params = new_params

  def test_multi_steps_every_k_schedule(self):
    # Test a non-trivial schedule which varies over time.
    ms_opt = _accumulation.MultiSteps(
        alias.sgd(1e-4), lambda grad_step: jnp.where(grad_step < 2, 1, 3))
    opt_init, opt_update = ms_opt.gradient_transformation()
    params = dict(a=jnp.zeros([]))
    opt_state = opt_init(params)
    grad = dict(a=jnp.zeros([]))
    self.assertFalse(ms_opt.has_updated(opt_state))
    # First two steps have 1 mini-step per update.
    for _ in range(2):
      _, opt_state = opt_update(grad, opt_state, params)
      self.assertTrue(ms_opt.has_updated(opt_state))
    # Subsequently, mini-steps should have 3 mini-steps per update.
    for _ in range(5):
      for _ in range(2):
        _, opt_state = opt_update(grad, opt_state, params)
        self.assertFalse(ms_opt.has_updated(opt_state))
      _, opt_state = opt_update(grad, opt_state, params)
      self.assertTrue(ms_opt.has_updated(opt_state))

  def test_multi_steps_zero_nans(self):
    # Test that MultiStep is compatible with zero_nans
    # https://github.com/google-deepmind/optax/issues/828
    ms_opt = _accumulation.MultiSteps(
        combine.chain(_constraining.zero_nans(), alias.sgd(1e-4)),
        every_k_schedule=2
    )
    opt_init, opt_update = ms_opt.gradient_transformation()
    params = dict(a=jnp.zeros([]))
    opt_state = opt_init(params)
    grad = dict(a=jnp.zeros([]))
    # The call itself must not raise; no numerical check needed here.
    opt_update(grad, opt_state, params)

  def test_multi_steps_computes_mean(self):
    k_steps = 4
    ms_opt = _accumulation.MultiSteps(
        transform.scale(1.0), k_steps, use_grad_mean=True)
    opt_init, opt_update = ms_opt.gradient_transformation()
    params = dict(a=jnp.zeros([]))
    opt_state = opt_init(params)
    grads = [dict(a=jnp.ones([]) * i) for i in [1, 2, 3, 4]]
    self.assertFalse(ms_opt.has_updated(opt_state))

    # First 3 steps don't update.
    for grad in grads[:-1]:
      _, opt_state = opt_update(grad, opt_state, params)
      self.assertFalse(ms_opt.has_updated(opt_state))

    # Actual update: mean of [1, 2, 3, 4] is 2.5.
    new_params, opt_state = opt_update(grads[-1], opt_state, params)
    self.assertTrue(ms_opt.has_updated(opt_state))
    np.testing.assert_array_equal(new_params['a'], 2.5)

  def test_multi_steps_skip_not_finite(self):
    k_steps = 2
    ms_opt = _accumulation.MultiSteps(
        alias.sgd(1.), k_steps,
        should_skip_update_fn=_accumulation.skip_not_finite)
    opt_init, opt_update = ms_opt.gradient_transformation()
    opt_init = jax.jit(opt_init)
    opt_update = jax.jit(opt_update)
    params = dict(a=jnp.zeros([]))
    opt_state = opt_init(params)

    with self.subTest('test_good_updates'):
      updates, opt_state = opt_update(dict(a=jnp.ones([])), opt_state, params)
      self.assertEqual(int(opt_state.mini_step), 1)
      params = update.apply_updates(params, updates)
      updates, opt_state = opt_update(dict(a=jnp.ones([])), opt_state, params)
      self.assertEqual(int(opt_state.mini_step), 0)
      params = update.apply_updates(params, updates)
      np.testing.assert_array_equal(params['a'], jnp.negative(jnp.ones([])))

    with self.subTest('test_inf_updates'):
      updates, opt_state = opt_update(
          dict(a=jnp.array(float('inf'))), opt_state, params)
      self.assertEqual(int(opt_state.mini_step), 0)  # No increase in mini_step
      params = update.apply_updates(params, updates)
      np.testing.assert_array_equal(params['a'], jnp.negative(jnp.ones([])))

    with self.subTest('test_nan_updates'):
      updates, opt_state = opt_update(
          dict(a=jnp.full([], float('nan'))), opt_state, params)
      self.assertEqual(int(opt_state.mini_step), 0)  # No increase in mini_step
      params = update.apply_updates(params, updates)
      np.testing.assert_array_equal(params['a'], jnp.negative(jnp.ones([])))

    with self.subTest('test_final_good_updates'):
      updates, opt_state = opt_update(dict(a=jnp.ones([])), opt_state, params)
      self.assertEqual(int(opt_state.mini_step), 1)
      params = update.apply_updates(params, updates)
      updates, opt_state = opt_update(dict(a=jnp.ones([])), opt_state, params)
      self.assertEqual(int(opt_state.mini_step), 0)
      params = update.apply_updates(params, updates)
      np.testing.assert_array_equal(params['a'], jnp.negative(jnp.full([], 2.)))
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
# Allow running this test module directly, outside the test runner.
if __name__ == '__main__':
  absltest.main()
|
testbed/google-deepmind__optax/optax/transforms/_adding.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Additive components in gradient transformations."""
|
| 16 |
+
|
| 17 |
+
from typing import Any, Callable, NamedTuple, Optional, Union
|
| 18 |
+
|
| 19 |
+
import chex
|
| 20 |
+
import jax
|
| 21 |
+
from jax import tree_util as jtu
|
| 22 |
+
import jax.numpy as jnp
|
| 23 |
+
|
| 24 |
+
from optax import tree_utils as otu
|
| 25 |
+
from optax._src import base
|
| 26 |
+
from optax._src import numerics
|
| 27 |
+
from optax._src import wrappers
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def add_decayed_weights(
    weight_decay: Union[float, jax.Array] = 0.0,
    mask: Optional[Union[Any, Callable[[base.Params], Any]]] = None
) -> base.GradientTransformation:
  """Add parameter scaled by `weight_decay`.

  Args:
    weight_decay: A scalar weight decay rate.
    mask: A tree with same structure as (or a prefix of) the params PyTree,
      or a Callable that returns such a pytree given the params/updates.
      The leaves should be booleans, `True` for leaves/subtrees you want to
      apply the transformation to, and `False` for those you want to skip.

  Returns:
    A `GradientTransformation` object.
  """

  def update_fn(updates, state, params):
    # Weight decay requires the current parameter values.
    if params is None:
      raise ValueError(base.NO_PARAMS_MSG)
    decayed = jtu.tree_map(
        lambda g, p: g + weight_decay * p, updates, params)
    return decayed, state

  decay_tx = base.GradientTransformation(base.init_empty_state, update_fn)
  if mask is None:
    return decay_tx
  # If a mask is given, restrict the decay to the selected sub-tree.
  # E.g. it is common to skip weight decay on bias units and batch stats.
  return wrappers.masked(decay_tx, mask)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class AddNoiseState(NamedTuple):
  """State for adding gradient noise. Contains a count for annealing."""
  count: chex.Array  # number of updates applied; drives the variance annealing
  rng_key: chex.PRNGKey  # PRNG key, split at every update to sample fresh noise
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def add_noise(
    eta: float,
    gamma: float,
    seed: int
) -> base.GradientTransformation:
  """Add gradient noise.

  References:
    [Neelakantan et al, 2014](https://arxiv.org/abs/1511.06807)

  Args:
    eta: Base variance of the gaussian noise added to the gradient.
    gamma: Decay exponent for annealing of the variance.
    seed: Seed for random number generation.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    del params
    # Counter starts at zero; the PRNG stream is derived from `seed`.
    return AddNoiseState(
        count=jnp.zeros([], jnp.int32),
        rng_key=jax.random.PRNGKey(seed))

  def update_fn(updates, state, params=None):  # pylint: disable=missing-docstring
    del params
    step = numerics.safe_int32_increment(state.count)
    # Noise standard deviation anneals as sqrt(eta / step**gamma).
    sigma = jnp.sqrt(eta / step**gamma)

    carry_key, noise_key = jax.random.split(state.rng_key)
    gaussian = otu.tree_random_like(
        noise_key, target_tree=updates, sampler=jax.random.normal)
    noised = otu.tree_add_scalar_mul(
        tree_x=updates, scalar=sigma, tree_y=gaussian)
    return noised, AddNoiseState(count=step, rng_key=carry_key)

  return base.GradientTransformation(init_fn, update_fn)
|
testbed/google-deepmind__optax/optax/transforms/_adding_test.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for optax.transforms._adding."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
|
| 19 |
+
import chex
|
| 20 |
+
from jax import tree_util as jtu
|
| 21 |
+
import jax.numpy as jnp
|
| 22 |
+
|
| 23 |
+
from optax.transforms import _adding
|
| 24 |
+
|
| 25 |
+
STEPS = 50
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class AddingTest(chex.TestCase):
  """Tests for the weight-decay and noise-addition transformations."""

  def setUp(self):
    super().setUp()
    self.init_params = (jnp.array([1., 2.]), jnp.array([3., 4.]))
    self.per_step_updates = (jnp.array([500., 5.]), jnp.array([300., 3.]))

  @chex.all_variants
  def test_add_decayed_weights(self):
    # The mask can be given either directly as a pytree or as a function
    # returning one; here we pass the pytree itself.
    mask = (True, dict(a=True, b=False))
    tx = _adding.add_decayed_weights(0.1, mask=mask)
    # Zero updates paired with all-ones weights.
    updates = (
        jnp.zeros((2,), dtype=jnp.float32),
        dict(
            a=jnp.zeros((2,), dtype=jnp.float32),
            b=jnp.zeros((2,), dtype=jnp.float32),))
    weights = (
        jnp.ones((2,), dtype=jnp.float32),
        dict(
            a=jnp.ones((2,), dtype=jnp.float32),
            b=jnp.ones((2,), dtype=jnp.float32),))
    # With this mask, decayed weights are added to the first two leaves
    # only; the masked-out leaf keeps its (zero) update.
    expected = (
        0.1 * jnp.ones((2,), dtype=jnp.float32),
        dict(
            a=0.1 * jnp.ones((2,), dtype=jnp.float32),
            b=jnp.zeros((2,), dtype=jnp.float32),))
    # Apply the transform and compare against the expectation.
    state = tx.init(weights)
    update_fn = self.variant(tx.update)
    new_updates, _ = update_fn(updates, state, weights)
    chex.assert_trees_all_close(new_updates, expected)

  @chex.all_variants
  def test_add_noise_has_correct_variance_scaling(self):
    # Compare the generated noise with a rescaled unit-variance substitute
    # drawn from the same seed.
    eta, gamma, seed = 0.3, 0.55, 314
    noise = _adding.add_noise(eta, gamma, seed)
    noise_unit = _adding.add_noise(1.0, 0.0, seed)

    params = self.init_params
    state = noise.init(params)
    state_unit = noise_unit.init(params)

    # Adding the noise to all-zero updates exposes the noise itself.
    updates = jtu.tree_map(jnp.zeros_like, params)

    for step in range(1, STEPS + 1):
      updates_i, state = self.variant(noise.update)(updates, state)
      updates_i_unit, state_unit = noise_unit.update(updates, state_unit)

      # The variance at step t is eta / t**gamma, so the standard deviation
      # of the unit noise must be scaled by its square root.
      scale = jnp.sqrt(eta / step**gamma)
      rescaled = jtu.tree_map(lambda g, s=scale: g * s, updates_i_unit)

      chex.assert_trees_all_close(updates_i, rescaled, rtol=1e-4)


if __name__ == "__main__":
  absltest.main()
|
testbed/google-deepmind__optax/optax/transforms/_clipping.py
ADDED
|
@@ -0,0 +1,282 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Gradient clipping transformations.
|
| 16 |
+
|
| 17 |
+
Note that complex numbers are also supported, see
|
| 18 |
+
https://gist.github.com/wdphy16/118aef6fb5f82c49790d7678cf87da29
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
import chex
|
| 22 |
+
import jax
|
| 23 |
+
from jax import tree_util as jtu
|
| 24 |
+
import jax.numpy as jnp
|
| 25 |
+
|
| 26 |
+
from optax import tree_utils as otu
|
| 27 |
+
from optax._src import base
|
| 28 |
+
from optax._src import linear_algebra
|
| 29 |
+
from optax._src import numerics
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def clip(max_delta: chex.Numeric) -> base.GradientTransformation:
  """Clips updates element-wise, to be in ``[-max_delta, +max_delta]``.

  Args:
    max_delta: The largest allowed magnitude for any single update entry.

  Returns:
    A `GradientTransformation` object.
  """

  def update_fn(updates, state, params=None):
    del params  # Unused by element-wise clipping.
    clipped = otu.tree_clip(updates, -max_delta, max_delta)
    return clipped, state

  return base.GradientTransformation(base.init_empty_state, update_fn)
| 49 |
+
def clip_by_block_rms(threshold: float) -> base.GradientTransformation:
  """Clips updates to a max rms for the gradient of each param vector or matrix.

  A `block` is here a weight vector (e.g. in a Linear layer) or a weight matrix
  (e.g. in a convolutional layer) appearing as a leaf in the grads/param pytree.

  Args:
    threshold: The maximum rms for the gradient of each param vector or matrix.

  Returns:
    A `GradientTransformation` object.
  """

  def update_fn(updates, state, params=None):
    del params  # Not needed for rms-based clipping.

    def scale_leaf(leaf):
      # Divide by max(1, rms/threshold): leaves with rms below the threshold
      # pass through unchanged, larger ones are rescaled onto the boundary.
      rms = jnp.sqrt(jnp.mean(numerics.abs_sq(leaf)))
      return leaf / jnp.maximum(1.0, rms / threshold)

    return jtu.tree_map(scale_leaf, updates), state

  return base.GradientTransformation(base.init_empty_state, update_fn)
def clip_by_global_norm(max_norm: float) -> base.GradientTransformation:
  """Clips updates using their global norm.

  References:
    [Pascanu et al, 2012](https://arxiv.org/abs/1211.5063)

  Args:
    max_norm: The maximum global norm for an update.

  Returns:
    A `GradientTransformation` object.
  """

  def update_fn(updates, state, params=None):
    del params
    g_norm = linear_algebra.global_norm(updates)
    # TODO(b/163995078): revert back to the following (faster) implementation
    # once analysed how it affects backprop through update (e.g. meta-gradients)
    #   g_norm = jnp.maximum(max_norm, g_norm)
    #   updates = jtu.tree_map(lambda t: (t / g_norm) * max_norm, updates)
    within_bound = jnp.squeeze(g_norm < max_norm)
    chex.assert_shape(within_bound, ())  # A scalar.

    def rescale(t):
      # `lax.select` keeps both branches differentiable; updates are left
      # unchanged when the global norm is already within the bound.
      scaled = (t / g_norm.astype(t.dtype)) * max_norm
      return jax.lax.select(within_bound, t, scaled)

    return jtu.tree_map(rescale, updates), state

  return base.GradientTransformation(base.init_empty_state, update_fn)
def per_example_global_norm_clip(
    grads: list[chex.Array], l2_norm_clip: float
) -> tuple[list[chex.Array], jax.Array]:
  """Applies gradient clipping per-example using their global norm.

  References:
    [Abadi et al, 2016](https://arxiv.org/abs/1607.00133)

  Args:
    grads: flattened update; the function expects these to have a batch
      dimension on the 0th axis.
    l2_norm_clip: maximum L2 norm of the per-example gradients.

  Returns:
    A tuple containing sum of the clipped per-example grads, and the number of
    per-example grads that were clipped.
  """
  batch_size = grads[0].shape[0]

  # Every leaf must carry the same leading batch dimension.
  if any(g.ndim == 0 or g.shape[0] != batch_size for g in grads):
    raise ValueError(
        'Unlike other transforms, `per_example_global_norm_clip` expects'
        ' `grads` to have a batch dimension in the 0th axis.')

  # One global norm per example, computed over all leaves jointly.
  per_example_norms = jax.vmap(linear_algebra.global_norm)(grads)
  # Divisor > 1 only for examples whose norm exceeds the clip threshold.
  divisors = jnp.maximum(per_example_norms / l2_norm_clip, 1.0)
  num_clipped = jnp.greater(divisors, 1.0).sum()
  # Move the batch axis last so broadcasting divides per-example, then
  # reduce over the batch.
  clipped_sum = [(jnp.moveaxis(g, 0, -1) / divisors).sum(-1) for g in grads]
  return clipped_sum, num_clipped
def per_example_layer_norm_clip(
    grads: list[jax.Array],
    global_l2_norm_clip: float,
    uniform: bool = True,
    eps: float = 1e-8,
) -> tuple[list[jax.Array], list[jax.Array]]:
  """Applies gradient clipping per-example using per-layer norms.

  Let C = `global_l2_norm_clip`. Then per-layer clipping is done as follows:
  (1) If `uniform` is `True`, each of the K layers has an individual clip
      norm of C / sqrt(K).
  (2) If `uniform` is `False`, each of the K layers has an individual clip
      norm of C * sqrt(D_i / D) where D_i is the number of parameters in
      layer i, and D is the total number of parameters in the model.

  References:
    [McMahan et al, 2012](https://arxiv.org/abs/1710.06963)]

  Args:
    grads: flattened update; i.e. a list of gradients in which each item is
      the gradient for one layer; the function expects these to have a batch
      dimension on the 0th axis.
    global_l2_norm_clip: overall L2 clip norm to use.
    uniform: If `True`, per-layer clip norm is global_l2_norm_clip/sqrt(L),
      where L is the number of layers. Otherwise, per-layer clip norm is
      global_l2_norm_clip * sqrt(f), where f is the fraction of total model
      parameters that are in this layer.
    eps: Small positive value to add to norms to avoid possible division by
      zero.

  Returns:
    A tuple containing sum of the clipped per-example grads and the number of
    per-example grads that were clipped for each layer.

  Raises:
    ValueError: if any leaf of ``grads`` lacks the shared batch dimension.
  """
  bsize = grads[0].shape[0]

  if any(g.ndim == 0 or bsize != g.shape[0] for g in grads):
    # Bug fix: the original interpolated a *generator expression* into the
    # f-string, which rendered as `<generator object ...>`; a list
    # comprehension actually shows the offending shapes.
    raise ValueError(
        'Unlike other transforms, `per_example_layer_norm_clip` expects'
        ' `grads` to have a batch dimension in the 0th axis; got shapes:'
        f' {[g.shape for g in grads]}.'
    )

  num_layers = len(grads)

  # Compute per-layer clip norms, based on whether we are using the uniform
  # variant or not.
  if uniform:
    # Every layer receives the same share: C / sqrt(num_layers).
    layer_clip_norms = (
        global_l2_norm_clip * (1.0 / num_layers) ** 0.5,
    ) * num_layers
  else:
    # Each layer's share scales with the fraction of model parameters it
    # holds (g[0] is a single example's gradient for that layer).
    total_params = sum(g[0].size for g in grads)
    layer_clip_norms = tuple(
        global_l2_norm_clip * (g[0].size / float(total_params)) ** 0.5
        for g in grads
    )

  # Compute per-layer grad norms, vmapped over the batch dimension.
  def map_layer_norm(grads_list):
    return [jnp.linalg.norm(g, ord=None, axis=None) for g in grads_list]

  layer_grad_norms_per_example = jax.vmap(map_layer_norm)(grads)

  # Perform clipping: divisor > 1 only for (example, layer) pairs whose norm
  # exceeds that layer's clip norm.
  divisors = tuple(
      jnp.maximum(layer_grad_norm / (layer_clip_norm + eps), 1.0)
      for layer_grad_norm, layer_clip_norm in zip(
          layer_grad_norms_per_example, layer_clip_norms
      )
  )
  num_clipped = [jnp.greater(divisor, 1.0).sum() for divisor in divisors]
  clipped_sum = [
      # Expand the per-example divisor over the non-batch axes, then sum
      # the clipped per-example gradients over the batch.
      (g / jnp.expand_dims(d, axis=tuple(range(1, g.ndim)))).sum(0)
      for g, d in zip(grads, divisors)
  ]
  return clipped_sum, num_clipped
def unitwise_norm(x: chex.Array) -> chex.Array:
  """Computes norms of each output unit separately."""
  ndim = x.ndim
  if jnp.squeeze(x).ndim <= 1:  # Scalars and vectors.
    sq_norm = jnp.sum(numerics.abs_sq(x), keepdims=True)
  elif ndim in (2, 3):
    # Linear layers of shape IO, or multihead linear parameters. Note that
    # this assumes parameters with a shape of length 3 are multihead linear
    # parameters--if you wish to apply AGC to 1D convs, you may need to
    # modify this branch.
    sq_norm = jnp.sum(numerics.abs_sq(x), axis=0, keepdims=True)
  elif ndim == 4:  # Conv kernels of shape HWIO.
    sq_norm = jnp.sum(numerics.abs_sq(x), axis=(0, 1, 2), keepdims=True)
  else:
    raise ValueError(
        f'Expected parameter with shape in {1, 2, 3, 4}, got {x.shape}.')
  chex.assert_is_broadcastable(sq_norm.shape, x.shape)
  return jnp.broadcast_to(jnp.sqrt(sq_norm), x.shape)
def unitwise_clip(g_norm: chex.Array,
                  max_norm: chex.Array,
                  grad: chex.Array,
                  div_eps: float = 1e-6) -> chex.Array:
  """Applies gradient clipping unit-wise."""
  # The max(., div_eps) guard is distinct from the normal eps and only
  # prevents division by zero; it technically should never engage.
  rescaled = grad * (max_norm / jnp.maximum(g_norm, div_eps))
  chex.assert_equal_shape((g_norm, max_norm, grad, rescaled))
  # Keep the original gradient wherever its norm is already within bounds.
  return jnp.where(g_norm < max_norm, grad, rescaled)
def adaptive_grad_clip(clipping: float,
                       eps: float = 1e-3) -> base.GradientTransformation:
  """Clips updates to be at most ``clipping * parameter_norm``, unit-wise.

  References:
    [Brock, Smith, De, Simonyan 2021] High-Performance Large-Scale Image
    Recognition Without Normalization. (https://arxiv.org/abs/2102.06171)

  Args:
    clipping: The maximum allowed ratio of update norm to parameter norm.
    eps: An epsilon term to prevent clipping of zero-initialized params.

  Returns:
    A `GradientTransformation` object.
  """

  def update_fn(updates, state, params):
    if params is None:
      raise ValueError(base.NO_PARAMS_MSG)
    g_norm, p_norm = jtu.tree_map(unitwise_norm, (updates, params))
    # Largest permitted norm per unit: clipping * max(param_norm, eps).
    max_norm = jtu.tree_map(lambda n: clipping * jnp.maximum(n, eps), p_norm)
    # Rescale any unit whose update norm exceeds its allowance.
    clipped = jtu.tree_map(unitwise_clip, g_norm, max_norm, updates)
    return clipped, state

  return base.GradientTransformation(base.init_empty_state, update_fn)
|
testbed/google-deepmind__optax/optax/transforms/_clipping_test.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for optax.transforms._clipping."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
import chex
|
| 19 |
+
import jax
|
| 20 |
+
import jax.numpy as jnp
|
| 21 |
+
|
| 22 |
+
from optax._src import linear_algebra
|
| 23 |
+
from optax.transforms import _clipping
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
STEPS = 50
|
| 27 |
+
LR = 1e-2
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class ClippingTest(absltest.TestCase):
  """Unit tests for optax.transforms._clipping."""

  def setUp(self):
    super().setUp()
    self.init_params = (jnp.array([1., 2.]), jnp.array([3., 4.]))
    self.per_step_updates = (jnp.array([500., 5.]), jnp.array([300., 3.]))

  def test_clip(self):
    updates = self.per_step_updates
    # For a sufficiently high delta the update should not be changed.
    clipper = _clipping.clip(1e6)
    clipped_updates, _ = clipper.update(updates, None)
    # Bug fix: the original compared `clipped_updates` against itself,
    # which is vacuously true; compare against the unclipped input instead.
    chex.assert_trees_all_close(clipped_updates, updates)
    # Clipping at delta=1 should make all updates exactly 1 (every input
    # entry is large and positive).
    clipper = _clipping.clip(1.)
    clipped_updates, _ = clipper.update(updates, None)
    chex.assert_trees_all_close(
        clipped_updates, jax.tree_util.tree_map(jnp.ones_like, updates))

  def test_clip_by_block_rms(self):
    rms_fn = lambda t: jnp.sqrt(jnp.mean(t**2))
    updates = self.per_step_updates
    for i in range(1, STEPS + 1):
      clipper = _clipping.clip_by_block_rms(1. / i)
      # Check that the clipper actually works and block rms is <= threshold.
      updates, _ = clipper.update(updates, None)
      self.assertAlmostEqual(rms_fn(updates[0]), 1. / i)
      self.assertAlmostEqual(rms_fn(updates[1]), 1. / i)
      # Check that continuously clipping won't cause numerical issues.
      updates_step, _ = clipper.update(self.per_step_updates, None)
      chex.assert_trees_all_close(updates, updates_step)

  def test_clip_by_global_norm(self):
    updates = self.per_step_updates
    for i in range(1, STEPS + 1):
      clipper = _clipping.clip_by_global_norm(1. / i)
      # Check that the clipper actually works and global norm is <= max_norm.
      updates, _ = clipper.update(updates, None)
      self.assertAlmostEqual(
          linear_algebra.global_norm(updates), 1. / i, places=6)
      # Check that continuously clipping won't cause numerical issues.
      updates_step, _ = clipper.update(self.per_step_updates, None)
      chex.assert_trees_all_close(updates, updates_step)

  def test_adaptive_grad_clip(self):
    updates = self.per_step_updates
    params = self.init_params
    for i in range(1, STEPS + 1):
      clip_r = 1. / i
      clipper = _clipping.adaptive_grad_clip(clip_r)

      # Check that the clipper actually works and upd_norm is < c * param_norm.
      updates, _ = clipper.update(updates, None, params)
      u_norm, p_norm = jax.tree_util.tree_map(
          _clipping.unitwise_norm, (updates, params))
      cmp = jax.tree_util.tree_map(
          lambda u, p, c=clip_r: u - c * p < 1e-6, u_norm, p_norm)
      for leaf in jax.tree_util.tree_leaves(cmp):
        self.assertTrue(leaf.all())

      # Check that continuously clipping won't cause numerical issues.
      updates_step, _ = clipper.update(self.per_step_updates, None, params)
      chex.assert_trees_all_close(updates, updates_step)

  def test_per_example_layer_norm_clip(self):
    # Test data for a model with two layers and a batch size of 4. The
    # 0th layer has one parameter (shape (1)), and the 1st layer has shape
    # (3, 3, 2).
    grads_flat = [
        jnp.array([[0.5], [1.5], [-2.0], [3.0]]),
        jnp.ones([4, 3, 3, 2], dtype=jnp.float32),
    ]

    with self.subTest(name='Uniform Variant'):
      sum_clipped_grads, num_clipped = _clipping.per_example_layer_norm_clip(
          grads_flat, global_l2_norm_clip=jnp.sqrt(2), uniform=True
      )

      # For the uniform variant, with global_l2_norm_clip=sqrt(2), the
      # per-layer clip norm is 1.0. Thus the per-example per-layer clipped
      # grads are [[0.5], [1.0], [-1.0], [1.0]] and [1 / sqrt(18) ... ].
      # The sums over the 4 input gradients are [1.5] and [4 / sqrt(18) ...].
      self.assertAlmostEqual(sum_clipped_grads[0], 1.5)
      for element in sum_clipped_grads[1].flatten():
        self.assertAlmostEqual(element, 4 / jnp.sqrt(18), places=4)

      # The three values in grads_flat[0] with magnitude > 1.0 are clipped,
      # as are all four values in grads_flat[1].
      self.assertEqual(num_clipped[0], 3)
      self.assertEqual(num_clipped[1], 4)

    with self.subTest(name='Scaled Variant'):
      sum_clipped_grads, num_clipped = _clipping.per_example_layer_norm_clip(
          grads_flat, global_l2_norm_clip=jnp.sqrt(19), uniform=False
      )

      # For the scaled variant, with global_l2_norm_clip=sqrt(19), the
      # per-layer clip norm for the 0th layer is 1.0, and for the 1st layer
      # it is sqrt(18). Thus the per-example per-layer clipped grads are
      # [[0.5], [1.0], [-1.0], [1.0]] and [[1.0] ... ]. The sums over the
      # 4 input gradients are [1.5] and [4.0 ...].
      self.assertAlmostEqual(sum_clipped_grads[0], 1.5)
      for element in sum_clipped_grads[1].flatten():
        self.assertAlmostEqual(element, 4.0)

      # The three values in grads_flat[0] with magnitude > 1.0 are clipped.
      # The grad norms for grads_flat[1] all equal the per-layer clip norm,
      # so none of these grads are clipped.
      self.assertEqual(num_clipped[0], 3)
      self.assertEqual(num_clipped[1], 0)


if __name__ == '__main__':
  absltest.main()
|
testbed/google-deepmind__optax/optax/transforms/_combining.py
ADDED
|
@@ -0,0 +1,255 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Flexibly compose gradient transformations."""
|
| 16 |
+
|
| 17 |
+
from typing import Callable, NamedTuple, Union, Mapping, Hashable
|
| 18 |
+
|
| 19 |
+
import jax
|
| 20 |
+
|
| 21 |
+
from optax._src import base
|
| 22 |
+
from optax._src import wrappers
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def chain(
    *args: base.GradientTransformation,
) -> base.GradientTransformationExtraArgs:
  """Applies a list of chainable update transformations.

  This function creates a new :func:`optax.GradientTransformation` that applies
  a sequence of gradient transformations in order. The ``init`` function of the
  new transformation constructs the optimizer state by concatenating the states
  of the individual transforms, while the ``update`` function applies the
  updates in the given order.

  Examples:

    A transform that scales by -0.1 the adam update:

    >>> import optax
    >>> transform1 = optax.scale_by_adam()
    >>> transform2 = optax.scale(-0.1)
    >>> chained_transform = optax.chain(transform1, transform2)
    >>> params = {'a': 1.0}
    >>> state = chained_transform.init(params)
    >>> updates = {'a': -0.5}
    >>> updates, new_state = chained_transform.update(updates, state, params)

  Args:
    *args: a sequence of chainable (init_fn, update_fn) tuples.

  Returns:
    A :func:`GradientTransformationExtraArgs`, created by chaining the input
    transformations. Note that independent of the argument types, the resulting
    transformation always supports extra args. Any extra arguments passed to the
    returned transformation will be passed only to those transformations in the
    chain that support extra args.
  """

  wrapped = [base.with_extra_args_support(t) for t in args]
  init_fns, update_fns = zip(*wrapped)

  def init_fn(params):
    return tuple(fn(params) for fn in init_fns)

  def update_fn(updates, state, params=None, **extra_args):
    if len(update_fns) != len(state):
      raise ValueError('The number of updates and states has to be the same in '
                       'chain! Make sure you have called init first!')

    new_states = []
    # Thread the updates through each transform, collecting the new states.
    for sub_state, fn in zip(state, update_fns):
      updates, new_sub_state = fn(updates, sub_state, params, **extra_args)
      new_states.append(new_sub_state)
    return updates, tuple(new_states)

  # We opt to always return the GradientTransformationExtraArgs type here,
  # instead of selecting the return type based on the arguments, since it works
  # much better with the currently available type checkers. It also means that
  # users will not get unexpected signature errors if they remove all of the
  # transformations in a chain accepting extra args.
  return base.GradientTransformationExtraArgs(init_fn, update_fn)
def named_chain(
    *transforms: tuple[str, base.GradientTransformation]
) -> base.GradientTransformationExtraArgs:
  """Chains optax gradient transformations.

  A variant of :func:`optax.chain` that allows to name each transformation.

  Here the ``transforms`` are ``(name, transformation)`` pairs, constituted of a
  string ``name`` and an associated transformation ``transformation``. The
  gradient transformation must be an instance of :func:`GradientTransformation`
  or :func:`GradientTransformationExtraArgs`.

  Each ``name`` is used as key for the state of the corresponding transformation
  within the ``named_chain`` state. Thus the state of the transformation
  with a given ``name`` can be easily retrieved as ``opt_state[name]``.

  Examples:

    >>> # tx1 is a GradientTransformation with no extra_args.
    >>> # tx2 is a GradientTransformationExtraArgs that requires `loss`.
    >>> # tx3 is a GradientTransformationExtraArgs that requires `temperature`.
    >>> tx = named_chain(('one', tx1), ('two', tx2), ('three', tx3))
    >>> extra_args={'loss': 0.3, 'temperature': 0.01}
    >>> tx.init(params)
    >>> tx.update(grads, state, params, **extra_args)

  Args:
    *transforms: an arbitrary number of ``(name, tx)`` pairs, constituted of a
      string ``name`` and an associated transformation ``tx``. The latter is a
      :func:`GradientTransformation` or :func:`GradientTransformationExtraArgs`.

  Returns:
    A single (init_fn, update_fn) tuple.
  """

  names = [name for name, _ in transforms]

  # Duplicate names would silently collapse states onto one dict key.
  if len(names) != len(set(names)):
    raise ValueError(
        f'Named transformations must have unique names, but got {names}')

  wrapped = [
      (name, base.with_extra_args_support(t)) for name, t in transforms]

  def init_fn(params):
    return {name: tx.init(params) for name, tx in wrapped}

  def update_fn(updates, state, params=None, **extra_args):
    new_state = {}
    # Thread the updates through each named transform in declaration order.
    for name, tx in wrapped:
      updates, new_state[name] = tx.update(
          updates, state[name], params, **extra_args)
    return updates, new_state

  return base.GradientTransformationExtraArgs(init_fn, update_fn)
class PartitionState(NamedTuple):
  """Opt state for :func:`partition`.

  Attributes:
    inner_states: mapping from each transform label to the state of the
      corresponding (masked) inner transformation.
  """
  inner_states: Mapping[Hashable, base.OptState]
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def partition(
    transforms: Mapping[Hashable, base.GradientTransformation],
    param_labels: Union[base.PyTree, Callable[[base.PyTree], base.PyTree]],
    *,
    mask_compatible_extra_args: bool = False,
) -> base.GradientTransformationExtraArgs:
  """Partitions params and applies a different transformation to each subset.

  Sometimes you may want to apply different transformations to different
  parameters. For example, you may want to apply Adam to the weights of a
  neural network, but SGD to the biases. This function allows you to do that.

  Examples:

    Below is an example where we apply Adam to the weights and SGD to the
    biases of a 2-layer neural network::

      >>> import optax
      >>> import jax
      >>> import jax.numpy as jnp

      >>> def map_nested_fn(fn):
      ...   '''Recursively apply `fn` to key-value pairs of a nested dict.'''
      ...   def map_fn(nested_dict):
      ...     return {k: (map_fn(v) if isinstance(v, dict) else fn(k, v))
      ...             for k, v in nested_dict.items()}
      ...   return map_fn

      >>> params = {'linear_1': {'w': jnp.zeros((5, 6)), 'b': jnp.zeros(5)},
      ...           'linear_2': {'w': jnp.zeros((6, 1)), 'b': jnp.zeros(1)}}
      >>> gradients = jtu.tree_map(jnp.ones_like, params)  # dummy gradients

      >>> label_fn = map_nested_fn(lambda k, _: k)
      >>> tx = optax.multi_transform(
      ...     {'w': optax.adam(1.0), 'b': optax.sgd(1.0)}, label_fn)
      >>> state = tx.init(params)
      >>> updates, new_state = tx.update(gradients, state, params)
      >>> new_params = optax.apply_updates(params, updates)

    Instead of providing a ``label_fn``, you may provide a PyTree of labels
    directly. Also, this PyTree may be a prefix of the parameters PyTree. This
    is demonstrated in the GAN pseudocode below::

      >>> generator_params = ...
      >>> discriminator_params = ...
      >>> all_params = (generator_params, discriminator_params)
      >>> param_labels = ('generator', 'discriminator')

      >>> tx = optax.multi_transform(
      >>>     {'generator': optax.adam(0.1), 'discriminator': optax.adam(0.5)},
      >>>     param_labels)

    If you would like to not optimize some parameters, you may wrap
    :func:`optax.multi_transform` with :func:`optax.masked`.

  Args:
    transforms: A mapping from labels to transformations. Each transformation
      will only be applied to parameters with the same label.
    param_labels: A PyTree that is the same shape or a prefix of the
      parameters/updates (or a function that returns one given the parameters
      as input). The leaves of this PyTree correspond to the keys of the
      transforms (therefore the values at the leaves must be a subset of the
      keys).
    mask_compatible_extra_args: Whether to also apply the same masking to
      extra_arg fields with the same tree structure as params/updates.

  Returns:
    A :func:`optax.GradientTransformationExtraArgs` that implements an ``init``
    and ``update`` function.
  """

  # Promote every inner transform so all of them uniformly accept
  # **extra_args in their update function.
  transforms = {
      k: base.with_extra_args_support(v)
      for k, v in transforms.items()
  }

  def make_mask(labels, group):
    # Boolean pytree: True at leaves whose label equals this group's key.
    return jax.tree_util.tree_map(lambda label: label == group, labels)

  def init_fn(params):
    labels = param_labels(params) if callable(param_labels) else param_labels

    # Every label appearing on a leaf must have a matching transform key.
    label_set = set(jax.tree_util.tree_leaves(labels))
    if not label_set.issubset(transforms.keys()):
      raise ValueError('Some parameters have no corresponding transformation.\n'
                       f'Parameter labels: {list(sorted(label_set))} \n'
                       f'Transforms keys: {list(sorted(transforms.keys()))} \n')

    # Each group gets its own masked copy of the inner transform, initialized
    # on the full params tree (masking hides the other groups' leaves).
    inner_states = {
        group: wrappers.masked(
            tx, make_mask(labels, group),
            mask_compatible_extra_args=mask_compatible_extra_args).init(params)
        for group, tx in transforms.items()
    }
    return PartitionState(inner_states)

  def update_fn(updates, state, params=None, **extra_args):
    labels = param_labels(updates) if callable(param_labels) else param_labels
    new_inner_state = {}
    # `updates` is threaded through each masked transform in turn; masking
    # guarantees each transform only modifies its own group's leaves.
    for group, tx in transforms.items():
      masked_tx = wrappers.masked(
          tx, make_mask(labels, group),
          mask_compatible_extra_args=mask_compatible_extra_args)
      updates, new_inner_state[group] = masked_tx.update(
          updates, state.inner_states[group], params, **extra_args)
    return updates, PartitionState(new_inner_state)

  return base.GradientTransformationExtraArgs(init_fn, update_fn)
|
testbed/google-deepmind__optax/optax/transforms/_combining_test.py
ADDED
|
@@ -0,0 +1,284 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for `optax.transforms._combining.py`."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
from absl.testing import parameterized
|
| 19 |
+
|
| 20 |
+
import chex
|
| 21 |
+
import jax
|
| 22 |
+
import jax.numpy as jnp
|
| 23 |
+
|
| 24 |
+
from optax._src import alias
|
| 25 |
+
from optax._src import base
|
| 26 |
+
from optax._src import transform
|
| 27 |
+
from optax._src import update
|
| 28 |
+
from optax.transforms import _accumulation
|
| 29 |
+
from optax.transforms import _combining
|
| 30 |
+
|
| 31 |
+
STEPS = 50
|
| 32 |
+
LR = 1e-2
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class CombiningTest(chex.TestCase):
  """Checks that `chain` is equivalent to applying transforms in sequence."""

  def setUp(self):
    super().setUp()
    self.init_params = (jnp.array([1., 2.]), jnp.array([3., 4.]))
    self.per_step_updates = (jnp.array([500., 5.]), jnp.array([300., 3.]))

  @chex.all_variants
  def test_chain(self):
    transformations = [
        transform.scale_by_adam(),
        _accumulation.trace(decay=0, nesterov=False),
        transform.scale(-LR)]

    # Apply updates with chain.
    chain_params = self.init_params
    chained_transforms = _combining.chain(*transformations)
    state = chained_transforms.init(chain_params)
    # The chained state is a tuple of the per-transform states.
    self.assertIsInstance(state, tuple)

    @self.variant
    def update_fn(updates, state):
      return chained_transforms.update(updates, state)

    for _ in range(STEPS):
      updates, state = update_fn(self.per_step_updates, state)
      self.assertIsInstance(state, tuple)
      chain_params = update.apply_updates(chain_params, updates)

    # Manually apply sequence of transformations.
    manual_params = self.init_params
    states = [t.init(manual_params) for t in transformations]
    for _ in range(STEPS):
      updates = self.per_step_updates
      new_states = []
      # Thread the updates through each transform, mirroring what chain does.
      for t, s in zip(transformations, states):
        updates, state = t.update(updates, s)
        new_states.append(state)
      manual_params = update.apply_updates(manual_params, updates)
      states = new_states

    # Check equivalence.
    chex.assert_trees_all_close(manual_params, chain_params, rtol=1e-4)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def _map_keys_fn(fn):
|
| 81 |
+
def map_fn(nested_dict):
|
| 82 |
+
return {k: (map_fn(v) if isinstance(v, dict) else fn(k, v))
|
| 83 |
+
for k, v in nested_dict.items()}
|
| 84 |
+
return map_fn
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
class ExtraArgsTest(chex.TestCase):
  """Tests for extra-args handling when chaining transformations."""

  def test_extra_args(self):
    def init_fn(params):
      del params
      return tuple()

    # Arguments required by a transformation should be keyword-only.
    # For example, the loss argument in this transformation.
    def update_fn(updates, state, params=None, *, loss, **extra_args):
      # Extra args should always be accepted.
      del extra_args, params
      assert loss == 1
      return updates, state

    t = base.GradientTransformationExtraArgs(init_fn, update_fn)
    result = _combining.chain(alias.adam(1e-3), t)
    # Chaining with an extra-args transform yields an extra-args transform.
    self.assertIsInstance(result, base.GradientTransformationExtraArgs)

    params = {'a': 1, 'b': 2}
    state = result.init(params)
    # Unknown kwargs must be silently ignored by the chain.
    result.update(params, state, loss=1, ignored_kwarg='hi')

  def test_extra_args_chaining(self):
    def init_fn(params):
      del params
      return {}
    def update_fn(updates, state, params=None):
      del params
      return updates, state

    # Possible gotcha: Chaining regular gradient transformations results in
    # a transformation that supports extra args.
    t1 = base.GradientTransformation(init_fn, update_fn)
    t2 = _combining.chain(t1, t1)
    self.assertIsInstance(t2, base.GradientTransformation)
    self.assertIsInstance(t2, base.GradientTransformationExtraArgs)

    t3 = base.with_extra_args_support(t2)
    self.assertIsInstance(t3, base.GradientTransformationExtraArgs)

  def test_extra_args_positional_params(self):
    def init_fn(params):
      del params
      return tuple()

    def update_fn(updates, state, params=None):
      assert params is not None
      return updates, state

    def update_fn_kwargs(updates, state, params=None, **extra_args):
      del extra_args
      assert params is not None
      return updates, state

    t1 = base.GradientTransformation(init_fn, update_fn)
    t2 = base.GradientTransformationExtraArgs(init_fn, update_fn_kwargs)
    opt = _combining.chain(t1, t2)
    params = {'a': 1, 'b': 2}
    state = opt.init(params)
    # `params` must be forwarded whether passed positionally or by keyword.
    opt.update(params, state, params, ignored_kwarg='hi')
    opt.update(params, state, params=params, ignored_kwarg='hi')
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
class PartitionTest(chex.TestCase):
  """Tests for the partition wrapper."""

  @chex.all_variants
  @parameterized.parameters(True, False)
  def test_partition(self, use_fn):
    params = {'a1': 1., 'b1': 2., 'z1': {'a2': 3., 'z2': {'c1': 4.}}}
    params = jax.tree_util.tree_map(jnp.asarray, params)
    input_updates = jax.tree_util.tree_map(lambda x: x / 10.0, params)
    tx_dict = {'a': transform.scale(-1.0),
               'b': transform.ema(0.0),  # stateful
               'c': transform.scale(2.0)}
    # Label each leaf by the first letter of its key ('a1' -> 'a', ...).
    param_labels = _map_keys_fn(lambda k, _: k[0])
    if not use_fn:
      param_labels = param_labels(params)
    tx = _combining.partition(tx_dict, param_labels)
    update_fn = self.variant(tx.update)
    state = self.variant(tx.init)(params)

    # Expected effect of each group's transform on its own leaves.
    correct_update_fn = _map_keys_fn(
        lambda k, v: {'a': -v, 'b': v, 'c': 2.0*v}[k[0]])

    updates, state = update_fn(input_updates, state, params)
    correct_updates = correct_update_fn(input_updates)
    chex.assert_trees_all_close(updates, correct_updates)

    # Check repeated application, this time with no params.
    correct_updates = correct_update_fn(correct_updates)
    updates, _ = update_fn(updates, state)
    chex.assert_trees_all_close(updates, correct_updates)

  def test_extra_args(self):

    class ArgNotEqual1Error(ValueError):
      """Raised when argument not set as expected."""

    def init(params):
      return {'mu': params}

    def update_with_arg(updates, state, params=None, *, arg, **extra_args):
      del params, extra_args
      if arg != 1:
        raise ArgNotEqual1Error()
      return updates, state

    def update_without_arg(updates, state, params=None):
      del params
      return updates, state

    opt_no_arg = base.GradientTransformation(init, update_without_arg)
    opt_extra_arg = base.GradientTransformationExtraArgs(init, update_with_arg)

    opt = _combining.partition(
        {
            'a': opt_no_arg,
            'b': opt_extra_arg,
        },
        ('a', 'b'),
    )

    fake_params = ({'u': jnp.array([1])}, {'v': jnp.array([1])})
    state = opt.init(fake_params)

    # Missing required keyword-only arg surfaces as a TypeError.
    with self.assertRaises(TypeError):
      opt.update(fake_params, state)
    # Wrong value for the arg triggers the inner transform's error.
    with self.assertRaises(ArgNotEqual1Error):
      opt.update(fake_params, state, arg=2, ignored_kwarg='hi')
    opt.update(fake_params, state, arg=1, ignored_kwarg='hi')

  @parameterized.parameters(list, tuple, dict)
  def test_empty(self, container):
    # An empty pytree of updates passes through untouched.
    init_fn, update_fn = _combining.partition(
        {0: alias.sgd(1.)}, lambda _: 0)
    updates, _ = update_fn(container(), init_fn(container()))
    self.assertEqual(updates, container())

  @chex.all_variants
  @parameterized.parameters(
      (False, False), (False, True), (True, False), (True, True))
  def test_labels_mismatch(self, use_extra_label, use_fn):
    # The labels from label_fn must be a subset of the keys for the tx.
    params = {'a': 1., 'b': [2., 3.], 'c': {'d': 4., 'e': (5., 6.)}}
    params = jax.tree_util.tree_map(jnp.asarray, params)
    label_tree = {'a': 0, 'b': [1, 0], 'c': 1}  # prefix of params

    if use_extra_label:
      # Label 3 has no corresponding transform key -> init must raise.
      label_tree['a'] = 3

    transforms = {0: alias.sgd(1.),
                  1: alias.adam(1., b1=0., b2=0.),
                  2: _accumulation.trace(1.0)}
    init_fn, update_fn = _combining.partition(
        transforms, (lambda _: label_tree) if use_fn else label_tree)

    if use_extra_label:
      with self.assertRaises(ValueError):
        self.variant(init_fn)(params)
    else:
      state = self.variant(init_fn)(params)
      updates = jax.tree_util.tree_map(lambda x: x / 10.0, params)
      self.variant(update_fn)(updates, state)
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def scale_by_loss():
  """Scales the gradient by the inverse of the ``loss`` extra argument.

  Test helper: a stateless ``GradientTransformationExtraArgs`` whose required
  keyword-only ``loss`` argument divides every update leaf.
  """

  def update_fn(updates, state, params, *, loss, **extra_args):
    del params, extra_args
    updates = jax.tree_util.tree_map(
        lambda u: u / loss, updates)
    return updates, state

  return base.GradientTransformationExtraArgs(base.init_empty_state, update_fn)
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
class NamedChainTest(absltest.TestCase):
  """Tests for `named_chain`."""

  def test_named_chain(self):
    tx = _combining.named_chain(
        ('scale', transform.scale(0.1)),
        ('scale_loss', scale_by_loss()),
    )

    params = {'a': jnp.ones((4,))}
    grads = params

    opt_state = tx.init(params)
    # Grads are scaled by 0.1 then divided by loss=0.1, so the net effect
    # is the identity.
    updates, _ = tx.update(grads, opt_state, params, loss=0.1)

    chex.assert_trees_all_close(updates, {'a': jnp.ones((4,))})
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
if __name__ == '__main__':
|
| 284 |
+
absltest.main()
|
testbed/google-deepmind__optax/optax/transforms/_conditionality.py
ADDED
|
@@ -0,0 +1,253 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Wrappers that allow transformations to be applied conditionally."""
|
| 16 |
+
|
| 17 |
+
from typing import Any, NamedTuple, Protocol
|
| 18 |
+
|
| 19 |
+
import chex
|
| 20 |
+
from jax import lax
|
| 21 |
+
from jax import tree_util as jtu
|
| 22 |
+
import jax.numpy as jnp
|
| 23 |
+
|
| 24 |
+
from optax import tree_utils as otu
|
| 25 |
+
from optax._src import base
|
| 26 |
+
from optax._src import numerics
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class ConditionFn(Protocol):
  """Condition function for conditional transformations."""

  def __call__(
      self,
      step: chex.Array,
      **extra_args: Any,
  ) -> chex.Array:
    """Evaluates the condition, with optional extra arguments.

    Args:
      step: a counter (array of shape [] and dtype ``int32``).
      **extra_args: Additional keyword arguments passed to this condition fn.

    Returns:
      a boolean array of shape [] and dtype ``bool`` indicating whether the
      inner transformation should be called.
    """
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class ConditionallyTransformState(NamedTuple):
  """Maintains inner transform state and adds a step counter.

  Attributes:
    inner_state: state of the wrapped inner transformation.
    step: number of times ``update`` has been called (int32 scalar).
  """
  inner_state: Any
  step: chex.Array
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def conditionally_transform(
    inner: base.GradientTransformation,
    should_transform_fn: ConditionFn,
    forward_extra_args: bool = False,
) -> base.GradientTransformationExtraArgs:
  """Calls the inner update function only at certain steps.

  Creates a transformation wrapper that conditionally applies the inner
  gradient transformation, and if the condition is not met, just passes the
  updates and inner state through unchanged. The behaviour is controlled by a
  user specified function ``should_transform_fn`` that is called by
  ``conditionally_transform`` passing as input a counter of the number of
  times that the ``update`` function has been previously called; the user
  specified function must return a boolean controlling whether the inner
  transformation should be called.

  WARNING: if instead you want to set the ``updates`` to zero when the
  condition is not met, you can use the ``conditionally_mask`` wrapper.

  Args:
    inner: the inner transformation.
    should_transform_fn: function takes in a ``step`` counter (array of shape
      [] and dtype ``int32``), and returns a boolean array of shape []. If
      ``forward_extra_args`` is set to True, any extra arguments are also
      forwarded to the ``should_transform_fn``.
    forward_extra_args: forward extra args to ``should_transform_fn``.

  Returns:
    A new ``GradientTransformationExtraArgs``.

  .. versionadded:: 0.2.3
  """
  inner = base.with_extra_args_support(inner)

  def init_fn(params):
    return ConditionallyTransformState(
        inner_state=inner.init(params), step=jnp.zeros([], dtype=jnp.int32))

  def update_fn(updates, state, params=None, **extra_args):

    def do_update(_):
      # Condition met: run the wrapped transformation.
      return inner.update(updates, state.inner_state, params, **extra_args)

    def reject_update(_):
      # Condition not met: pass updates and inner state through unchanged.
      return updates, state.inner_state

    condition_kwargs = extra_args if forward_extra_args else {}
    # lax.cond traces both branches but executes only one at runtime.
    updates, new_inner_state = lax.cond(
        should_transform_fn(state.step, **condition_kwargs),
        do_update, reject_update, operand=None)
    return updates, ConditionallyTransformState(
        new_inner_state, numerics.safe_int32_increment(state.step))

  return base.GradientTransformationExtraArgs(init_fn, update_fn)
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class ConditionallyMaskState(NamedTuple):
  """Maintains inner transform state and adds a step counter.

  Attributes:
    step: number of times ``update`` has been called (int32 scalar).
    inner_state: state of the wrapped inner transformation.
  """
  step: chex.Array
  inner_state: base.OptState
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def conditionally_mask(
    inner: base.GradientTransformation,
    should_transform_fn: ConditionFn,
    forward_extra_args: bool = False,
) -> base.GradientTransformationExtraArgs:
  """Calls the inner update function only at certain steps.

  Creates a transformation wrapper that conditionally applies the inner
  gradient transformation, and if the condition is not met, the updates are
  set to 0, while the inner state is passed through unchanged. The behaviour
  is controlled by a user specified function ``should_transform_fn`` that is
  called by ``conditionally_transform`` passing as input a counter of the
  number of times that the ``update`` function has been previously called;
  the user specified function must return a boolean controlling whether the
  inner transformation should be called.

  WARNING: if instead you want to leave ``updates`` unchanged when the
  condition is not met, you can use the ``conditionally_transform`` wrapper.

  Args:
    inner: the inner transformation.
    should_transform_fn: function takes in a step counter (array of shape []
      and dtype ``int32``), and returns a boolean array of shape []. If
      ``forward_extra_args`` is set to True, any extra arguments are also
      forwarded to the ``should_transform_fn``.
    forward_extra_args: forward extra args to ``should_transform_fn``.

  Returns:
    A new ``GradientTransformationExtraArgs``.

  .. versionadded:: 0.2.3
  """
  inner = base.with_extra_args_support(inner)

  def init_fn(params):
    return ConditionallyMaskState(
        step=jnp.zeros([], jnp.int32), inner_state=inner.init(params)
    )

  def update_fn(updates, state, params=None, **extra_args):

    def do_update(_):
      # Condition met: run the wrapped transformation.
      return inner.update(updates, state.inner_state, params, **extra_args)

    def reject_update(_):
      # Condition not met: zero out the updates, keep inner state as-is.
      return otu.tree_zeros_like(updates), state.inner_state

    condition_kwargs = extra_args if forward_extra_args else {}
    updates, new_inner_state = lax.cond(
        should_transform_fn(state.step, **condition_kwargs),
        do_update, reject_update, operand=None)

    return updates, ConditionallyMaskState(
        step=numerics.safe_int32_increment(state.step),
        inner_state=new_inner_state,
    )

  return base.GradientTransformationExtraArgs(init_fn, update_fn)
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
class ApplyIfFiniteState(NamedTuple):
  """State of the `GradientTransformation` returned by `apply_if_finite`.

  Attributes:
    notfinite_count: Number of consecutive gradient updates containing an Inf
      or a NaN. This number is reset to 0 whenever a gradient update without
      an Inf or a NaN is done.
    last_finite: Whether or not the last gradient update contained an Inf or a
      NaN.
    total_notfinite: Total number of gradient updates containing an Inf or
      a NaN since this optimizer was initialised. This number is never reset.
    inner_state: The state of the inner `GradientTransformation`.
  """
  # NOTE(review): fields are annotated `Any`; in practice they hold jnp
  # scalars (int32 counters, bool flag) and the inner OptState — confirm.
  notfinite_count: Any
  last_finite: Any
  total_notfinite: Any
  inner_state: Any
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def apply_if_finite(
    inner: base.GradientTransformation,
    max_consecutive_errors: int
) -> base.GradientTransformationExtraArgs:
  """A function that wraps an optimizer to make it robust to a few NaNs or Infs.

  The purpose of this function is to prevent any optimization to happen if the
  gradients contain NaNs or Infs. That is, when a NaN or Inf is detected in the
  gradients, the wrapped optimizer ignores that gradient update. If the NaNs or
  Infs persist after a given number of updates, the wrapped optimizer gives up
  and accepts the update.

  Args:
    inner: Inner transformation to be wrapped.
    max_consecutive_errors: Maximum number of consecutive gradient updates
      containing NaNs or Infs that the wrapped optimizer will ignore. After
      that many ignored updates, the optimizer will give up and accept.

  Returns:
    New ``GradientTransformationExtraArgs``.
  """

  inner = base.with_extra_args_support(inner)

  def init(params):
    return ApplyIfFiniteState(
        notfinite_count=jnp.zeros([], jnp.int32),
        last_finite=jnp.array(True, jnp.bool_),
        total_notfinite=jnp.zeros([], jnp.int32),
        inner_state=inner.init(params))

  def update(updates, state, params=None, **extra_args):
    inner_state = state.inner_state
    # True iff every leaf of the updates pytree is entirely finite.
    flat_updates = jtu.tree_flatten(updates)[0]
    isfinite = jnp.all(
        jnp.array([jnp.all(jnp.isfinite(p)) for p in flat_updates]))
    # Reset the consecutive-error counter on a finite update, otherwise
    # increment it (with overflow-safe increment).
    notfinite_count = jnp.where(
        isfinite, jnp.zeros([], jnp.int32),
        numerics.safe_int32_increment(state.notfinite_count))

    def do_update(_):
      return inner.update(updates, inner_state, params, **extra_args)

    def reject_update(_):
      # Drop the non-finite update: emit zeros, keep inner state unchanged.
      return otu.tree_zeros_like(updates), inner_state

    # Apply the update if it is finite, or if we have ignored too many
    # consecutive non-finite updates already ("give up and accept").
    updates, new_inner_state = lax.cond(
        jnp.logical_or(isfinite, notfinite_count > max_consecutive_errors),
        do_update, reject_update, operand=None)

    return updates, ApplyIfFiniteState(
        notfinite_count=notfinite_count,
        last_finite=isfinite,
        total_notfinite=jnp.where(
            isfinite, state.total_notfinite,
            numerics.safe_int32_increment(state.total_notfinite)),
        inner_state=new_inner_state)

  return base.GradientTransformationExtraArgs(init=init, update=update)
|
testbed/google-deepmind__optax/optax/transforms/_conditionality_test.py
ADDED
|
@@ -0,0 +1,286 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for optax.transforms._conditionality."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
from absl.testing import parameterized
|
| 19 |
+
|
| 20 |
+
import chex
|
| 21 |
+
import jax
|
| 22 |
+
import jax.numpy as jnp
|
| 23 |
+
|
| 24 |
+
from optax._src import alias
|
| 25 |
+
from optax._src import base
|
| 26 |
+
from optax._src import combine
|
| 27 |
+
from optax._src import transform
|
| 28 |
+
from optax._src import update
|
| 29 |
+
from optax.transforms import _conditionality
|
| 30 |
+
from optax.transforms import _constraining
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def _build_sgd():
  """Returns a plain (stateless) SGD optimizer with unit learning rate."""
  return alias.sgd(1.)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _build_stateful_sgd():
  """Returns an SGD optimizer with a non-trivial (momentum) state."""
  # This SGD behaves like _build_sgd but also tests the optimizer state. The
  # momentum is set to zero rather than None so that the momentum terms are
  # calculated, but do not change the results.
  return alias.sgd(1., momentum=0.)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def _build_sgd_extra_args():
  """Returns SGD chained with a no-op transform that accepts extra kwargs."""

  def noop_init(params):
    del params
    return {'foo': 1}

  def noop_update(grads, state, params=None, *, foo=None, **extra_args):
    # Pass gradients and state through untouched; extra args are ignored.
    del extra_args, foo, params
    return grads, state

  passthrough = base.GradientTransformationExtraArgs(noop_init, noop_update)
  return combine.chain(_build_sgd(), passthrough)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class ConditionalityTest(parameterized.TestCase):
  """Tests for the apply_if_finite wrapper."""

  @chex.variants(with_jit=True, without_jit=True, with_pmap=True)
  @parameterized.named_parameters(
      ('sgd', _build_sgd),
      ('stateful_sgd', _build_stateful_sgd),
      ('sgd_extra_args', _build_sgd_extra_args),
  )
  def test_apply_if_finite(self, opt_builder):
    one = jnp.array(1.)
    nan = jnp.array(jnp.nan)

    def fn(p, x):
      return p * x

    params = jnp.array(0.)
    # Wrap with max_consecutive_errors=2: up to 2 consecutive non-finite
    # updates are rejected before the wrapper gives up and accepts.
    opt = _conditionality.apply_if_finite(opt_builder(), 2)
    state = opt.init(params)
    grads_fn = jax.grad(self.variant(fn))
    # Do one successful param update
    grads = grads_fn(params, one)
    updates, state = opt.update(grads, state, params)
    params = update.apply_updates(params, updates)
    # We know exactly what should be the value of params since we are
    # effectively using sgd in all cases.
    self.assertEqual(-1., float(jax.tree_util.tree_flatten(params)[0][0]))
    self.assertTrue(bool(getattr(state, 'last_finite')))
    # Check 2 rejected param updates
    for step in range(2):
      grads = grads_fn(params, nan)
      updates, state = opt.update(grads, state, params)
      params = update.apply_updates(params, updates)
      self.assertEqual(-1., float(jax.tree_util.tree_flatten(params)[0][0]))
      self.assertFalse(bool(getattr(state, 'last_finite')))
      self.assertEqual(step + 1, int(getattr(state, 'notfinite_count')))
    # Next successful param update
    grads = grads_fn(params, one)
    updates, state = opt.update(grads, state, params)
    params = update.apply_updates(params, updates)
    self.assertEqual(-2., float(jax.tree_util.tree_flatten(params)[0][0]))
    self.assertTrue(bool(getattr(state, 'last_finite')))
    # Again 2 rejected param updates
    for step in range(2):
      grads = grads_fn(params, nan)
      updates, state = opt.update(grads, state, params)
      params = update.apply_updates(params, updates)
      self.assertEqual(-2., float(jax.tree_util.tree_flatten(params)[0][0]))
      self.assertFalse(bool(getattr(state, 'last_finite')))
      self.assertEqual(step + 1, int(getattr(state, 'notfinite_count')))
    # Next param update with NaN is accepted since we reached maximum
    grads = grads_fn(params, nan)
    updates, state = opt.update(grads, state, params)
    params = update.apply_updates(params, updates)
    self.assertTrue(bool(jnp.isnan(jax.tree_util.tree_flatten(params)[0][0])))
    self.assertEqual(5, int(getattr(state, 'total_notfinite')))

  def test_apply_if_finite_pmap(self):
    # Unlike in `test_apply_if_finite`:
    # * pmap is applied to the gradient computation and the optimisation;
    # * the NaNs are caused inside the function and do not come from the inputs.
    half = jnp.ones([1]) / 2.
    two = jnp.ones([1]) * 2.  # Causes a NaN in arctanh
    def fn(p, x):
      return jnp.arctanh(x) * p

    opt = _conditionality.apply_if_finite(alias.sgd(1.), 2)
    def fn_update(params, opt_state, x):
      grads = jax.grad(fn)(params, x)
      grads = jax.lax.psum(grads, axis_name='i')
      updates, new_opt_state = opt.update(grads, opt_state, params)
      new_params = update.apply_updates(params, updates)
      return new_params, new_opt_state
    fn_update = jax.pmap(fn_update, axis_name='i')

    params = jnp.array(0.)
    opt_state = opt.init(params)
    # Add a leading device axis of size 1 for pmap.
    params = jax.tree_util.tree_map(lambda x: x[None], params)
    opt_state = jax.tree_util.tree_map(lambda x: x[None], opt_state)
    # Do one successful param update
    params, opt_state = fn_update(params, opt_state, half)
    self.assertTrue(bool(opt_state.last_finite))
    # Check 2 rejected param updates
    for step in range(2):
      params, opt_state = fn_update(params, opt_state, two)
      self.assertFalse(bool(opt_state.last_finite))
      self.assertEqual(step + 1, opt_state.notfinite_count.item())
    # Next successful param update
    params, opt_state = fn_update(params, opt_state, half)
    self.assertTrue(bool(opt_state.last_finite))
    # Again 2 rejected param updates
    for step in range(2):
      params, opt_state = fn_update(params, opt_state, two)
      self.assertFalse(bool(opt_state.last_finite))
      self.assertEqual(step + 1, opt_state.notfinite_count.item())
    # Next param update with NaN is accepted since we reached maximum
    _, opt_state = fn_update(params, opt_state, two)
    self.assertEqual(5, opt_state.total_notfinite.item())
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
class ConditionallyTransformTest(chex.TestCase):
  """Tests for the conditionally_transform wrapper."""

  # Number of steps during which `should_update` below allows updates.
  NUM_STEPS = 3

  @chex.all_variants
  def test_stateless_inner(self):
    params = jnp.zeros([])
    grads = jnp.ones([])

    def should_update(step):
      return step < ConditionallyTransformTest.NUM_STEPS

    opt = _conditionality.conditionally_transform(
        transform.scale(2.), should_update)
    state = opt.init(params)
    update_fn = self.variant(opt.update)
    for _ in range(ConditionallyTransformTest.NUM_STEPS):
      updates, state = update_fn(grads, state)
      self.assertEqual(updates, 2.)
    # Further updates stop calling the inner optimiser.
    for _ in range(5):
      updates, state = update_fn(grads, state)
      self.assertEqual(updates, 1.)

  @chex.all_variants
  def test_statefull_inner(self):
    params = jnp.zeros([])
    grads_with_nan = jnp.array(float('nan'))
    grads = jnp.ones([])

    def should_update(step):
      return step < ConditionallyTransformTest.NUM_STEPS

    opt = _conditionality.conditionally_transform(
        _constraining.zero_nans(), should_update)
    state = opt.init(params)
    update_fn = self.variant(opt.update)
    for _ in range(ConditionallyTransformTest.NUM_STEPS - 1):
      updates, state = update_fn(grads_with_nan, state)
      self.assertEqual(updates, 0.)
      self.assertEqual(state.inner_state.found_nan, True)
    updates, state = update_fn(grads, state)
    self.assertEqual(updates, 1.)
    self.assertEqual(state.inner_state.found_nan, False)
    # Further updates stop calling the inner optimiser: with
    # conditionally_transform the raw (NaN) gradient passes through.
    for _ in range(5):
      updates, state = update_fn(grads_with_nan, state)
      # Warning: do not use assertEqual with a NaN as NaN == NaN returns False.
      self.assertTrue(jnp.isnan(updates))
      # Inner state is not updated.
      self.assertEqual(state.inner_state.found_nan, False)
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
class ConditionallyMaskTest(chex.TestCase):
  """Tests for the conditionally_mask wrapper."""

  # Number of steps during which `should_update` below allows updates.
  NUM_STEPS = 3
  # Loss threshold used by the extra-args predicate below.
  MIN_LOSS = 0.1

  @chex.all_variants
  def test_stateless_inner(self):
    params = jnp.zeros([])
    grads = jnp.ones([])

    def should_update(step):
      return step < ConditionallyMaskTest.NUM_STEPS

    opt = _conditionality.conditionally_mask(transform.scale(2.), should_update)
    state = opt.init(params)
    update_fn = self.variant(opt.update)
    for _ in range(ConditionallyMaskTest.NUM_STEPS):
      updates, state = update_fn(grads, state)
      self.assertEqual(updates, 2.)
    # Further updates stop calling the inner optimiser: with
    # conditionally_mask the emitted updates are zeroed out.
    for _ in range(5):
      updates, state = update_fn(grads, state)
      self.assertEqual(updates, 0.)

  @chex.all_variants
  def test_statefull_inner(self):
    params = jnp.zeros([])
    grads_with_nan = jnp.array(float('nan'))
    grads = jnp.ones([])

    def should_update(step):
      return step < ConditionallyMaskTest.NUM_STEPS

    opt = _conditionality.conditionally_mask(
        _constraining.zero_nans(), should_update)
    state = opt.init(params)
    update_fn = self.variant(opt.update)
    for _ in range(ConditionallyMaskTest.NUM_STEPS - 1):
      updates, state = update_fn(grads_with_nan, state)
      self.assertEqual(updates, 0.)
      self.assertEqual(state.inner_state.found_nan, True)
    updates, state = update_fn(grads, state)
    self.assertEqual(updates, 1.)
    self.assertEqual(state.inner_state.found_nan, False)
    # Further updates stop calling the inner optimiser.
    for _ in range(5):
      updates, state = update_fn(grads_with_nan, state)
      self.assertEqual(updates, 0.)
      # Inner state is not updated.
      self.assertEqual(state.inner_state.found_nan, False)

  @chex.all_variants
  def test_stateless_inner_with_extra_args(self):
    params = jnp.zeros([])
    grads = jnp.ones([])

    def should_update(step, loss, **extra_args):
      # Predicate driven by an extra keyword argument rather than the step.
      del step, extra_args
      return loss > ConditionallyMaskTest.MIN_LOSS

    opt = _conditionality.conditionally_mask(
        transform.scale(2.), should_update, forward_extra_args=True)
    state = opt.init(params)
    update_fn = self.variant(opt.update)
    for _ in range(ConditionallyMaskTest.NUM_STEPS):
      updates, state = update_fn(grads, state, loss=0.2)
      self.assertEqual(updates, 2.)
    # Further updates stop calling the inner optimiser.
    for _ in range(5):
      updates, state = update_fn(grads, state, loss=0.)
      self.assertEqual(updates, 0.)
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
if __name__ == '__main__':
  # Run this module's tests when executed as a script.
  absltest.main()
|
testbed/google-deepmind__optax/optax/transforms/_constraining.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Gradient transformations used to enforce specific constraints."""
|
| 16 |
+
|
| 17 |
+
from typing import Any, NamedTuple
|
| 18 |
+
|
| 19 |
+
from jax import tree_util as jtu
|
| 20 |
+
import jax.numpy as jnp
|
| 21 |
+
|
| 22 |
+
from optax._src import base
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# This transformation carries no state of its own.
NonNegativeParamsState = base.EmptyState


def keep_params_nonnegative() -> base.GradientTransformation:
  """Modifies the updates to keep parameters non-negative, i.e. >= 0.

  This transformation ensures that parameters after the update will be
  larger than or equal to zero.
  In a chain of transformations, this should be the last one.

  WARNING: the transformation expects input params to be non-negative.
  When params is negative the transformed update will move them to 0.

  Returns:
    A `GradientTransformation` object.
  """

  def init_fn(params):
    del params
    return NonNegativeParamsState()

  def update_fn(updates, state, params):
    # Params are required to know where the non-negativity boundary lies.
    if params is None:
      raise ValueError(base.NO_PARAMS_MSG)

    def _clip_to_zero(p, u):
      # If p + u would land below zero, replace the update by -p so the
      # resulting parameter is exactly zero.
      return jnp.where((p + u) < 0., -p, u)

    return jtu.tree_map(_clip_to_zero, params, updates), state

  return base.GradientTransformation(init_fn, update_fn)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class ZeroNansState(NamedTuple):
  """Contains a tree.

  The entry `found_nan` has the same tree structure as that of the parameters.
  Each leaf is a single boolean which contains True iff a NaN was detected in
  the corresponding parameter array at the last call to `update`.
  """
  found_nan: Any  # Pytree of scalar booleans, one per parameter leaf.
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def zero_nans() -> base.GradientTransformation:
  """A transformation which replaces NaNs with 0.

  The state of the transformation has the same tree structure as that of the
  parameters. Each leaf is a single boolean which contains True iff a NaN was
  detected in the corresponding parameter array at the last call to ``update``.
  This state is not used by the transformation internally, but lets users be
  aware when NaNs have been zeroed out.

  Returns:
    A `GradientTransformation`.
  """

  def init_fn(params):
    # No NaN seen yet: one scalar False per parameter leaf.
    all_false = jtu.tree_map(
        lambda p: jnp.array(False, dtype=jnp.bool_), params)
    return ZeroNansState(found_nan=all_false)

  def update_fn(updates, opt_state, params=None):
    del params, opt_state
    # Record, per leaf, whether any entry was NaN before zeroing it out.
    new_state = ZeroNansState(
        found_nan=jtu.tree_map(lambda u: jnp.any(jnp.isnan(u)), updates))
    cleaned = jtu.tree_map(
        lambda u: jnp.where(jnp.isnan(u), jnp.zeros_like(u), u), updates)
    return cleaned, new_state

  return base.GradientTransformation(init=init_fn, update=update_fn)
|
testbed/google-deepmind__optax/optax/transforms/_constraining_test.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for optax.transforms._constraining."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
import chex
|
| 19 |
+
import jax.numpy as jnp
|
| 20 |
+
|
| 21 |
+
from optax._src import combine
|
| 22 |
+
from optax._src import transform
|
| 23 |
+
from optax._src import update
|
| 24 |
+
from optax.transforms import _accumulation
|
| 25 |
+
from optax.transforms import _constraining
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# Module-level test constants.
STEPS = 50
LR = 1e-2


class ConstraintsTest(chex.TestCase):
  """Tests for the constraining transforms."""

  def test_keep_params_nonnegative(self):
    grads = (jnp.array([500., -500., 0.]),
             jnp.array([500., -500., 0.]),
             jnp.array([500., -500., 0.]))

    params = (jnp.array([-1., -1., -1.]),
              jnp.array([1., 1., 1.]),
              jnp.array([0., 0., 0.]))

    # vanilla sgd
    opt = combine.chain(
        _accumulation.trace(decay=0, nesterov=False),
        transform.scale(-LR))
    opt_state = opt.init(params)

    updates, _ = opt.update(grads, opt_state, params)
    new_params = update.apply_updates(params, updates)

    chex.assert_trees_all_close(new_params, (jnp.array([-6., 4., -1.]),
                                             jnp.array([-4., 6., 1.]),
                                             jnp.array([-5., 5., 0.])))

    # sgd with keeping parameters non-negative
    opt = combine.chain(
        _accumulation.trace(decay=0, nesterov=False),
        transform.scale(-LR),
        _constraining.keep_params_nonnegative())
    opt_state = opt.init(params)

    updates, _ = opt.update(grads, opt_state, params)
    new_params = update.apply_updates(params, updates)

    # Negative results are clipped so updated params land at zero.
    chex.assert_trees_all_close(new_params, (jnp.array([0., 4., 0.]),
                                             jnp.array([0., 6., 1.]),
                                             jnp.array([0., 5., 0.])))

  @chex.all_variants
  def test_zero_nans(self):
    params = (jnp.zeros([3]), jnp.zeros([3]), jnp.zeros([3]))

    opt = _constraining.zero_nans()
    opt_state = self.variant(opt.init)(params)
    update_fn = self.variant(opt.update)

    chex.assert_trees_all_close(
        opt_state,
        _constraining.ZeroNansState((jnp.array(False),) * 3))

    # Check an update with nans
    grads_with_nans = (jnp.ones([3]),
                       jnp.array([1., float('nan'), float('nan')]),
                       jnp.array([float('nan'), 1., 1.]))
    updates, opt_state = update_fn(grads_with_nans, opt_state)
    chex.assert_trees_all_close(
        opt_state,
        _constraining.ZeroNansState(
            (jnp.array(False), jnp.array(True), jnp.array(True))))
    chex.assert_trees_all_close(
        updates,
        (jnp.ones([3]), jnp.array([1., 0., 0.]), jnp.array([0., 1., 1.])))

    # Check an update with nans and infs; infs are left untouched.
    grads_with_nans_infs = (jnp.ones([3]),
                            jnp.array([1., float('nan'),
                                       float('nan')]),
                            jnp.array([float('inf'), 1., 1.]))
    updates, opt_state = update_fn(grads_with_nans_infs, opt_state)
    chex.assert_trees_all_close(
        opt_state,
        _constraining.ZeroNansState(
            (jnp.array(False), jnp.array(True), jnp.array(False))))
    chex.assert_trees_all_close(updates, (jnp.ones([3]), jnp.array(
        [1., 0., 0.]), jnp.array([float('inf'), 1., 1.])))

    # Check an update with only good values
    grads = (jnp.ones([3]), jnp.ones([3]), jnp.ones([3]))
    updates, opt_state = update_fn(grads, opt_state)
    chex.assert_trees_all_close(
        opt_state,
        _constraining.ZeroNansState(
            (jnp.array(False), jnp.array(False), jnp.array(False))))
    chex.assert_trees_all_close(updates, grads)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
if __name__ == '__main__':
  # Run this module's tests when executed as a script.
  absltest.main()
|
testbed/google-deepmind__optax/optax/transforms/_layouts.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Wrappers changing the layouts of the tensors that transforms operate on."""
|
| 16 |
+
|
| 17 |
+
from jax import tree_util as jtu
|
| 18 |
+
import jax.numpy as jnp
|
| 19 |
+
import numpy as np
|
| 20 |
+
|
| 21 |
+
from optax._src import base
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def flatten(
    inner: base.GradientTransformation
) -> base.GradientTransformationExtraArgs:
  """Flattens parameters and gradients for init and update of inner transform.

  This can reduce the overhead of performing many calculations on lots of small
  variables, at the cost of slightly increased memory usage.

  Args:
    inner: Inner transformation to flatten inputs for.

  Returns:
    New ``GradientTransformationExtraArgs``
  """

  # Promote `inner` so its update can be called with arbitrary extra kwargs.
  inner = base.with_extra_args_support(inner)

  def _flatten(params):
    """Flattens and concatenates all tensors in params to a single vector."""
    params, _ = jtu.tree_flatten(params)
    return jnp.concatenate([jnp.reshape(param, [-1]) for param in params])

  def _unflatten(updates, flat):
    """Extracts tensors from flat, using the structure and shapes of params."""
    updates_flat, treedef = jtu.tree_flatten(updates)
    # Build cumulative leaf sizes; after dropping the last entry, `offsets`
    # holds exactly the interior split points jnp.split expects.
    offsets = []
    for update in updates_flat:
      size = np.size(update)
      if offsets:
        offsets.append(size + offsets[-1])
      else:
        offsets.append(size)
    del offsets[-1]
    flat_split = jnp.split(flat, offsets)
    # Restore each chunk to its leaf's original shape, then rebuild the tree.
    reshaped = [
        jnp.reshape(flat_update, update.shape)
        for flat_update, update in zip(flat_split, updates_flat)
    ]
    return jtu.tree_unflatten(treedef, reshaped)

  def init_fn(params):
    flat = _flatten(params)
    return inner.init(flat)

  def update_fn(updates, state, params=None, **extra_args):
    if params is not None:
      params = _flatten(params)
    # Run the inner transform in the flat space, then map the result back
    # using `updates` as the structure/shape template.
    updates_flat, state = inner.update(
        _flatten(updates), state, params, **extra_args
    )
    updates = _unflatten(updates, updates_flat)
    return updates, state

  return base.GradientTransformationExtraArgs(init_fn, update_fn)
|
testbed/google-deepmind__optax/optax/transforms/_layouts_test.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for optax.transforms._layouts."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
import chex
|
| 19 |
+
import jax.numpy as jnp
|
| 20 |
+
|
| 21 |
+
from optax._src import alias
|
| 22 |
+
from optax._src import update
|
| 23 |
+
from optax.transforms import _layouts
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class LayoutsTest(absltest.TestCase):
  """Tests for the flatten wrapper."""

  def test_flatten(self):
    # Wrapping sgd with `flatten` must leave the resulting params unchanged.
    def init_params():
      return (jnp.array(2.), jnp.array([1., 2.]), jnp.array([3., 4.]))

    per_step_updates = (
        jnp.array(1.0),
        jnp.array([500.0, 5.0]),
        jnp.array([300.0, 3.0]),
    )

    # First calculate new params without flattening
    optax_sgd_params = init_params()
    sgd = alias.sgd(1e-2, 0.0)
    state_sgd = sgd.init(optax_sgd_params)
    updates_sgd, _ = sgd.update(per_step_updates, state_sgd)
    sgd_params_no_flatten = update.apply_updates(optax_sgd_params, updates_sgd)

    # And now calculate new params with flattening
    optax_sgd_params = init_params()
    sgd = _layouts.flatten(sgd)

    state_sgd = sgd.init(optax_sgd_params)
    updates_sgd, _ = sgd.update(per_step_updates, state_sgd)
    sgd_params_flatten = update.apply_updates(optax_sgd_params, updates_sgd)

    # Test that both give the same result
    chex.assert_trees_all_close(
        sgd_params_no_flatten, sgd_params_flatten, atol=1e-7, rtol=1e-7)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
if __name__ == "__main__":
|
| 59 |
+
absltest.main()
|
testbed/google-deepmind__optax/optax/transforms/_masking.py
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Wrappers that mask out part of the parameters when applying a transform."""
|
| 16 |
+
|
| 17 |
+
from typing import Any, Callable, NamedTuple, Union
|
| 18 |
+
|
| 19 |
+
from jax import tree_util as jtu
|
| 20 |
+
|
| 21 |
+
from optax._src import base
|
| 22 |
+
from optax.tree_utils import _state_utils
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class MaskedState(NamedTuple):
  """Opaque wrapper holding the inner transform's state for `masked`."""
  # State of the wrapped (inner) gradient transformation; the masked
  # transformation itself keeps no other state.
  inner_state: Any
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class MaskedNode(NamedTuple):
  """Sentinel marking the masked-out parts of a pytree.

  As an empty NamedTuple it is a container with no children, so
  `jtu.tree_map` and friends simply skip over it. Substituting it for a
  subtree therefore hides that subtree from tree-mapped functions.
  """
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def masked(
    inner: base.GradientTransformation,
    mask: Union[base.PyTree, Callable[[base.Params], base.PyTree]],
    *,
    mask_compatible_extra_args: bool = False,
) -> base.GradientTransformationExtraArgs:
  """Mask updates so only some are transformed, the rest are passed through.

  A common use case is skipping weight decay for BatchNorm scales and all
  bias parameters. In many networks these are the only 1D parameters, so a
  mask function can single them out::

    mask_fn = lambda p: jtu.tree_map(lambda x: x.ndim != 1, p)
    weight_decay = optax.masked(optax.add_decayed_weights(0.001), mask_fn)

  Alternatively the mask pytree may be built ahead of time::

    mask = jtu.tree_map(lambda x: x.ndim != 1, params)
    weight_decay = optax.masked(optax.add_decayed_weights(0.001), mask)

  The ``inner`` transform only stores state for parameters whose mask value
  is ``True``.

  Note that, when using ``tree_map_params``, it may be required to pass the
  argument `is_leaf=lambda v: isinstance(v, optax.MaskedNode)`, if the tree
  map needs to take additional arguments with the same shape as the original
  input tree.

  Args:
    inner: Inner transformation to mask.
    mask: a PyTree with same structure as (or a prefix of) the params PyTree,
      or a Callable that returns such a pytree given the params/updates. The
      leaves should be booleans, ``True`` for leaves/subtrees you want to
      apply the transformation to, and ``False`` for those you want to skip.
      The mask must be static for the gradient transformation to be
      jit-compilable.
    mask_compatible_extra_args: whether to also apply the same masking to
      extra_arg fields with the same tree structure as params/updates.

  Returns:
    New ``GradientTransformationExtraArgs`` wrapping ``inner``.
  """
  inner = base.with_extra_args_support(inner)

  def _apply_mask(tree, mask_tree):
    # Masked-out leaves/subtrees become childless MaskedNode sentinels,
    # which tree-mapped functions silently skip.
    return jtu.tree_map(
        lambda keep, leaf: leaf if keep else MaskedNode(), mask_tree, tree
    )

  def _mask_matching_extra_args(extra_args, reference_tree, mask_tree):
    # `extra_args` may contain pytrees shaped like params/updates (e.g.
    # parameter tags). When requested, mask those the same way.
    reference_structure = jtu.tree_structure(reference_tree)

    def _mask_if_compatible(value):
      if mask_compatible_extra_args and (
          jtu.tree_structure(value) == reference_structure):
        return _apply_mask(value, mask_tree)
      return value

    return {name: _mask_if_compatible(value)
            for name, value in extra_args.items()}

  def init_fn(params):
    # Workaround so that tree_map_params works with masking: `masked` takes
    # its mask at construction rather than at init, so it can only handle
    # parameter trees matching the mask's shape. tree_map_params probes init
    # with a placeholder to detect copies of the parameter tree, which would
    # otherwise break here. We therefore detect the placeholder and forward
    # it to the inner init unmasked — safe, because tree_map_params places
    # no constraints on the parameter tree's shape as long as it is invoked
    # on a tree with the correct structure. See wrappers_test for proof!
    if isinstance(params, _state_utils._ParamsPlaceholder):  # pylint:disable=protected-access
      return MaskedState(inner_state=inner.init(params))

    mask_tree = mask(params) if callable(mask) else mask
    return MaskedState(inner_state=inner.init(_apply_mask(params, mask_tree)))

  def update_fn(updates, state, params=None, **extra_args):
    mask_tree = mask(updates) if callable(mask) else mask
    inner_extra_args = _mask_matching_extra_args(extra_args, updates, mask_tree)
    inner_updates = _apply_mask(updates, mask_tree)
    inner_params = None if params is None else _apply_mask(params, mask_tree)

    transformed, new_inner_state = inner.update(
        inner_updates, state.inner_state, inner_params, **inner_extra_args)

    # Recombine: transformed values where the mask is True, the original
    # (untouched) updates everywhere else.
    merged_updates = jtu.tree_map(
        lambda keep, new_leaf, old_leaf: new_leaf if keep else old_leaf,
        mask_tree, transformed, updates)
    return merged_updates, MaskedState(inner_state=new_inner_state)

  return base.GradientTransformationExtraArgs(init_fn, update_fn)
|
testbed/google-deepmind__optax/optax/transforms/_masking_test.py
ADDED
|
@@ -0,0 +1,348 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for optax.transforms._masking."""
|
| 16 |
+
|
| 17 |
+
import copy
|
| 18 |
+
from typing import cast
|
| 19 |
+
|
| 20 |
+
from absl.testing import absltest
|
| 21 |
+
from absl.testing import parameterized
|
| 22 |
+
|
| 23 |
+
import chex
|
| 24 |
+
from jax import tree_util as jtu
|
| 25 |
+
import jax.numpy as jnp
|
| 26 |
+
import numpy as np
|
| 27 |
+
|
| 28 |
+
from optax._src import alias
|
| 29 |
+
from optax._src import base
|
| 30 |
+
from optax._src import combine
|
| 31 |
+
from optax._src import transform
|
| 32 |
+
from optax._src import update
|
| 33 |
+
from optax.transforms import _masking
|
| 34 |
+
from optax.tree_utils import _state_utils
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _build_sgd():
  """Plain SGD with unit learning rate — the stateless baseline optimizer."""
  return alias.sgd(1.)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def _build_stateful_sgd():
  """SGD behaving exactly like `_build_sgd` but carrying optimizer state.

  Momentum is set to zero rather than None, so momentum buffers are created
  (exercising the state-handling code paths) without changing the results.
  """
  return alias.sgd(1., momentum=0.)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def _build_sgd_extra_args():
  """SGD chained with a no-op transform that accepts extra keyword args."""

  def init_fn(params):
    del params
    # Non-trivial state so chaining is observable in the combined state.
    return {'foo': 1}

  def update_fn(grads, state, params=None, *, foo=None, **extra_args):
    # Pass gradients through untouched; extra args are accepted and ignored.
    del extra_args, foo, params
    return grads, state

  extra_args_tx = base.GradientTransformationExtraArgs(init_fn, update_fn)
  return combine.chain(_build_sgd(), extra_args_tx)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class MaskedTest(chex.TestCase):
  """Tests for the masked wrapper."""

  def test_tree_map_params(self):
    """tree_map_params must behave identically for masked/unmasked states."""
    params = {
        'a': {
            'b': (jnp.zeros((1, 2)), jnp.zeros((2, 2))),
        },
        'c': {
            'd': jnp.zeros((1, 2)),
            'e': (jnp.zeros((1, 2)), jnp.zeros((1, 2))),
        },
    }

    sharding_axes = {
        'a': {
            'b': (1, 2),
        },
        'c': {
            'd': 1,
            'e': (1, 2),
        },
    }

    mask = {
        'a': {
            'b': (True, False),
        },
        'c': {
            'd': True,
            'e': (False, True),
        },
    }

    expected = {
        'a': {
            'b': (jnp.ones((1, 2)), jnp.zeros((2, 2))),
        },
        'c': {
            'd': jnp.ones((1, 2)),
            'e': (jnp.ones((1, 2)), jnp.ones((1, 2))),
        },
    }

    def init_fn(params):
      return {'count': 1, 'params': params, 'params_copy': params}

    def update_fn(updates, state, params=None):
      del params
      return updates, state

    inner = base.GradientTransformation(init_fn, update_fn)
    masked = _masking.masked(inner, mask)

    def increment_dim_1(v):
      # Only bump arrays whose leading dimension is 1 (matches `expected`).
      return v + 1 if v.shape[0] == 1 else v

    # For this optimizer, tree_map_params should have the same effect on a
    # masked optimizer state as it does on an unmasked optimizer state.
    with self.subTest('inner'):
      state = inner.init(params)
      result = _state_utils.tree_map_params(inner, increment_dim_1, state)
      chex.assert_trees_all_equal(result, inner.init(expected))

    with self.subTest('masked'):
      state = masked.init(params)
      result = _state_utils.tree_map_params(masked, increment_dim_1, state)
      chex.assert_trees_all_equal(result, masked.init(expected))

    with self.subTest('masked_with_extra_args'):
      # Users wishing to pass additional arguments with the same tree structure
      # as the original params pytree will need to add the additional `is_leaf`
      # callable. This makes it possible to ignore the masked parts of the
      # pytree.

      # Replace all non-masked parameters in the opt-state tree with the
      # sharding axis values given in the tree above. Everything else is set to
      # None.
      new_state = _state_utils.tree_map_params(
          masked,
          lambda p, axis: None if isinstance(p, _masking.MaskedNode) else axis,
          state,
          sharding_axes,
          is_leaf=lambda v: isinstance(v, _masking.MaskedNode),
          transform_non_params=lambda v: None,
      )

      sharded_params = {
          'a': {
              'b': (1, None),
          },
          'c': {
              'd': 1,
              'e': (None, 2),
          },
      }

      # Required to make pytype happy
      new_state = cast(_masking.MaskedState, new_state)

      chex.assert_equal(None, new_state.inner_state['count'])
      chex.assert_equal(sharded_params, new_state.inner_state['params'])
      chex.assert_equal(sharded_params, new_state.inner_state['params_copy'])

  @chex.all_variants
  @parameterized.named_parameters(
      ('sgd', _build_sgd, False),
      ('stateful_sgd', _build_stateful_sgd, False),
      ('sgd_w_mask_fn', _build_sgd, True),
      ('stateful_sgd_w_mask_fn', _build_stateful_sgd, True),
  )
  def test_masked(self, opt_builder, use_fn):
    """Masked updates are transformed; unmasked updates pass through."""
    mask = {'a': True,
            'b': [False, True],
            'c': {'d': True, 'e': (False, True)}}
    # Bug fix: previously written as `lambda _: mask if use_fn else mask`,
    # which parses as `lambda _: (mask if use_fn else mask)` and is therefore
    # ALWAYS a callable — the use_fn=False cases never exercised a plain
    # pytree mask. The parenthesized form selects callable vs. pytree.
    mask_arg = (lambda _: mask) if use_fn else mask
    params = {'a': 1., 'b': [2., 3.], 'c': {'d': 4., 'e': (5., 6.)}}
    params = jtu.tree_map(jnp.asarray, params)
    input_updates = jtu.tree_map(lambda x: x/10., params)

    # Negate the updates wherever the mask is True
    def masked_negate(updates):
      return jtu.tree_map(
          lambda upd, m: -upd if m else upd, updates, mask)
    correct_updates = masked_negate(input_updates)

    init_fn, update_fn = _masking.masked(opt_builder(), mask_arg)
    update_fn = self.variant(update_fn)
    state = self.variant(init_fn)(params)

    with self.subTest('tree_map_params'):
      result = _state_utils.tree_map_params(init_fn, lambda v: v, state)
      chex.assert_trees_all_equal_structs(result, state)

    updates, state = update_fn(input_updates, state, params)
    chex.assert_trees_all_close(updates, correct_updates)

    # Check repeated application, this time with no params.
    correct_updates = masked_negate(correct_updates)
    updates, _ = update_fn(updates, state)
    chex.assert_trees_all_close(updates, correct_updates)

  @chex.all_variants
  @parameterized.named_parameters(
      ('sgd', _build_sgd),
      ('stateful_sgd', _build_stateful_sgd),
  )
  def test_prefix_mask(self, opt_builder):
    """Test when the mask is a prefix of the updates PyTree."""
    mask = {'a': True, 'b': False, 'c': {'d': False, 'e': True}}
    params = {'a': 1., 'b': {'f': 2.}, 'c': {'d': 3., 'e': ([4., 5.], 6.)}}
    params = jtu.tree_map(jnp.asarray, params)
    input_updates = jtu.tree_map(lambda x: x/10., params)

    # Negate the updates wherever the mask (or mask parent) is True
    def _masked_sgd_on_updates(m, upd):
      return jtu.tree_map(lambda x: -x, upd) if m else upd
    correct_updates = jtu.tree_map(
        _masked_sgd_on_updates, mask, input_updates)

    init_fn, update_fn = _masking.masked(opt_builder(), mask)
    update_fn = self.variant(update_fn)
    state = self.variant(init_fn)(params)
    updates, state = update_fn(input_updates, state, params)
    chex.assert_trees_all_close(updates, correct_updates)

    # Check repeated application, this time with no params.
    correct_updates = jtu.tree_map(
        _masked_sgd_on_updates, mask, correct_updates)
    updates, _ = update_fn(updates, state)
    chex.assert_trees_all_close(updates, correct_updates)

  @chex.all_variants
  def test_update_requires_params(self):
    """Inner transforms that consume `params` (weight decay) work masked."""
    weight_decay = 0.1
    mask = {'a': True,
            'b': [False, True],
            'c': {'d': True, 'e': (False, True)}}
    params = {'a': 1., 'b': [2., 3.], 'c': {'d': 4., 'e': (5., 6.)}}
    params = jtu.tree_map(jnp.asarray, params)
    input_updates = jtu.tree_map(lambda x: x/10., params)

    correct_updates = jtu.tree_map(
        lambda m, u, p: u + weight_decay * p if m else u,
        mask, input_updates, params)

    init_fn, update_fn = _masking.masked(
        transform.add_decayed_weights(weight_decay), mask)
    update_fn = self.variant(update_fn)

    state = self.variant(init_fn)(params)
    updates, state = update_fn(input_updates, state, params)
    chex.assert_trees_all_close(updates, correct_updates)

    params = update.apply_updates(params, updates)

    # Test repeated application
    new_correct_updates = jtu.tree_map(
        lambda m, u, p: u + weight_decay * p if m else u,
        mask, correct_updates, params)
    updates, _ = update_fn(correct_updates, state, params)
    chex.assert_trees_all_close(updates, new_correct_updates)

  @parameterized.parameters(list, tuple, dict)
  def test_empty(self, container):
    """Masking an empty container must not raise."""
    init_fn, update_fn = _masking.masked(_build_sgd(), container())
    update_fn(container(), init_fn(container()))

  @parameterized.parameters(
      (False, False), (False, True), (True, False), (True, True))
  def test_tree_mismatch_fails(self, extra_key_in_mask, use_fn):
    """A mask whose structure differs from the params must raise at init."""
    mask = {'a': True,
            'b': [False, True],
            'c': {'d': True, 'e': (False, True)}}
    # Same precedence bug fix as in `test_masked` above.
    mask_arg = (lambda _: mask) if use_fn else mask
    params = {'a': 1., 'b': [2., 3.], 'c': {'d': 4., 'e': (5., 6.)}}
    params = jtu.tree_map(jnp.asarray, params)

    if extra_key_in_mask:
      mask['c']['extra'] = True
    else:
      params['c']['extra'] = 7

    init_fn = _masking.masked(_build_sgd(), mask_arg)[0]
    with self.assertRaises(ValueError):
      init_fn(params)

  @chex.all_variants
  def test_mask_fn(self):
    """A callable mask is evaluated on the updates and applied correctly."""
    params = {'a': jnp.ones((1, 2)), 'b': (jnp.ones((1,)), np.ones((1, 2, 3)))}
    mask_fn = lambda p: jtu.tree_map(lambda x: x.ndim > 1, p)
    init_fn, update_fn = _masking.masked(
        transform.add_decayed_weights(0.1), mask_fn)
    update_fn = self.variant(update_fn)

    state = self.variant(init_fn)(params)
    grads = jtu.tree_map(lambda x: x*2, params)
    updates, _ = update_fn(grads, state, params)
    np.testing.assert_allclose(updates['a'], grads['a'] + 0.1*params['a'])
    np.testing.assert_allclose(updates['b'][0], grads['b'][0])
    np.testing.assert_allclose(updates['b'][1],
                               grads['b'][1] + 0.1*params['b'][1])

  @chex.all_variants
  @parameterized.named_parameters(
      ('sgd', _build_sgd),
      ('stateful_sgd', _build_stateful_sgd),
  )
  def test_nested_mask(self, opt_builder):
    # https://github.com/deepmind/optax/issues/271
    params = {'linear_1': {'w': jnp.zeros((1, 1)), 'b': jnp.zeros(1)},
              'linear_2': {'w': jnp.zeros((1, 2)), 'b': jnp.zeros(2)},
              'linear_3': {'w': jnp.zeros((2, 3)), 'b': jnp.zeros(3)}}

    outer_mask = lambda p: jtu.tree_map(lambda x: x.ndim > 1, p)
    inner_mask = jtu.tree_map(lambda _: True, params)
    inner_mask['linear_2'] = False

    inner = _masking.masked(opt_builder(), inner_mask)
    init_fn, update_fn = _masking.masked(inner, outer_mask)

    input_updates = jtu.tree_map(jnp.ones_like, params)
    correct_updates = copy.deepcopy(input_updates)
    # Only weights kept by BOTH masks (ndim > 1 AND not linear_2) get negated.
    correct_updates['linear_1']['w'] *= -1.0
    correct_updates['linear_3']['w'] *= -1.0

    state = self.variant(init_fn)(params)
    updates, _ = self.variant(update_fn)(input_updates, state, params)
    chex.assert_trees_all_close(updates, correct_updates)

  @chex.all_variants
  def test_masked_state_structure(self):
    # https://github.com/deepmind/optax/issues/271
    params = {'a': [jnp.ones(1), (jnp.ones(2), jnp.ones(3))],
              'b': {'c': jnp.ones(4), 'd': jnp.ones(5)}}
    mask = {'a': [True, (True, False)], 'b': False}
    tx = _masking.masked(_build_stateful_sgd(), mask)
    trace = self.variant(tx.init)(params).inner_state[0].trace
    expected_trace = {
        'a': [jnp.zeros(1), (jnp.zeros(2), _masking.MaskedNode())],
        'b': _masking.MaskedNode()
    }
    chex.assert_trees_all_equal_structs(trace, expected_trace)
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
if __name__ == '__main__':
|
| 348 |
+
absltest.main()
|
testbed/google-deepmind__optax/optax/tree_utils/__init__.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""The tree_utils sub-package."""
|
| 16 |
+
|
| 17 |
+
from optax.tree_utils._casting import tree_cast
|
| 18 |
+
from optax.tree_utils._random import tree_random_like
|
| 19 |
+
from optax.tree_utils._state_utils import NamedTupleKey
|
| 20 |
+
from optax.tree_utils._state_utils import tree_get
|
| 21 |
+
from optax.tree_utils._state_utils import tree_get_all_with_path
|
| 22 |
+
from optax.tree_utils._state_utils import tree_map_params
|
| 23 |
+
from optax.tree_utils._state_utils import tree_set
|
| 24 |
+
from optax.tree_utils._tree_math import tree_add
|
| 25 |
+
from optax.tree_utils._tree_math import tree_add_scalar_mul
|
| 26 |
+
from optax.tree_utils._tree_math import tree_bias_correction
|
| 27 |
+
from optax.tree_utils._tree_math import tree_clip
|
| 28 |
+
from optax.tree_utils._tree_math import tree_div
|
| 29 |
+
from optax.tree_utils._tree_math import tree_full_like
|
| 30 |
+
from optax.tree_utils._tree_math import tree_l1_norm
|
| 31 |
+
from optax.tree_utils._tree_math import tree_l2_norm
|
| 32 |
+
from optax.tree_utils._tree_math import tree_mul
|
| 33 |
+
from optax.tree_utils._tree_math import tree_ones_like
|
| 34 |
+
from optax.tree_utils._tree_math import tree_scalar_mul
|
| 35 |
+
from optax.tree_utils._tree_math import tree_sub
|
| 36 |
+
from optax.tree_utils._tree_math import tree_sum
|
| 37 |
+
from optax.tree_utils._tree_math import tree_update_infinity_moment
|
| 38 |
+
from optax.tree_utils._tree_math import tree_update_moment
|
| 39 |
+
from optax.tree_utils._tree_math import tree_update_moment_per_elem_norm
|
| 40 |
+
from optax.tree_utils._tree_math import tree_vdot
|
| 41 |
+
from optax.tree_utils._tree_math import tree_zeros_like
|
testbed/google-deepmind__optax/optax/tree_utils/_casting.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Utilities to cast pytrees to specific dtypes."""
|
| 16 |
+
|
| 17 |
+
from typing import Optional
|
| 18 |
+
|
| 19 |
+
import chex
|
| 20 |
+
from jax import tree_util as jtu
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def tree_cast(
    tree: chex.ArrayTree,
    dtype: Optional[chex.ArrayDType]
) -> chex.ArrayTree:
  """Cast every leaf of `tree` to `dtype`; return `tree` unchanged if None."""
  if dtype is None:
    return tree
  return jtu.tree_map(lambda leaf: leaf.astype(dtype), tree)
|
testbed/google-deepmind__optax/optax/tree_utils/_casting_test.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for optax.tree_utils._casting."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
from absl.testing import parameterized
|
| 19 |
+
|
| 20 |
+
from jax import tree_util as jtu
|
| 21 |
+
import jax.numpy as jnp
|
| 22 |
+
import numpy as np
|
| 23 |
+
|
| 24 |
+
from optax import tree_utils as otu
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class CastingTest(parameterized.TestCase):
  """Unit tests for `optax.tree_utils.tree_cast`."""

  @parameterized.parameters([
      # (dtype, leaf_b, leaf_c, expected_b, expected_c)
      (jnp.float32, [1.3, 2.001, 3.6], [-3.3], [1.3, 2.001, 3.6], [-3.3]),
      (jnp.float32, [1.3, 2.001, 3.6], [-3], [1.3, 2.001, 3.6], [-3.0]),
      (jnp.int32, [1.3, 2.001, 3.6], [-3.3], [1, 2, 3], [-3]),
      (jnp.int32, [1.3, 2.001, 3.6], [-3], [1, 2, 3], [-3]),
      (None, [1.123, 2.33], [0.0], [1.123, 2.33], [0.0]),
      (None, [1, 2, 3], [0.0], [1, 2, 3], [0.0]),
  ])
  def test_tree_cast(self, dtype, b, c, new_b, new_c):
    # Builds a small nested dict pytree from the two leaf values.
    def _make_tree(leaf_b, leaf_c):
      raw = {'a': {'b': jnp.array(leaf_b)}, 'c': jnp.array(leaf_c)}
      return jtu.tree_map(lambda x: x, raw)

    casted = otu.tree_cast(_make_tree(b, c), dtype=dtype)
    # Every leaf of the result must match the expected (possibly truncated)
    # values leaf-by-leaf.
    jtu.tree_map(
        np.testing.assert_array_equal, casted, _make_tree(new_b, new_c)
    )
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
# Allow running this test module directly (e.g. `python _casting_test.py`).
if __name__ == '__main__':
  absltest.main()
|
testbed/google-deepmind__optax/optax/tree_utils/_random.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Utilities to generate random pytrees."""
|
| 16 |
+
|
| 17 |
+
from typing import Callable
|
| 18 |
+
|
| 19 |
+
import chex
|
| 20 |
+
import jax
|
| 21 |
+
from jax import tree_util as jtu
|
| 22 |
+
|
| 23 |
+
from optax._src import base
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def _tree_rng_keys_split(
    rng_key: chex.PRNGKey, target_tree: chex.ArrayTree
) -> chex.ArrayTree:
  """Splits ``rng_key`` into a pytree of keys matching ``target_tree``.

  Args:
    rng_key: the key to split.
    target_tree: the tree whose structure to match.

  Returns:
    a tree of rng keys, one per leaf of ``target_tree``.
  """
  structure = jtu.tree_structure(target_tree)
  # One fresh key per leaf, then re-packed into the target structure.
  leaf_keys = jax.random.split(rng_key, structure.num_leaves)
  return jtu.tree_unflatten(structure, leaf_keys)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def tree_random_like(
    rng_key: chex.PRNGKey,
    target_tree: chex.ArrayTree,
    sampler: Callable[
        [chex.PRNGKey, base.Shape], chex.Array
    ] = jax.random.normal,
) -> chex.ArrayTree:
  """Builds a tree of random entries shaped like ``target_tree``.

  Args:
    rng_key: the key for the random number generator.
    target_tree: the tree whose structure to match. Leaves must be arrays.
    sampler: the noise sampling function, by default ``jax.random.normal``.

  Returns:
    a random tree with the same structure as ``target_tree``, whose leaves have
    distribution ``sampler``.

  .. versionadded:: 0.2.1
  """
  # Each leaf gets its own independent key so draws are uncorrelated.
  per_leaf_keys = _tree_rng_keys_split(rng_key, target_tree)
  draw = lambda leaf, key: sampler(key, leaf.shape)
  return jtu.tree_map(draw, target_tree, per_leaf_keys)
|
testbed/google-deepmind__optax/optax/tree_utils/_random_test.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for optax.tree_utils._random."""
|
| 16 |
+
|
| 17 |
+
from absl.testing import absltest
|
| 18 |
+
import chex
|
| 19 |
+
import jax
|
| 20 |
+
from jax import tree_util as jtu
|
| 21 |
+
import jax.numpy as jnp
|
| 22 |
+
import numpy as np
|
| 23 |
+
|
| 24 |
+
from optax import tree_utils as otu
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class RandomTest(absltest.TestCase):
  """Tests for `optax.tree_utils.tree_random_like`."""

  def setUp(self):
    super().setUp()
    # Deterministic numpy generator for the fixture trees; a separate jax
    # PRNG key drives tree_random_like itself.
    rng = np.random.RandomState(0)

    self.rng_jax = jax.random.PRNGKey(0)

    # tree_a's first leaf is complex-valued; tree_b is purely real.
    self.tree_a = (rng.randn(20, 10) + 1j * rng.randn(20, 10), rng.randn(20))
    self.tree_b = (rng.randn(20, 10), rng.randn(20))

    # Nested tuple/dict pytrees of scalars, converted to jax arrays.
    self.tree_a_dict = jtu.tree_map(
        jnp.asarray,
        (
            1.0,
            {'k1': 1.0, 'k2': (1.0, 1.0)},
            1.0
        )
    )
    self.tree_b_dict = jtu.tree_map(
        jnp.asarray,
        (
            1.0,
            {'k1': 2.0, 'k2': (3.0, 4.0)},
            5.0
        )
    )

    # Bare-array "trees" (a single leaf); array_a is complex-valued.
    self.array_a = rng.randn(20) + 1j * rng.randn(20)
    self.array_b = rng.randn(20)

    self.tree_a_dict_jax = jtu.tree_map(jnp.array, self.tree_a_dict)
    self.tree_b_dict_jax = jtu.tree_map(jnp.array, self.tree_b_dict)

  def test_tree_random_like(self, eps=1e-6):
    """Test for `tree_random_like`.

    Args:
      eps: amount of noise.

    Tests that `tree_random_like` generates a tree of the proper structure,
    that it can be added to a target tree with a small multiplicative factor
    without errors, and that the resulting addition is close to the original.
    """
    # Draw a random tree for every fixture; if the structures did not match
    # the subsequent tree_add_scalar_mul calls would raise.
    rand_tree_a = otu.tree_random_like(self.rng_jax, self.tree_a)
    rand_tree_b = otu.tree_random_like(self.rng_jax, self.tree_b)
    rand_tree_a_dict = otu.tree_random_like(self.rng_jax, self.tree_a_dict_jax)
    rand_tree_b_dict = otu.tree_random_like(self.rng_jax, self.tree_b_dict_jax)
    rand_array_a = otu.tree_random_like(self.rng_jax, self.array_a)
    rand_array_b = otu.tree_random_like(self.rng_jax, self.array_b)
    # original + eps * noise should stay within atol of the original.
    sum_tree_a = otu.tree_add_scalar_mul(self.tree_a, eps, rand_tree_a)
    sum_tree_b = otu.tree_add_scalar_mul(self.tree_b, eps, rand_tree_b)
    sum_tree_a_dict = otu.tree_add_scalar_mul(self.tree_a_dict,
                                              eps,
                                              rand_tree_a_dict)
    sum_tree_b_dict = otu.tree_add_scalar_mul(self.tree_b_dict,
                                              eps,
                                              rand_tree_b_dict)
    sum_array_a = otu.tree_add_scalar_mul(self.array_a, eps, rand_array_a)
    sum_array_b = otu.tree_add_scalar_mul(self.array_b, eps, rand_array_b)
    tree_sums = [sum_tree_a,
                 sum_tree_b,
                 sum_tree_a_dict,
                 sum_tree_b_dict,
                 sum_array_a,
                 sum_array_b]
    trees = [self.tree_a,
             self.tree_b,
             self.tree_a_dict,
             self.tree_b_dict,
             self.array_a,
             self.array_b]
    chex.assert_trees_all_close(trees, tree_sums, atol=1e-5)
|
| 100 |
+
|
| 101 |
+
# Allow running this test module directly (e.g. `python _random_test.py`).
if __name__ == '__main__':
  absltest.main()
|
testbed/google-deepmind__optax/optax/tree_utils/_state_utils.py
ADDED
|
@@ -0,0 +1,786 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tools for mapping over optimizer states."""
|
| 16 |
+
|
| 17 |
+
import dataclasses
|
| 18 |
+
import functools
|
| 19 |
+
import typing
|
| 20 |
+
from typing import Any, Callable, Optional, Protocol, Tuple, Union, cast
|
| 21 |
+
|
| 22 |
+
import jax
|
| 23 |
+
from optax._src import base
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@jax.tree_util.register_pytree_node_class
class _ParamsPlaceholder:
  """Sentinel pytree leaf used to mark parameter positions in an opt state.

  Registered as a pytree node with no children, so `jax.tree_util` functions
  traverse past it without treating it as a leaf of data.
  """

  def tree_flatten(self):
    # No children and no static auxiliary data.
    return ((), None)

  @classmethod
  def tree_unflatten(cls, aux, children):
    del aux, children
    return cls()
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
@dataclasses.dataclass(frozen=True)
class NamedTupleKey:
  """KeyType for a NamedTuple in a tree.

  When using a function ``filtering(path: KeyPath, value: Any) -> bool: ...``
  with :func:`optax.tree_utils.tree_get_all_with_path`,
  :func:`optax.tree_utils.tree_get`, or :func:`optax.tree_utils.tree_set`,
  you can inspect the path to check whether a KeyEntry is a NamedTupleKey and
  then check whether the name of the named tuple is the one you intended to
  search for.

  .. seealso:: :class:`jax.tree_util.DictKey`,
    :class:`jax.tree_util.FlattenedIndexKey`,
    :class:`jax.tree_util.GetAttrKey`,
    :class:`jax.tree_util.SequenceKey`,
    :func:`optax.tree_utils.tree_get_all_with_path`,
    :func:`optax.tree_utils.tree_get`,
    :func:`optax.tree_utils.tree_set`,

  Attributes:
    tuple_name (str): name of the tuple containing the key.
    name (str): name of the key.

  .. versionadded:: 0.2.2
  """
  tuple_name: str
  name: str

  def __str__(self):
    # Render as "TupleName.field", mirroring attribute-access notation.
    return f"{self.tuple_name}.{self.name}"
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
# A single step in a path through a pytree: any of jax's built-in key types,
# plus optax's NamedTupleKey for fields of named tuples.
_KeyEntry = Union[
    jax.tree_util.DictKey,
    jax.tree_util.FlattenedIndexKey,
    jax.tree_util.GetAttrKey,
    jax.tree_util.SequenceKey,
    NamedTupleKey,
]

# A full path from the root of a pytree down to one value.
_KeyPath = Tuple[_KeyEntry, ...]
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
# runtime_checkable lets `isinstance(obj, Initable)` structurally test for an
# `init` method (as done in `tree_map_params` below).
@typing.runtime_checkable
class Initable(Protocol):
  """An object with an init function."""

  def init(self, params: base.Params) -> base.OptState:
    """Calling the init for given parameters returns a fresh opt state."""
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def tree_map_params(
    initable: Union[
        Callable[[base.Params], base.OptState],
        Initable,
    ],
    f: Callable[..., Any],
    state: base.OptState,
    /,
    *rest: Any,
    transform_non_params: Optional[Callable[..., Any]] = None,
    is_leaf: Optional[Callable[[base.Params], bool]] = None,
) -> base.OptState:
  """Apply a callable over all params in the given optimizer state.

  This function exists to help construct partition specs over optimizer
  states, in the case that a partition spec is already known for the parameters.

  For example, the following will replace all optimizer state parameter trees
  with copies of the given partition spec instead. The argument
  `transform_non_params` can be used to replace any remaining fields as
  required, in this case, we replace those fields by None.

  >>> params, specs = jnp.array(0.), jnp.array(0.)  # Trees with the same shape
  >>> opt = optax.sgd(1e-3)
  >>> state = opt.init(params)
  >>> opt_specs = optax.tree_map_params(
  ...     opt,
  ...     lambda _, spec: spec,
  ...     state,
  ...     specs,
  ...     transform_non_params=lambda _: None,
  ... )

  Args:
    initable: A callable taking parameters and returning an optimizer state, or
      an object with an `init` attribute having the same function.
    f: A callable that will be applied for all copies of the parameter tree
      within this optimizer state.
    state: The optimizer state to map over.
    *rest: Additional arguments, having the same shape as the parameter tree,
      that will be passed to f.
    transform_non_params: An optional function that will be called on all
      non-parameter fields within the optimizer state.
    is_leaf: Passed through to `jax.tree.map`. This makes it possible to ignore
      parts of the parameter tree e.g. when the gradient transformations modify
      the shape of the original pytree, such as for ``optax.masked``.

  Returns:
    The result of applying the function f on all trees in the optimizer's state
    that have the same shape as the parameter tree, along with the given
    optional extra arguments.
  """
  # Feed a placeholder through `initable` so we can tell which subtrees of
  # the resulting state stand in for the parameter tree.
  # (Cast is for pytype checks only; it is a no-op at runtime.)
  marker = cast(base.chex.ArrayTree, _ParamsPlaceholder())

  if isinstance(initable, Initable):
    initable = cast(Initable, initable)  # for pytype checks
    skeleton = initable.init(marker)
  else:
    skeleton = initable(marker)

  def _rebuild(skeleton_value, value):
    # Placeholder positions hold parameter-shaped trees: map `f` over them
    # together with any extra `rest` trees.
    if isinstance(skeleton_value, _ParamsPlaceholder):
      return jax.tree_util.tree_map(f, value, *rest, is_leaf=is_leaf)
    # Anything else is optimizer bookkeeping (e.g. step counts).
    if transform_non_params is not None:
      return transform_non_params(value)
    return value

  return jax.tree_util.tree_map(
      _rebuild,
      skeleton,
      state,
      is_leaf=lambda v: isinstance(v, _ParamsPlaceholder),
  )
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
def tree_get_all_with_path(
    tree: base.PyTree,
    key: Any,
    filtering: Optional[Callable[[_KeyPath, Any], bool]] = None,
) -> list[tuple[_KeyPath, Any]]:
  # pylint: disable=line-too-long
  r"""Extract values of a pytree matching a given key.

  Search in a pytree ``tree`` for a specific ``key`` (which can be a key
  from a dictionary, a field from a NamedTuple or the name of a NamedTuple).

  That key/field ``key`` may appear more than once in ``tree``, so this
  function returns a list of all values corresponding to ``key`` together with
  the path to each value. A path is a sequence of ``KeyEntry`` that can be
  rendered in readable form with :func:`jax.tree_util.keystr`, see below.

  Examples:

    Basic usage

    >>> import jax.numpy as jnp
    >>> import optax
    >>> params = jnp.array([1., 2., 3.])
    >>> solver = optax.inject_hyperparams(optax.sgd)(
    ...     learning_rate=lambda count: 1/(count+1)
    ... )
    >>> state = solver.init(params)
    >>> found_values_with_path = optax.tree_utils.tree_get_all_with_path(
    ...     state, 'learning_rate'
    ... )
    >>> print(
    ...     *[(jax.tree_util.keystr(p), v) for p, v in found_values_with_path],
    ...     sep="\n",
    ... )
    ("InjectStatefulHyperparamsState.hyperparams['learning_rate']", Array(1., dtype=float32))
    ("InjectStatefulHyperparamsState.hyperparams_states['learning_rate']", WrappedScheduleState(count=Array(0, dtype=int32)))

    Usage with a filtering operation

    >>> import jax.numpy as jnp
    >>> import optax
    >>> params = jnp.array([1., 2., 3.])
    >>> solver = optax.inject_hyperparams(optax.sgd)(
    ...     learning_rate=lambda count: 1/(count+1)
    ... )
    >>> state = solver.init(params)
    >>> filtering = lambda path, value: isinstance(value, tuple)
    >>> found_values_with_path = optax.tree_utils.tree_get_all_with_path(
    ...     state, 'learning_rate', filtering
    ... )
    >>> print(
    ...     *[(jax.tree_util.keystr(p), v) for p, v in found_values_with_path],
    ...     sep="\n",
    ... )
    ("InjectStatefulHyperparamsState.hyperparams_states['learning_rate']", WrappedScheduleState(count=Array(0, dtype=int32)))

  .. seealso:: :func:`optax.tree_utils.tree_get`,
    :func:`optax.tree_utils.tree_set`

  Args:
    tree: tree to search in.
    key: keyword or field to search in tree for.
    filtering: optional callable to further filter values in tree that match
      the key. ``filtering(path: Key_Path, value: Any) -> bool: ...``
      takes as arguments both the path to the value (as returned by
      :func:`optax.tree_utils.tree_get_all_with_path`) and the
      value that match the given key.

  Returns:
    values_with_path
      list of tuples of the form (``path_to_value``, ``value``). Here
      ``value`` is one entry of the tree that corresponds to the ``key``, and
      ``path_to_value`` is a tuple of `KeyEntry` that is a tuple of
      :class:`jax.tree_util.DictKey`,
      :class:`jax.tree_util.FlattenedIndexKey`,
      :class:`jax.tree_util.GetAttrKey`,
      :class:`jax.tree_util.SequenceKey`, or
      :class:`optax.tree_utils.NamedTupleKey`.

  .. versionadded:: 0.2.2
  """
  # pylint: enable=line-too-long
  matches = _tree_get_all_with_path(tree, key)
  if filtering is None:
    return matches
  # Keep only the (path, value) pairs accepted by the user-supplied filter.
  return [(path, value) for path, value in matches if filtering(path, value)]
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def tree_get(
    tree: base.PyTree,
    key: Any,
    default: Optional[Any] = None,
    filtering: Optional[Callable[[_KeyPath, Any], bool]] = None,
) -> Any:
  # pylint: disable=line-too-long
  """Extract a value from a pytree matching a given key.

  Search in the ``tree`` for a specific ``key`` (which can be a key
  from a dictionary, a field from a NamedTuple or the name of a NamedTuple).

  If the ``tree`` does not contain ``key``, returns ``default``.

  Raises a ``KeyError`` if multiple values of ``key`` are found in ``tree``.

  Generally, you may first get all pairs ``(path_to_value, value)`` for a given
  ``key`` using :func:`optax.tree_utils.tree_get_all_with_path`. You may then
  define a filtering operation
  ``filtering(path: Key_Path, value: Any) -> bool: ...`` that enables you to
  select the specific values you wanted to fetch by looking at the type of the
  value, or looking at the path to that value.
  Note that, contrary to the paths returned by
  :func:`jax.tree_util.tree_leaves_with_path`, the paths analyzed by the
  filtering operation in :func:`optax.tree_utils.tree_get_all_with_path`,
  :func:`optax.tree_utils.tree_get`, or :func:`optax.tree_utils.tree_set`
  detail the names of the named tuples considered in the path. Concretely, if
  the value considered is in the attribute ``key`` of a named tuple called
  ``MyNamedTuple``, the last element of the path will be a
  :class:`optax.tree_utils.NamedTupleKey` containing both ``name=key`` and
  ``tuple_name='MyNamedTuple'``. That way you may distinguish between identical
  values in different named tuples (arising for example when chaining
  transformations in optax). See the last example below.

  .. seealso:: :func:`optax.tree_utils.tree_get_all_with_path`,
    :func:`optax.tree_utils.tree_set`

  Examples:

    Basic usage

    >>> import jax.numpy as jnp
    >>> import optax
    >>> params = jnp.array([1., 2., 3.])
    >>> opt = optax.adam(learning_rate=1.)
    >>> state = opt.init(params)
    >>> count = optax.tree_utils.tree_get(state, 'count')
    >>> print(count)
    0

    Usage with a filtering operation

    >>> import jax.numpy as jnp
    >>> import optax
    >>> params = jnp.array([1., 2., 3.])
    >>> opt = optax.inject_hyperparams(optax.sgd)(
    ...     learning_rate=lambda count: 1/(count+1)
    ... )
    >>> state = opt.init(params)
    >>> filtering = lambda path, value: isinstance(value, jnp.ndarray)
    >>> lr = optax.tree_utils.tree_get(
    ...     state, 'learning_rate', filtering=filtering
    ... )
    >>> print(lr)
    1.0

    Extracting a named tuple by its name

    >>> params = jnp.array([1., 2., 3.])
    >>> opt = optax.chain(
    ...     optax.add_noise(1.0, 0.9, 0), optax.scale_by_adam()
    ... )
    >>> state = opt.init(params)
    >>> noise_state = optax.tree_utils.tree_get(state, 'AddNoiseState')
    >>> print(noise_state)
    AddNoiseState(count=Array(0, dtype=int32), rng_key=Array([0, 0], dtype=uint32))

    Differentiating between two values by the name of their named tuples.

    >>> import jax.numpy as jnp
    >>> import optax
    >>> params = jnp.array([1., 2., 3.])
    >>> opt = optax.chain(
    ...     optax.add_noise(1.0, 0.9, 0), optax.scale_by_adam()
    ... )
    >>> state = opt.init(params)
    >>> filtering = (
    ...     lambda p, v: isinstance(p[-1], optax.tree_utils.NamedTupleKey)
    ...     and p[-1].tuple_name == 'ScaleByAdamState'
    ... )
    >>> count = optax.tree_utils.tree_get(state, 'count', filtering=filtering)
    >>> print(count)
    0

  Args:
    tree: tree to search in.
    key: keyword or field to search in ``tree`` for.
    default: default value to return if ``key`` is not found in ``tree``.
    filtering: optional callable to further filter values in ``tree`` that
      match the ``key``. ``filtering(path: Key_Path, value: Any) -> bool: ...``
      takes as arguments both the path to the value (as returned by
      :func:`optax.tree_utils.tree_get_all_with_path`) and the
      value that match the given key.

  Returns:
    value
      value in ``tree`` matching the given ``key``. If none are
      found return ``default`` value. If multiple are found raises an error.

  Raises:
    KeyError: If multiple values of ``key`` are found in ``tree``.

  .. versionadded:: 0.2.2
  """
  # pylint: enable=line-too-long
  matches = tree_get_all_with_path(tree, key, filtering=filtering)
  # No match: fall back to the caller-provided default.
  if not matches:
    return default
  # Ambiguous match: refuse to guess which value the caller meant.
  if len(matches) > 1:
    raise KeyError(f"Found multiple values for '{key}' in {tree}.")
  (_, value), = matches
  return value
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
def tree_set(
    tree: base.PyTree,
    filtering: Optional[Callable[[_KeyPath, Any], bool]] = None,
    /,
    **kwargs: Any,
) -> base.PyTree:
  # pylint: disable=line-too-long
  r"""Creates a copy of tree with some values replaced as specified by kwargs.

  Search in the ``tree`` for ``keys`` in ``**kwargs`` (which can be a key
  from a dictionary, a field from a NamedTuple or the name of a NamedTuple).
  If such a key is found, replace the corresponding value with the one given in
  ``**kwargs``.

  Raises a ``KeyError`` if some keys in ``**kwargs`` are not present in the
  tree.

  .. note:: The recommended usage to inject hyperparameters schedules is through
    :func:`optax.inject_hyperparams`. This function is a helper for other
    purposes.

  Examples:

    Basic usage

    >>> import jax.numpy as jnp
    >>> import optax
    >>> params = jnp.array([1., 2., 3.])
    >>> opt = optax.adam(learning_rate=1.)
    >>> state = opt.init(params)
    >>> print(state)
    (ScaleByAdamState(count=Array(0, dtype=int32), mu=Array([0., 0., 0.], dtype=float32), nu=Array([0., 0., 0.], dtype=float32)), EmptyState())
    >>> new_state = optax.tree_utils.tree_set(state, count=2.)
    >>> print(new_state)
    (ScaleByAdamState(count=2.0, mu=Array([0., 0., 0.], dtype=float32), nu=Array([0., 0., 0.], dtype=float32)), EmptyState())

    Usage with a filtering operation

    >>> import jax.numpy as jnp
    >>> import optax
    >>> params = jnp.array([1., 2., 3.])
    >>> opt = optax.inject_hyperparams(optax.sgd)(
    ...     learning_rate=lambda count: 1/(count+1)
    ... )
    >>> state = opt.init(params)
    >>> print(state)
    InjectStatefulHyperparamsState(count=Array(0, dtype=int32), hyperparams={'learning_rate': Array(1., dtype=float32)}, hyperparams_states={'learning_rate': WrappedScheduleState(count=Array(0, dtype=int32))}, inner_state=(EmptyState(), EmptyState()))
    >>> filtering = lambda path, value: isinstance(value, jnp.ndarray)
    >>> new_state = optax.tree_utils.tree_set(
    ...     state, filtering, learning_rate=jnp.asarray(0.1)
    ... )
    >>> print(new_state)
    InjectStatefulHyperparamsState(count=Array(0, dtype=int32), hyperparams={'learning_rate': Array(0.1, dtype=float32, weak_type=True)}, hyperparams_states={'learning_rate': WrappedScheduleState(count=Array(0, dtype=int32))}, inner_state=(EmptyState(), EmptyState()))

  .. seealso:: :func:`optax.tree_utils.tree_get_all_with_path`,
    :func:`optax.tree_utils.tree_get`

  Args:
    tree: pytree whose values are to be replaced.
    filtering: optional callable to further filter values in ``tree`` that match
      the keys to replace.
      ``filtering(path: Key_Path, value: Any) -> bool: ...``
      takes as arguments both the path to the value (as returned by
      :func:`optax.tree_utils.tree_get_all_with_path`) and the
      value that match a given key.
    **kwargs: dictionary of keys with values to replace in ``tree``.

  Returns:
    new_tree
      new pytree with the same structure as ``tree``. For each element in
      ``tree`` whose key/field matches a key in ``**kwargs``, its value is
      set by the corresponding value in ``**kwargs``.

  Raises:
    KeyError: If no values of some key in ``**kwargs`` are found in ``tree``
      or none of the values satisfy the filtering operation.

  .. versionadded:: 0.2.2
  """
  # pylint: enable=line-too-long

  # First check if the keys are present in the tree
  # (fail fast with a KeyError rather than silently returning ``tree``
  # unchanged when a requested key matches nothing).
  for key in kwargs:
    found_values_with_path = tree_get_all_with_path(tree, key, filtering)
    if not found_values_with_path:
      if filtering:
        raise KeyError(
            f"Found no values matching '{key}' given the filtering operation in"
            f" {tree}"
        )
      else:
        raise KeyError(f"Found no values matching '{key}' in {tree}")

  # Predicate that is True for any node holding at least one key to replace.
  has_any_key = functools.partial(_node_has_keys, keys=tuple(kwargs.keys()))

  def _replace(path: _KeyPath, node: Any) -> Any:
    """Replace a node with a new node whose values are updated."""
    if has_any_key(node):
      if (
          _is_named_tuple(node)
          and (node.__class__.__name__ in kwargs)
          and (filtering is None or filtering(path, node))
      ):
        # The node itself is a named tuple we wanted to replace
        return kwargs[node.__class__.__name__]
      else:
        # The node contains one of the keys we want to replace
        children_with_path = _get_children_with_path(path, node)
        new_children_with_keys = {}
        for child_path, child in children_with_path:
          # Scan each child of that node
          key = _get_key(child_path[-1])
          if key in kwargs and (
              filtering is None or filtering(child_path, child)
          ):
            # If the child matches a given key given the filtering operation
            # replaces with the new value
            new_children_with_keys.update({key: kwargs[key]})
          else:
            if (
                isinstance(child, tuple)
                or isinstance(child, dict)
                or isinstance(child, list)
            ):
              # If the child is itself a pytree, further search in the child to
              # replace the given value
              new_children_with_keys.update({key: _replace(child_path, child)})
            else:
              # If the child is just a leaf that does not contain the key or
              # satisfies the filtering operation, just return the child.
              new_children_with_keys.update({key: child})
        return _set_children(node, new_children_with_keys)
    else:
      return node

  # Mimics jax.tree_util.tree_map_with_path(_replace, tree, is_leaf)
  # except that the paths we consider can contain NamedTupleKeys
  # NOTE(review): assumes _tree_leaves_with_named_tuple_path yields leaves in
  # the same order as tree_flatten, which treedef.unflatten requires — confirm.
  _, treedef = jax.tree_util.tree_flatten(tree, is_leaf=has_any_key)
  tree_leaves_with_path = _tree_leaves_with_named_tuple_path(
      tree, is_leaf=has_any_key
  )
  # NOTE(review): the two zip(*...) below are a round-trip — they rebuild the
  # original (path, leaf) pairs; presumably kept for symmetry with flattening.
  tree_leaves_with_path = list(zip(*tree_leaves_with_path))
  new_tree = treedef.unflatten(
      _replace(*xs) for xs in zip(*tree_leaves_with_path)
  )
  return new_tree
|
| 530 |
+
|
| 531 |
+
|
| 532 |
+
def _tree_get_all_with_path(
    tree: base.PyTree, key: str
) -> list[tuple[_KeyPath, Any]]:
  """Recursively collects every value of ``tree`` reachable under ``key``.

  Private function called recursively, see
  :func:`optax.tree_utils.tree_get_all_with_path` for the public api.

  Args:
    tree: tree to search in.
    key: dict key, named tuple field, or named tuple class name to look for.

  Returns:
    list of ``(path_to_value, value)`` pairs, where each ``path_to_value`` is
    a tuple of :class:`jax.tree_util.DictKey`,
    :class:`jax.tree_util.FlattenedIndexKey`,
    :class:`jax.tree_util.GetAttrKey`,
    :class:`jax.tree_util.SequenceKey`, or
    :class:`optax.tree_utils.NamedTupleKey` entries.
  """
  node_matches = functools.partial(_node_has_keys, keys=(key,))

  # Stop flattening at any node that exposes the key, then keep only those
  # stopping points (plain leaves that merely ended traversal are dropped).
  matching_subtrees = [
      (subtree_path, subtree)
      for subtree_path, subtree in _tree_leaves_with_named_tuple_path(
          tree, is_leaf=node_matches
      )
      if node_matches(subtree)
  ]

  # Direct matches: project each matching node onto its keyed entry.
  results = [
      _flatten_to_key(subtree_path, subtree, key)
      for subtree_path, subtree in matching_subtrees
  ]

  # The same key may also appear deeper inside a matching node, so recurse
  # into each of its children and extend the paths accordingly.
  for subtree_path, subtree in matching_subtrees:
    for child_path, child in _get_children_with_path(subtree_path, subtree):
      for nested_path, nested_value in _tree_get_all_with_path(child, key):
        results.append(((*child_path, *nested_path), nested_value))
  return results
|
| 584 |
+
|
| 585 |
+
|
| 586 |
+
def _is_named_tuple(x):
|
| 587 |
+
return (
|
| 588 |
+
isinstance(x, tuple)
|
| 589 |
+
and hasattr(x, "_fields")
|
| 590 |
+
and hasattr(x, "__class__")
|
| 591 |
+
and hasattr(x.__class__, "__name__")
|
| 592 |
+
)
|
| 593 |
+
|
| 594 |
+
|
| 595 |
+
def _tree_leaves_with_named_tuple_path(
    tree: base.PyTree,
    is_leaf: Optional[Callable[[Any], bool]] = None,
) -> list[tuple[_KeyPath, Any]]:
  """Lists the leaves of a tree with named-tuple-aware paths.

  Behaves like :func:`jax.tree_util.tree_leaves_with_path` except that each
  step through a named tuple is recorded as a
  :class:`optax.tree_utils.NamedTupleKey`, which carries the tuple's class
  name in addition to the field name. This makes it possible to get/set
  attributes living in specific states (different named tuples) even when
  several states share field names and types.
  See :func:`optax.tree_utils.tree_get` for a concrete example.

  Args:
    tree: pytree to extract leaves of.
    is_leaf: optional predicate; expansion stops at any node for which
      ``is_leaf(node)`` is True.

  Returns:
    list of ``(path_to_leaf, leaf)`` for all leaves in the tree
    (or nodes satisfying ``is_leaf(node) == True``).
  """
  stop_here = is_leaf if is_leaf else lambda _: False

  # Flatten normally, but also halt at named tuples so they can be expanded
  # below with NamedTupleKey entries instead of plain GetAttrKeys.
  flat = jax.tree_util.tree_leaves_with_path(
      tree, is_leaf=lambda x: stop_here(x) or _is_named_tuple(x)
  )

  out = []
  for path, node in flat:
    if stop_here(node) or not _is_named_tuple(node):
      out.append((path, node))
      continue
    # Expand the named tuple field by field, tagging each entry with a
    # NamedTupleKey that remembers the tuple's class name.
    tuple_name = node.__class__.__name__
    for field in node._fields:
      for sub_path, sub_value in _tree_leaves_with_named_tuple_path(
          getattr(node, field), is_leaf
      ):
        out.append(
            ((*path, NamedTupleKey(tuple_name, field), *sub_path), sub_value)
        )
  return out
|
| 652 |
+
|
| 653 |
+
|
| 654 |
+
def _node_has_keys(node: Any, keys: tuple[Any, ...]) -> bool:
  """Returns True if ``node`` exposes any of the given keys.

  A named tuple matches when one of ``keys`` is one of its fields or its
  class name; a dict matches when one of ``keys`` is one of its keys. Any
  other node never matches.

  Private method used in :func:`optax.tree_utils.tree_get_all_with_path` and
  in :func:`optax.tree_utils.tree_set`.

  Args:
    node: node in a pytree.
    keys: keys to search for in the node.

  Returns:
    whether the node has one of the given keys.
  """
  if _is_named_tuple(node):
    return any(k in node._fields for k in keys) or (
        node.__class__.__name__ in keys
    )
  if isinstance(node, dict):
    return any(k in node for k in keys)
  return False
|
| 675 |
+
|
| 676 |
+
|
| 677 |
+
def _flatten_to_key(
    path: _KeyPath, node: Any, key: Any
) -> tuple[_KeyPath, Any]:
  """Projects a node onto the entry designated by ``key``.

  Private method used in :func:`optax.tree_utils.tree_get_all_with_path`.

  Args:
    path: path to the node in a pytree.
    node: node in a pytree.
    key: key/field/name to reach for in the node.

  Returns:
    ``(path_to_key, key_node)`` when ``key`` is a field of a named tuple or
    an entry of a dict, where ``path_to_key`` extends ``path`` by one entry;
    otherwise the path and node unchanged (including when ``key`` is the
    named tuple's own class name).
  """
  if _is_named_tuple(node):
    tuple_name = node.__class__.__name__
    if key == tuple_name:
      # The key designates the whole named tuple, not one of its fields.
      return (path, node)
    # Descend into the matching field, extending the path accordingly.
    return ((*path, NamedTupleKey(tuple_name, key)), getattr(node, key))
  if isinstance(node, dict) and key in node:
    return ((*path, jax.tree_util.DictKey(key)), node[key])
  # Not indexable by ``key``: return path and node as they are.
  return (path, node)
|
| 705 |
+
|
| 706 |
+
|
| 707 |
+
def _get_children_with_path(
    path: _KeyPath, node: Any
) -> list[tuple[_KeyPath, Any]]:
  """Lists the direct children of a node together with their paths.

  Private method used in :func:`optax.tree_utils.tree_get_all_with_path` and
  in :func:`optax.tree_utils.tree_set`. It is tailored for nodes that are
  NamedTuple or dict.

  Args:
    path: path to the node in a pytree.
    node: node in a pytree.

  Returns:
    list of ``(path_to_child, child)`` for every direct child of the node.

  Raises:
    ValueError: if the given node is not a NamedTuple or a dict.
  """
  if _is_named_tuple(node):
    tuple_name = node.__class__.__name__
    return [
        ((*path, NamedTupleKey(tuple_name, field)), getattr(node, field))
        for field in node._fields
    ]
  if isinstance(node, dict):
    return [
        ((*path, jax.tree_util.DictKey(key)), value)
        for key, value in node.items()
    ]
  raise ValueError(
      f"Subtree must be a dict or a NamedTuple. Got {type(node)}"
  )
|
| 743 |
+
|
| 744 |
+
|
| 745 |
+
def _set_children(node: Any, children_with_keys: dict[Any, Any]) -> Any:
  """Rebuilds a node with its children replaced by ``children_with_keys``.

  Private method used in :func:`optax.tree_utils.tree_set`. It is tailored
  for nodes that are NamedTuple or dict.

  Args:
    node: node in a pytree.
    children_with_keys: mapping from child key/field to replacement value.

  Returns:
    a new node of the same kind whose fields/keys are replaced by the ones
    given in ``children_with_keys``.

  Raises:
    ValueError: if the given node is not a NamedTuple or a dict.
  """
  if _is_named_tuple(node):
    # ``_replace`` returns a copy of the named tuple with fields swapped.
    return node._replace(**children_with_keys)
  if isinstance(node, dict):
    # For dicts, the replacement mapping *is* the new node.
    return children_with_keys
  raise ValueError(
      f"Subtree must be a dict or a NamedTuple. Got {type(node)}"
  )
|
| 770 |
+
|
| 771 |
+
|
| 772 |
+
def _get_key(key: _KeyEntry) -> Union[int, str]:
  """Converts a jax/optax ``KeyEntry`` into a plain ``int`` or ``str``."""
  if isinstance(key, jax.tree_util.DictKey):
    dict_key = key.key
    if not isinstance(dict_key, (str, int)):
      raise KeyError("Hashable keys not supported")
    return dict_key
  if isinstance(key, jax.tree_util.FlattenedIndexKey):
    return key.key  # int.
  if isinstance(key, jax.tree_util.GetAttrKey):
    return key.name  # str.
  if isinstance(key, jax.tree_util.SequenceKey):
    return key.idx  # int.
  if isinstance(key, NamedTupleKey):
    return key.name  # str.
  raise KeyError(f"Tree key '{key}' of type '{type(key)}' not valid.")
|
testbed/google-deepmind__optax/optax/tree_utils/_state_utils_test.py
ADDED
|
@@ -0,0 +1,599 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Tests for _state_utils."""
|
| 16 |
+
|
| 17 |
+
import dataclasses
|
| 18 |
+
from typing import Optional, TypedDict, cast
|
| 19 |
+
|
| 20 |
+
from absl.testing import absltest
|
| 21 |
+
import chex
|
| 22 |
+
import jax
|
| 23 |
+
import jax.numpy as jnp
|
| 24 |
+
import jax.tree_util as jtu
|
| 25 |
+
from optax._src import alias
|
| 26 |
+
from optax._src import base
|
| 27 |
+
from optax._src import combine
|
| 28 |
+
from optax._src import transform
|
| 29 |
+
from optax.schedules import _inject
|
| 30 |
+
from optax.schedules import _schedule
|
| 31 |
+
from optax.tree_utils import _state_utils
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@dataclasses.dataclass
class FakeShardSpec:
  """Stand-in for a sharding spec, used to tag state leaves in these tests."""

  # Axis a parameter would be sharded along; None marks an unsharded leaf.
  sharding_axis: Optional[int]
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class ScaleByAdamStateDict(TypedDict):
  """An opt state that uses dictionaries instead of classes."""

  # Mirrors ``ScaleByAdamState.count`` (see ``_scale_by_adam_with_dicts``).
  count: chex.Array
  # Moment estimates, keyed 'mu'/'nu' like the named tuple's fields.
  params: TypedDict('Params', {'mu': chex.ArrayTree, 'nu': chex.ArrayTree})
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def _scale_by_adam_with_dicts():
  """An implementation of adam using dictionary-based opt states.

  Wraps ``transform.scale_by_adam`` so that its ``ScaleByAdamState`` named
  tuple is exposed to callers as a ``ScaleByAdamStateDict``.

  Returns:
    A ``base.GradientTransformation`` whose state is a dictionary.
  """

  t = transform.scale_by_adam()

  def _to_dict_state(state) -> ScaleByAdamStateDict:
    """Converts a ``ScaleByAdamState`` named tuple into its dict form."""
    state = cast(transform.ScaleByAdamState, state)
    return ScaleByAdamStateDict(
        count=state.count,
        params={'mu': state.mu, 'nu': state.nu},
    )

  def init(params):
    return _to_dict_state(t.init(params))

  def update(updates, state, params=None):
    # Rebuild the named-tuple state expected by the wrapped transformation.
    state = transform.ScaleByAdamState(
        count=state['count'],
        mu=state['params']['mu'],
        nu=state['params']['nu'],
    )
    # Bug fix: a GradientTransformation's update must return the pair
    # (updates, new_state); the previous version discarded the transformed
    # updates and returned only the state dict, which would break any caller
    # actually applying this transformation (e.g. through combine.chain).
    updates, state = t.update(updates, state, params)
    return updates, _to_dict_state(state)

  return base.GradientTransformation(init, update)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
class StateUtilsTest(absltest.TestCase):
|
| 78 |
+
|
| 79 |
+
  def test_dict_based_optimizers(self):
    """Test we can map over params also for optimizer states using dicts."""
    # Chain a dict-state adam with a regular named-tuple-state transformation
    # so both state flavors appear in a single optimizer.
    opt = combine.chain(
        _scale_by_adam_with_dicts(),
        transform.add_decayed_weights(1e-3),
    )

    params = _fake_params()
    params_sharding_spec = _fake_param_sharding()
    opt_state = opt.init(params)

    # Replace each parameter-shaped leaf by its sharding spec; every other
    # leaf (e.g. the step count) gets an unsharded FakeShardSpec.
    opt_state_sharding_spec = _state_utils.tree_map_params(
        opt,
        lambda _, spec: spec,
        opt_state,
        params_sharding_spec,
        transform_non_params=lambda _: FakeShardSpec(None),
    )

    expected = (
        {
            'count': FakeShardSpec(sharding_axis=None),
            'params': {
                'mu': {
                    'my/fake/module': {
                        'b': FakeShardSpec(sharding_axis=1),
                        'w': FakeShardSpec(sharding_axis=0),
                    },
                    'my/other/fake/module': {
                        'b': FakeShardSpec(sharding_axis=3),
                        'w': FakeShardSpec(sharding_axis=2),
                    },
                },
                'nu': {
                    'my/fake/module': {
                        'b': FakeShardSpec(sharding_axis=1),
                        'w': FakeShardSpec(sharding_axis=0),
                    },
                    'my/other/fake/module': {
                        'b': FakeShardSpec(sharding_axis=3),
                        'w': FakeShardSpec(sharding_axis=2),
                    },
                },
            },
        },
        base.EmptyState(),
    )

    self.assertEqual(expected, opt_state_sharding_spec)
|
| 128 |
+
|
| 129 |
+
def test_state_chex_dataclass(self):
|
| 130 |
+
@chex.dataclass
|
| 131 |
+
class Foo:
|
| 132 |
+
count: int
|
| 133 |
+
v: chex.ArrayTree
|
| 134 |
+
|
| 135 |
+
def init(params):
|
| 136 |
+
return Foo(count=0, v=params)
|
| 137 |
+
|
| 138 |
+
params = {
|
| 139 |
+
'w': 0,
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
state = init(params)
|
| 143 |
+
state = _state_utils.tree_map_params(init, lambda v: v + 1, state)
|
| 144 |
+
state = cast(Foo, state)
|
| 145 |
+
|
| 146 |
+
self.assertEqual(int(state.count), 0)
|
| 147 |
+
self.assertEqual(state.v, {'w': jnp.array(1)})
|
| 148 |
+
|
| 149 |
+
  def test_adam(self):
    """Maps sharding specs over an adam state with tree_map_params."""
    params = _fake_params()
    params_sharding_spec = _fake_param_sharding()

    opt = alias.adam(1e-4)
    opt_state = opt.init(params)

    # Parameter-shaped leaves (mu, nu) are replaced by their specs; the
    # scalar count is handled by transform_non_params.
    opt_state_sharding_spec = _state_utils.tree_map_params(
        opt,
        lambda _, spec: spec,
        opt_state,
        params_sharding_spec,
        transform_non_params=lambda _: FakeShardSpec(None),
    )

    expected = (
        transform.ScaleByAdamState(  # pytype:disable=wrong-arg-types
            count=FakeShardSpec(sharding_axis=None),
            mu={
                'my/fake/module': {
                    'w': FakeShardSpec(sharding_axis=0),
                    'b': FakeShardSpec(sharding_axis=1),
                },
                'my/other/fake/module': {
                    'w': FakeShardSpec(sharding_axis=2),
                    'b': FakeShardSpec(sharding_axis=3),
                },
            },
            nu={
                'my/fake/module': {
                    'w': FakeShardSpec(sharding_axis=0),
                    'b': FakeShardSpec(sharding_axis=1),
                },
                'my/other/fake/module': {
                    'w': FakeShardSpec(sharding_axis=2),
                    'b': FakeShardSpec(sharding_axis=3),
                },
            },
        ),
        base.EmptyState(),
    )

    self.assertEqual(expected, opt_state_sharding_spec)
|
| 192 |
+
|
| 193 |
+
def test_inject_hparams(self):
|
| 194 |
+
opt = _inject.inject_hyperparams(alias.adamw)(learning_rate=1e-3)
|
| 195 |
+
|
| 196 |
+
params = _fake_params()
|
| 197 |
+
state = opt.init(params)
|
| 198 |
+
state = _state_utils.tree_map_params(opt, lambda v: v + 1, state)
|
| 199 |
+
state = cast(_inject.InjectHyperparamsState, state)
|
| 200 |
+
|
| 201 |
+
self.assertEqual(1e-3, state.hyperparams['learning_rate'])
|
| 202 |
+
params_plus_one = jtu.tree_map(lambda v: v + 1, params)
|
| 203 |
+
mu = getattr(state.inner_state[0], 'mu')
|
| 204 |
+
chex.assert_trees_all_close(mu, params_plus_one)
|
| 205 |
+
|
| 206 |
+
def test_map_params_to_none(self):
|
| 207 |
+
opt = alias.adagrad(1e-4)
|
| 208 |
+
|
| 209 |
+
params = {'a': jnp.zeros((1, 2))}
|
| 210 |
+
state = opt.init(params)
|
| 211 |
+
state = _state_utils.tree_map_params(opt, lambda _: None, state)
|
| 212 |
+
self.assertEqual(
|
| 213 |
+
state,
|
| 214 |
+
(
|
| 215 |
+
transform.ScaleByRssState(sum_of_squares={'a': None}),
|
| 216 |
+
base.EmptyState(),
|
| 217 |
+
),
|
| 218 |
+
)
|
| 219 |
+
|
| 220 |
+
  def test_map_non_params_to_none(self):
    """Test for dangerous edge-cases in tree when returning None values."""
    # NOTE: None is treated as an empty node by jax pytrees, so returning it
    # from the callbacks exercises flatten/unflatten edge cases.

    opt = alias.adam(_schedule.linear_schedule(1e-2, 1e-4, 10))

    params = {'a': jnp.zeros((1, 2))}
    state = opt.init(params)

    state = _state_utils.tree_map_params(
        opt, lambda v: 1, state, transform_non_params=lambda _: None
    )

    expected = (
        transform.ScaleByAdamState(  # pytype:disable=wrong-arg-types
            count=None,
            mu={'a': 1},
            nu={'a': 1},
        ),
        transform.ScaleByScheduleState(  # pytype:disable=wrong-arg-types
            count=None
        ),
    )
    self.assertEqual(state, expected)
|
| 243 |
+
|
| 244 |
+
  def test_tree_get_all_with_path(self):
    """Covers tree_get_all_with_path on flat, nested and filtered trees."""
    params = jnp.array([1.0, 2.0, 3.0])

    with self.subTest('Test with flat tree'):
      # Trees without any dict/named-tuple structure yield no matches.
      tree = ()
      found_values = _state_utils.tree_get_all_with_path(tree, 'foo')
      self.assertEmpty(found_values)
      tree = jnp.array([1.0, 2.0, 3.0])
      found_values = _state_utils.tree_get_all_with_path(tree, 'foo')
      self.assertEmpty(found_values)

    with self.subTest('Test with single value in state'):
      key = 'count'
      opt = transform.scale_by_adam()
      state = opt.init(params)
      found_values = _state_utils.tree_get_all_with_path(state, key)
      # The path records the named tuple's class name via NamedTupleKey.
      expected_result = [(
          (_state_utils.NamedTupleKey('ScaleByAdamState', 'count'),),
          jnp.array(0.0),
      )]
      self.assertEqual(found_values, expected_result)

    with self.subTest('Test with no value in state'):
      key = 'apple'
      opt = alias.adam(learning_rate=1.0)
      state = opt.init(params)
      found_values = _state_utils.tree_get_all_with_path(state, key)
      self.assertEmpty(found_values)

    with self.subTest('Test with multiple values in state'):
      key = 'learning_rate'
      opt = combine.chain(
          _inject.inject_hyperparams(alias.sgd)(learning_rate=1.0),
          combine.chain(
              alias.adam(learning_rate=1.0),
              _inject.inject_hyperparams(alias.adam)(learning_rate=1e-4),
          ),
      )
      state = opt.init(params)
      found_values = _state_utils.tree_get_all_with_path(state, key)
      # Both injected learning rates are found, with full paths through the
      # chained (tuple) states.
      expected_result = [
          (
              (
                  jtu.SequenceKey(idx=0),
                  _state_utils.NamedTupleKey(
                      'InjectStatefulHyperparamsState', 'hyperparams'
                  ),
                  jtu.DictKey(key='learning_rate'),
              ),
              jnp.array(1.0),
          ),
          (
              (
                  jtu.SequenceKey(idx=1),
                  jtu.SequenceKey(idx=1),
                  _state_utils.NamedTupleKey(
                      'InjectStatefulHyperparamsState', 'hyperparams'
                  ),
                  jtu.DictKey(key='learning_rate'),
              ),
              jnp.array(1e-4),
          ),
      ]
      self.assertEqual(found_values, expected_result)

    with self.subTest('Test with optional filtering'):
      state = dict(hparams=dict(learning_rate=1.0), learning_rate='foo')

      # Without filtering two values are found
      found_values = _state_utils.tree_get_all_with_path(state, 'learning_rate')
      self.assertLen(found_values, 2)

      # With filtering only the float entry is returned
      filtering = lambda _, value: isinstance(value, float)
      found_values = _state_utils.tree_get_all_with_path(
          state, 'learning_rate', filtering=filtering
      )
      self.assertLen(found_values, 1)
      expected_result = [(
          (jtu.DictKey(key='hparams'), jtu.DictKey(key='learning_rate')),
          1.0,
      )]
      self.assertEqual(found_values, expected_result)

    with self.subTest('Test to get a subtree (here hyperparams_states)'):
      opt = _inject.inject_hyperparams(alias.sgd)(learning_rate=lambda x: x)
      # Filtering for tuples selects the schedule state, not the scalar value.
      filtering = lambda _, value: isinstance(value, tuple)
      state = opt.init(params)
      found_values = _state_utils.tree_get_all_with_path(
          state, 'learning_rate', filtering=filtering
      )
      expected_result = [(
          (
              _state_utils.NamedTupleKey(
                  'InjectStatefulHyperparamsState', 'hyperparams_states'
              ),
              jtu.DictKey(key='learning_rate'),
          ),
          _inject.WrappedScheduleState(
              count=jnp.array(0, dtype=jnp.dtype('int32'))
          ),
      )]
      self.assertEqual(found_values, expected_result)

    with self.subTest('Test with nested tree containing a key'):
      # A key occurring at several depths yields one match per occurrence,
      # outermost first.
      tree = dict(a=dict(a=1.0))
      found_values = _state_utils.tree_get_all_with_path(tree, 'a')
      expected_result = [
          ((jtu.DictKey(key='a'),), {'a': 1.0}),
          ((jtu.DictKey(key='a'), jtu.DictKey(key='a')), 1.0),
      ]
      self.assertEqual(found_values, expected_result)
|
| 356 |
+
|
| 357 |
+
  def test_tree_get(self):
    """Tests for ``_state_utils.tree_get`` (single-value lookup by key)."""
    params = jnp.array([1.0, 2.0, 3.0])

    with self.subTest('Test with unique value matching the key'):
      solver = _inject.inject_hyperparams(alias.sgd)(learning_rate=42.0)
      state = solver.init(params)
      lr = _state_utils.tree_get(state, 'learning_rate')
      self.assertEqual(lr, 42.0)

    with self.subTest('Test with no value matching the key'):
      solver = _inject.inject_hyperparams(alias.sgd)(learning_rate=42.0)
      state = solver.init(params)
      # Missing key returns None unless an explicit default is given.
      ema = _state_utils.tree_get(state, 'ema')
      self.assertIsNone(ema)
      ema = _state_utils.tree_get(state, 'ema', default=7.0)
      self.assertEqual(ema, 7.0)

    with self.subTest('Test with multiple values matching the key'):
      solver = combine.chain(
          _inject.inject_hyperparams(alias.sgd)(learning_rate=42.0),
          _inject.inject_hyperparams(alias.sgd)(learning_rate=42.0),
      )
      state = solver.init(params)
      # Ambiguous lookups (two matches, no filtering) must raise KeyError.
      self.assertRaises(KeyError, _state_utils.tree_get, state, 'learning_rate')

    with self.subTest('Test jitted tree_get'):
      opt = _inject.inject_hyperparams(alias.sgd)(
          learning_rate=lambda x: 1 / (x + 1)
      )
      state = opt.init(params)
      filtering = lambda _, value: isinstance(value, jnp.ndarray)

      @jax.jit
      def get_learning_rate(state):
        return _state_utils.tree_get(
            state, 'learning_rate', filtering=filtering
        )

      for i in range(4):
        # we simply update state, we don't care about updates.
        _, state = opt.update(params, state)
        lr = get_learning_rate(state)
        self.assertEqual(lr, 1 / (i + 1))

    with self.subTest('Test with optional filtering'):
      state = dict(hparams=dict(learning_rate=1.0), learning_rate='foo')

      # Without filtering raises an error
      self.assertRaises(KeyError, _state_utils.tree_get, state, 'learning_rate')

      # With filtering, fetches the float entry
      filtering = lambda path, value: isinstance(value, float)
      lr = _state_utils.tree_get(state, 'learning_rate', filtering=filtering)
      self.assertEqual(lr, 1.0)

    with self.subTest('Test filtering for specific state'):
      opt = combine.chain(
          transform.add_noise(1.0, 0.9, 0), transform.scale_by_adam()
      )
      state = opt.init(params)

      # Filter on the path so only the entry inside ScaleByAdamState matches
      # (add_noise also carries a `count` field).
      filtering = (
          lambda path, _: isinstance(path[-1], _state_utils.NamedTupleKey)
          and path[-1].tuple_name == 'ScaleByAdamState'
      )

      count = _state_utils.tree_get(state, 'count', filtering=filtering)
      self.assertEqual(count, jnp.asarray(0, dtype=jnp.dtype('int32')))

    with self.subTest('Test extracting a state'):
      opt = combine.chain(
          transform.add_noise(1.0, 0.9, 0), transform.scale_by_adam()
      )
      state = opt.init(params)
      # Looking up a namedtuple class name returns the whole sub-state.
      noise_state = _state_utils.tree_get(state, 'AddNoiseState')
      expected_result = (
          transform.AddNoiseState(
              count=jnp.asarray(0),
              rng_key=jnp.array([0, 0], dtype=jnp.dtype('uint32')),
          )
      )
      chex.assert_trees_all_equal(noise_state, expected_result)
|
| 439 |
+
|
| 440 |
+
  def test_tree_set(self):
    """Tests for ``_state_utils.tree_set`` (keyword-based in-tree updates)."""
    params = jnp.array([1.0, 2.0, 3.0])

    with self.subTest('Test with flat tree'):
      # Trees with no matching key must raise KeyError.
      tree = ()
      self.assertRaises(KeyError, _state_utils.tree_set, tree, foo=1.0)
      tree = jnp.array([1.0, 2.0, 3.0])
      self.assertRaises(KeyError, _state_utils.tree_set, tree, foo=1.0)

    with self.subTest('Test modifying an injected hyperparam'):
      opt = _inject.inject_hyperparams(alias.adam)(learning_rate=1.0)
      state = opt.init(params)
      new_state = _state_utils.tree_set(state, learning_rate=2.0, b1=3.0)
      lr = _state_utils.tree_get(new_state, 'learning_rate')
      self.assertEqual(lr, 2.0)

    with self.subTest('Test modifying an attribute of the state'):
      opt = _inject.inject_hyperparams(alias.adam)(learning_rate=1.0)
      state = opt.init(params)
      new_state = _state_utils.tree_set(state, learning_rate=2.0, b1=3.0)
      b1 = _state_utils.tree_get(new_state, 'b1')
      self.assertEqual(b1, 3.0)

    with self.subTest('Test modifying a value not present in the state'):
      opt = _inject.inject_hyperparams(alias.adam)(learning_rate=1.0)
      state = opt.init(params)
      self.assertRaises(KeyError, _state_utils.tree_set, state, ema=2.0)

    with self.subTest('Test jitted tree_set'):
      # NOTE: `opt` and `state` are reused from the previous subtest; all
      # subtests share this method's local scope.

      @jax.jit
      def set_learning_rate(state, lr):
        return _state_utils.tree_set(state, learning_rate=lr)

      modified_state = state
      lr = 1.0
      for i in range(4):
        modified_state = set_learning_rate(modified_state, lr / (i + 1))
        # we simply update state, we don't care about updates.
        _, modified_state = opt.update(params, modified_state)
        modified_lr = _state_utils.tree_get(modified_state, 'learning_rate')
        self.assertEqual(modified_lr, lr / (i + 1))

    with self.subTest('Test modifying several values at once'):
      opt = combine.chain(
          alias.adam(learning_rate=1.0), alias.adam(learning_rate=1.0)
      )
      state = opt.init(params)
      # Without filtering, every entry matching the key is overwritten.
      new_state = _state_utils.tree_set(state, count=2.0)
      found_values = _state_utils.tree_get_all_with_path(new_state, 'count')
      self.assertLen(found_values, 2)
      for _, value in found_values:
        self.assertEqual(value, 2.0)

    with self.subTest('Test with optional filtering'):
      state = dict(hparams=dict(learning_rate=1.0), learning_rate='foo')
      filtering = lambda _, value: isinstance(value, float)
      # Only the float entry is updated; the string entry is left intact.
      new_state = _state_utils.tree_set(state, filtering, learning_rate=0.5)
      found_values = _state_utils.tree_get_all_with_path(
          new_state, 'learning_rate'
      )
      expected_result = [
          ((jtu.DictKey(key='learning_rate'),), 'foo'),
          ((jtu.DictKey(key='hparams'), jtu.DictKey(key='learning_rate')), 0.5),
      ]
      self.assertEqual(found_values, expected_result)

    with self.subTest('Test with nested trees and filtering'):
      tree = dict(a=dict(a=1.0), b=dict(a=1))
      filtering = lambda _, value: isinstance(value, float)
      new_tree = _state_utils.tree_set(tree, filtering, a=2.0)
      expected_result = dict(a=dict(a=2.0), b=dict(a=1))
      self.assertEqual(new_tree, expected_result)

    with self.subTest('Test setting a subtree'):
      tree = dict(a=dict(a=1.0), b=dict(a=1))
      filtering = lambda _, value: isinstance(value, dict)
      # Whole subtrees can be replaced when the filter matches a dict node.
      new_tree = _state_utils.tree_set(tree, filtering, a=dict(c=0.0))
      expected_result = dict(a=dict(c=0.0), b=dict(a=1))
      self.assertEqual(new_tree, expected_result)

    with self.subTest('Test setting a specific state'):
      opt = combine.chain(
          transform.add_noise(1.0, 0.9, 0), transform.scale_by_adam()
      )
      state = opt.init(params)

      # Restrict the update to the `count` living inside ScaleByAdamState.
      filtering = (
          lambda path, _: isinstance(path[-1], _state_utils.NamedTupleKey)
          and path[-1].tuple_name == 'ScaleByAdamState'
      )

      new_state = _state_utils.tree_set(state, filtering, count=jnp.array(42))
      expected_result = (
          transform.AddNoiseState(
              count=jnp.array(0),
              rng_key=jnp.array([0, 0], dtype=jnp.dtype('uint32')),
          ),
          transform.ScaleByAdamState(
              count=jnp.array(42),
              mu=jnp.array([0.0, 0.0, 0.0]),
              nu=jnp.array([0.0, 0.0, 0.0]),
          ),
      )
      chex.assert_trees_all_equal(new_state, expected_result)

    with self.subTest('Test setting a state'):
      opt = combine.chain(
          transform.add_noise(1.0, 0.9, 0), transform.scale_by_adam()
      )
      state = opt.init(params)
      new_noise_state = (
          transform.AddNoiseState(
              count=jnp.array(42),
              rng_key=jnp.array([4, 8], dtype=jnp.dtype('uint32')),
          )
      )
      # A whole sub-state can be swapped in by its namedtuple class name.
      new_state = _state_utils.tree_set(state, AddNoiseState=new_noise_state)
      expected_result = (
          transform.AddNoiseState(
              count=jnp.array(42),
              rng_key=jnp.array([4, 8], dtype=jnp.dtype('uint32')),
          ),
          transform.ScaleByAdamState(
              count=jnp.array(0),
              mu=jnp.array([0.0, 0.0, 0.0]),
              nu=jnp.array([0.0, 0.0, 0.0]),
          ),
      )
      chex.assert_trees_all_equal(new_state, expected_result)
|
| 570 |
+
|
| 571 |
+
|
| 572 |
+
def _fake_params():
  """Return a dummy parameter pytree with two fake modules."""
  def _module():
    # Each fake module holds a (1, 2) weight and a (3, 4) bias of zeros.
    return {'w': jnp.zeros((1, 2)), 'b': jnp.zeros((3, 4))}

  return {
      'my/fake/module': _module(),
      'my/other/fake/module': _module(),
  }
|
| 583 |
+
|
| 584 |
+
|
| 585 |
+
def _fake_param_sharding():
  """Return fake sharding specs mirroring the structure of ``_fake_params``."""
  # Each leaf gets a distinct FakeShardSpec index (0..3).
  return {
      'my/fake/module': {'w': FakeShardSpec(0), 'b': FakeShardSpec(1)},
      'my/other/fake/module': {'w': FakeShardSpec(2), 'b': FakeShardSpec(3)},
  }
|
| 596 |
+
|
| 597 |
+
|
| 598 |
+
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  absltest.main()
|
testbed/google-deepmind__optax/optax/tree_utils/_tree_math.py
ADDED
|
@@ -0,0 +1,318 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Utilities to perform maths on pytrees."""
|
| 16 |
+
|
| 17 |
+
import functools
|
| 18 |
+
import operator
|
| 19 |
+
from typing import Any, Optional, Union
|
| 20 |
+
|
| 21 |
+
import chex
|
| 22 |
+
import jax
|
| 23 |
+
from jax import tree_util as jtu
|
| 24 |
+
import jax.numpy as jnp
|
| 25 |
+
|
| 26 |
+
from optax._src import numerics
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def tree_add(tree_x: Any, tree_y: Any, *other_trees: Any) -> Any:
  r"""Add two (or more) pytrees leaf-wise.

  Args:
    tree_x: first pytree.
    tree_y: second pytree.
    *other_trees: optional other trees to add

  Returns:
    the sum of the two (or more) pytrees.

  .. versionchanged:: 0.2.1
    Added optional ``*other_trees`` argument.
  """
  def _add_leaves(*leaves):
    # `sum` folds an arbitrary number of corresponding leaves together.
    return sum(leaves)

  return jtu.tree_map(_add_leaves, tree_x, tree_y, *other_trees)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def tree_sub(tree_x: Any, tree_y: Any) -> Any:
  r"""Subtract two pytrees leaf-wise.

  Args:
    tree_x: first pytree.
    tree_y: second pytree.

  Returns:
    the difference of the two pytrees.
  """
  return jtu.tree_map(lambda a, b: a - b, tree_x, tree_y)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def tree_mul(tree_x: Any, tree_y: Any) -> Any:
  r"""Multiply two pytrees leaf-wise.

  Args:
    tree_x: first pytree.
    tree_y: second pytree.

  Returns:
    the product of the two pytrees.
  """
  return jtu.tree_map(lambda a, b: a * b, tree_x, tree_y)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def tree_div(tree_x: Any, tree_y: Any) -> Any:
  r"""Divide two pytrees leaf-wise (true division).

  Args:
    tree_x: first pytree.
    tree_y: second pytree.

  Returns:
    the quotient of the two pytrees.
  """
  return jtu.tree_map(lambda a, b: a / b, tree_x, tree_y)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def tree_scalar_mul(
    scalar: Union[float, jax.Array],
    tree: Any,
) -> Any:
  r"""Multiply every leaf of a tree by a scalar.

  In infix notation, the function performs ``out = scalar * tree``.

  Args:
    scalar: scalar value.
    tree: pytree.

  Returns:
    a pytree with the same structure as ``tree``.
  """
  def _scale(leaf):
    return scalar * leaf

  return jtu.tree_map(_scale, tree)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def tree_add_scalar_mul(
    tree_x: Any, scalar: Union[float, jax.Array], tree_y: Any
) -> Any:
  r"""Add two trees, where the second tree is scaled by a scalar.

  In infix notation, the function performs ``out = tree_x + scalar * tree_y``.

  Args:
    tree_x: first pytree.
    scalar: scalar value.
    tree_y: second pytree.

  Returns:
    a pytree with the same structure as ``tree_x`` and ``tree_y``.
  """
  scalar_arr = jnp.asarray(scalar)

  def _axpy(x, y):
    # Cast the scalar to each leaf's dtype so leaf dtypes are preserved.
    return x + scalar_arr.astype(x.dtype) * y

  return jtu.tree_map(_axpy, tree_x, tree_y)
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
# Inner product evaluated at the highest available precision.
_vdot = functools.partial(jnp.vdot, precision=jax.lax.Precision.HIGHEST)


def _vdot_safe(a, b):
  """Highest-precision vdot that first coerces both operands to arrays."""
  arr_a = jnp.asarray(a)
  arr_b = jnp.asarray(b)
  return _vdot(arr_a, arr_b)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def tree_vdot(tree_x: Any, tree_y: Any) -> chex.Numeric:
  r"""Compute the inner product between two pytrees.

  Examples:

    >>> optax.tree_utils.tree_vdot(
    ...   {'a': jnp.array([1, 2]), 'b': jnp.array([1, 2])},
    ...   {'a': jnp.array([-1, -1]), 'b': jnp.array([1, 1])},
    ... )
    Array(0, dtype=int32)

  Args:
    tree_x: first pytree to use.
    tree_y: second pytree to use.

  Returns:
    inner product between ``tree_x`` and ``tree_y``, a scalar value.

  Implementation detail: we upcast the values to the highest precision to avoid
  numerical issues.
  """
  # Per-leaf inner products, then a single reduction over the tree.
  leaf_vdots = jtu.tree_map(_vdot_safe, tree_x, tree_y)
  return jtu.tree_reduce(operator.add, leaf_vdots)
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def tree_sum(tree: Any) -> chex.Numeric:
  """Compute the sum of all the elements in a pytree.

  Args:
    tree: pytree.

  Returns:
    a scalar value.
  """
  # Sum each leaf to a scalar first, then reduce across leaves.
  per_leaf = jtu.tree_map(jnp.sum, tree)
  return jtu.tree_reduce(operator.add, per_leaf)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def _square(leaf):
  """Squared magnitude of a leaf; handles complex leaves via real/imag parts."""
  real_part = jnp.square(leaf.real)
  imag_part = jnp.square(leaf.imag)
  return real_part + imag_part
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def tree_l2_norm(tree: Any, squared: bool = False) -> chex.Numeric:
  """Compute the l2 norm of a pytree.

  Args:
    tree: pytree.
    squared: whether the norm should be returned squared or not.

  Returns:
    a scalar value.
  """
  sqnorm = tree_sum(jtu.tree_map(_square, tree))
  # Only take the square root when the caller wants the actual norm.
  return sqnorm if squared else jnp.sqrt(sqnorm)
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
def tree_l1_norm(tree: Any) -> chex.Numeric:
  """Compute the l1 norm of a pytree.

  Args:
    tree: pytree.

  Returns:
    a scalar value.
  """
  # Sum of absolute values across every leaf.
  return tree_sum(jtu.tree_map(jnp.abs, tree))
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def tree_zeros_like(
    tree: Any,
    dtype: Optional[jax.typing.DTypeLike] = None,
) -> Any:
  """Creates an all-zeros tree with the same structure.

  Args:
    tree: pytree.
    dtype: optional dtype to use for the tree of zeros.

  Returns:
    an all-zeros tree with the same structure as ``tree``.
  """
  def _zeros(leaf):
    return jnp.zeros_like(leaf, dtype=dtype)

  return jtu.tree_map(_zeros, tree)
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def tree_ones_like(
    tree: Any,
    dtype: Optional[jax.typing.DTypeLike] = None,
) -> Any:
  """Creates an all-ones tree with the same structure.

  Args:
    tree: pytree.
    dtype: optional dtype to use for the tree of ones.

  Returns:
    an all-ones tree with the same structure as ``tree``.
  """
  def _ones(leaf):
    return jnp.ones_like(leaf, dtype=dtype)

  return jtu.tree_map(_ones, tree)
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def tree_full_like(
    tree: Any,
    fill_value: jax.typing.ArrayLike,
    dtype: Optional[jax.typing.DTypeLike] = None,
) -> Any:
  """Creates an identical tree where all tensors are filled with ``fill_value``.

  Args:
    tree: pytree.
    fill_value: the fill value for all tensors in the tree.
    dtype: optional dtype to use for the tensors in the tree.

  Returns:
    an tree with the same structure as ``tree``.
  """
  def _fill(leaf):
    return jnp.full_like(leaf, fill_value, dtype=dtype)

  return jtu.tree_map(_fill, tree)
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
def tree_clip(
    tree: Any,
    min_value: Optional[jax.typing.ArrayLike],
    max_value: Optional[jax.typing.ArrayLike],
) -> Any:
  """Creates an identical tree where all tensors are clipped to `[min, max]`.

  Args:
    tree: pytree.
    min_value: min value to clip all tensors to.
    max_value: max value to clip all tensors to.

  Returns:
    an tree with the same structure as ``tree``.

  .. versionadded:: 0.2.3
  """
  def _clip(leaf):
    return jnp.clip(leaf, min_value, max_value)

  return jtu.tree_map(_clip, tree)
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
def tree_update_moment(updates, moments, decay, order):
  """Compute the exponential moving average of the `order`-th moment."""
  def _ema(g, t):
    # New EMA = (1 - decay) * g^order + decay * previous moment.
    return (1 - decay) * (g ** order) + decay * t

  return jtu.tree_map(_ema, updates, moments)
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def tree_update_infinity_moment(updates, moments, decay, eps):
  """Compute the exponential moving average of the infinity norm."""
  def _inf_ema(g, t):
    # Infinity-norm EMA: max of |g| + eps and the decayed previous value.
    return jnp.maximum(jnp.abs(g) + eps, decay * t)

  return jtu.tree_map(_inf_ema, updates, moments)
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
def tree_update_moment_per_elem_norm(updates, moments, decay, order):
  """Compute the EMA of the `order`-th moment of the element-wise norm."""

  def orderth_norm(g):
    # Real leaves: plain power. Complex leaves: power of |g|^2 at half order.
    if jnp.isrealobj(g):
      return g ** order
    half_order = order / 2
    # JAX generates different HLO for int and float `order`
    if half_order.is_integer():
      half_order = int(half_order)
    return numerics.abs_sq(g) ** half_order

  def _ema(g, t):
    return (1 - decay) * orderth_norm(g) + decay * t

  return jtu.tree_map(_ema, updates, moments)
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
@functools.partial(jax.jit, inline=True)
def tree_bias_correction(moment, decay, count):
  """Performs bias correction. It becomes a no-op as count goes to infinity."""
  # The conversion to the data type of the moment ensures that bfloat16 remains
  # bfloat16 in the optimizer state. This conversion has to be done after
  # `bias_correction_` is calculated as calculating `decay**count` in low
  # precision can result in it being rounded to 1 and subsequently a
  # "division by zero" error.
  bias_correction_ = 1 - decay**count

  def _correct(t):
    # Perform division in the original precision.
    return t / bias_correction_.astype(t.dtype)

  return jax.tree_util.tree_map(_correct, moment)
|