diff --git a/testbed/huggingface__datasets/.gitignore b/testbed/huggingface__datasets/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..02f80e2f0cd179f445bbd1aad686090db6b2a446 --- /dev/null +++ b/testbed/huggingface__datasets/.gitignore @@ -0,0 +1,67 @@ +# Locked files +*.lock +!dvc.lock + +# Extracted dummy data +datasets/**/dummy_data-zip-extracted/ + +# Compiled python modules. +*.pyc + +# Byte-compiled +_pycache__/ +.cache/ + +# Python egg metadata, regenerated from source files by setuptools. +*.egg-info +.eggs/ + +# PyPI distribution artifacts. +build/ +dist/ + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# pyenv +.python-version + +# Tests +.pytest_cache/ + +# Other +*.DS_Store + +# PyCharm/vscode +.idea +.vscode + +# keep only the empty datasets and metrics directory with it's __init__.py file +/src/*/datasets/* +!/src/*/datasets/__init__.py + +/src/*/metrics/* +!/src/*/metrics/__init__.py + +# Vim +.*.swp + +# playground +/playground + +# Sphinx documentation +docs/_build/ +docs/source/_build/ + +# Benchmark results +report.json +report.md + +# Ruff +.ruff_cache diff --git a/testbed/huggingface__datasets/setup.cfg b/testbed/huggingface__datasets/setup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..c64d667239e95ecb2a07f226ee1a34b0b47287f8 --- /dev/null +++ b/testbed/huggingface__datasets/setup.cfg @@ -0,0 +1,10 @@ +[metadata] +license_files = LICENSE + +[tool:pytest] +# Test fails if a FutureWarning is thrown by `huggingface_hub` +filterwarnings = + error::FutureWarning:huggingface_hub* +markers = + unit: unit test + integration: integration test diff --git a/testbed/huggingface__huggingface_hub/.github/workflows/python-release.yml b/testbed/huggingface__huggingface_hub/.github/workflows/python-release.yml new file mode 100644 index 0000000000000000000000000000000000000000..e2776df5378859df7f0e2d68453cb94e98fc1b57 --- /dev/null +++ b/testbed/huggingface__huggingface_hub/.github/workflows/python-release.yml @@ -0,0 +1,33 @@ +name: Python release + +on: + push: + tags: + - v* + +env: + PYPI_TOKEN: ${{ secrets.PYPI_TOKEN_DIST }} + +jobs: + python_release: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.9 + - name: Install dependencies + run: | + pip install --upgrade pip + pip install setuptools wheel + + - run: python setup.py sdist bdist_wheel + + - run: | + pip install twine + + - name: Upload to PyPi + run: | + twine upload dist/* -u __token__ -p "$PYPI_TOKEN" diff --git a/testbed/huggingface__huggingface_hub/Makefile b/testbed/huggingface__huggingface_hub/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..4d6ac0766a273508fee0213f54ad91003e445247 --- /dev/null +++ b/testbed/huggingface__huggingface_hub/Makefile @@ -0,0 +1,18 @@ +.PHONY: quality style test + + +check_dirs := tests src + + +quality: + black --check $(check_dirs) + isort --check-only $(check_dirs) + flake8 $(check_dirs) + +style: + black $(check_dirs) + isort $(check_dirs) + +test: + HUGGINGFACE_CO_STAGING=1 pytest -sv ./tests/ + diff --git a/testbed/huggingface__pytorch-image-models/.gitattributes b/testbed/huggingface__pytorch-image-models/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..2f77e919cdc5d76d29bfc56dbc96e814aee0dc9a --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/.gitattributes @@ -0,0 +1 @@ +*.ipynb linguist-documentation diff --git a/testbed/huggingface__pytorch-image-models/.github/FUNDING.yml b/testbed/huggingface__pytorch-image-models/.github/FUNDING.yml new file mode 100644 index 0000000000000000000000000000000000000000..ab0474f2976cdbdc5bcc2de381a8d8cef57c88d5 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/.github/FUNDING.yml @@ -0,0 +1,2 @@ +# These are supported funding model platforms +github: rwightman diff --git a/testbed/huggingface__pytorch-image-models/.github/ISSUE_TEMPLATE/bug_report.md b/testbed/huggingface__pytorch-image-models/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000000000000000000000000000000000000..bced110f65d7bfe0bcd23717db010995707d7d25 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,32 @@ +--- +name: Bug report +about: Create a bug report to help us improve. Issues are for reporting bugs or requesting + features, the discussion forum is available for asking questions or seeking help + from the community. +title: "[BUG] Issue title..." +labels: bug +assignees: rwightman + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: +1. +2. + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Desktop (please complete the following information):** + - OS: [e.g. Windows 10, Ubuntu 18.04] + - This repository version [e.g. pip 0.3.1 or commit ref] + - PyTorch version w/ CUDA/cuDNN [e.g. from `conda list`, 1.7.0 py3.8_cuda11.0.221_cudnn8.0.3_0] + +**Additional context** +Add any other context about the problem here. diff --git a/testbed/huggingface__pytorch-image-models/.github/workflows/tests.yml b/testbed/huggingface__pytorch-image-models/.github/workflows/tests.yml new file mode 100644 index 0000000000000000000000000000000000000000..1136f306727201eeed5ef77bf20b8bac606032b9 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/.github/workflows/tests.yml @@ -0,0 +1,51 @@ +name: Python tests + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +env: + OMP_NUM_THREADS: 2 + MKL_NUM_THREADS: 2 + +jobs: + test: + name: Run tests on ${{ matrix.os }} with Python ${{ matrix.python }} + strategy: + matrix: + os: [ubuntu-latest, macOS-latest] + python: ['3.8'] + torch: ['1.9.0'] + torchvision: ['0.10.0'] + runs-on: ${{ matrix.os }} + + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python }} + - name: Install testing dependencies + run: | + python -m pip install --upgrade pip + pip install pytest pytest-timeout + - name: Install torch on mac + if: startsWith(matrix.os, 'macOS') + run: pip install --no-cache-dir torch==${{ matrix.torch }} torchvision==${{ matrix.torchvision }} + - name: Install torch on ubuntu + if: startsWith(matrix.os, 'ubuntu') + run: | + pip install --no-cache-dir torch==${{ matrix.torch }}+cpu torchvision==${{ matrix.torchvision }}+cpu -f https://download.pytorch.org/whl/torch_stable.html + sudo apt update + sudo apt install -y google-perftools + - name: Install requirements + run: | + if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + pip install --no-cache-dir git+https://github.com/mapillary/inplace_abn.git@v1.1.0 + - name: Run tests + env: + LD_PRELOAD: /usr/lib/x86_64-linux-gnu/libtcmalloc.so.4 + run: | + pytest -vv --durations=0 ./tests diff --git a/testbed/huggingface__pytorch-image-models/.gitignore b/testbed/huggingface__pytorch-image-models/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..54db5359bc7d79a19aa870e684e1fdb4c478a75d --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/.gitignore @@ -0,0 +1,108 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# PyCharm +.idea + +# PyTorch weights +*.tar +*.pth +*.gz +Untitled.ipynb +Testing notebook.ipynb diff --git a/testbed/huggingface__pytorch-image-models/LICENSE b/testbed/huggingface__pytorch-image-models/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..4336819097f8777b9ca64ebe49bc0bc77ae3c5f5 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2019 Ross Wightman + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/testbed/huggingface__pytorch-image-models/MANIFEST.in b/testbed/huggingface__pytorch-image-models/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..4f2d15846513429725906870ff23b04370a41e86 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/MANIFEST.in @@ -0,0 +1,2 @@ +include timm/models/pruned/*.txt + diff --git a/testbed/huggingface__pytorch-image-models/README.md b/testbed/huggingface__pytorch-image-models/README.md new file mode 100644 index 0000000000000000000000000000000000000000..fda37ca073a995e32ac5523e77e126f317ce02ce --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/README.md @@ -0,0 +1,423 @@ +# PyTorch Image Models +- [Sponsors](#sponsors) +- [What's New](#whats-new) +- [Introduction](#introduction) +- [Models](#models) +- [Features](#features) +- [Results](#results) +- [Getting Started (Documentation)](#getting-started-documentation) +- [Train, Validation, Inference Scripts](#train-validation-inference-scripts) +- [Awesome PyTorch Resources](#awesome-pytorch-resources) +- [Licenses](#licenses) +- [Citing](#citing) + +## Sponsors + +A big thank you to my [GitHub Sponsors](https://github.com/sponsors/rwightman) for their support! + +In addition to the sponsors at the link above, I've received hardware and/or cloud resources from +* Nvidia (https://www.nvidia.com/en-us/) +* TFRC (https://www.tensorflow.org/tfrc) + +I'm fortunate to be able to dedicate significant time and money of my own supporting this and other open source projects. However, as the projects increase in scope, outside support is needed to continue with the current trajectory of hardware, infrastructure, and electricty costs. + +## What's New + +### Aug 18, 2021 +* Optimizer bonanza! + * Add LAMB and LARS optimizers, incl trust ratio clipping options. Tweaked to work properly in PyTorch XLA (tested on TPUs w/ `timm bits` [branch](https://github.com/rwightman/pytorch-image-models/tree/bits_and_tpu/timm/bits)) + * Add MADGRAD from FB research w/ a few tweaks (decoupled decay option, step handling that works with PyTorch XLA) + * Some cleanup on all optimizers and factory. No more `.data`, a bit more consistency, unit tests for all! + * SGDP and AdamP still won't work with PyTorch XLA but others should (have yet to test Adabelief, Adafactor, Adahessian myself). +* EfficientNet-V2 XL TF ported weights added, but they don't validate well in PyTorch (L is better). The pre-processing for the V2 TF training is a bit diff and the fine-tuned 21k -> 1k weights are very sensitive and less robust than the 1k weights. +* Added PyTorch trained EfficientNet-V2 'Tiny' w/ GlobalContext attn weights. Only .1-.2 top-1 better than the SE so more of a curiosity for those interested. + +### July 12, 2021 +* Add XCiT models from [official facebook impl](https://github.com/facebookresearch/xcit). Contributed by [Alexander Soare](https://github.com/alexander-soare) + +### July 5-9, 2021 +* Add `efficientnetv2_rw_t` weights, a custom 'tiny' 13.6M param variant that is a bit better than (non NoisyStudent) B3 models. Both faster and better accuracy (at same or lower res) + * top-1 82.34 @ 288x288 and 82.54 @ 320x320 +* Add [SAM pretrained](https://arxiv.org/abs/2106.01548) in1k weight for ViT B/16 (`vit_base_patch16_sam_224`) and B/32 (`vit_base_patch32_sam_224`) models. +* Add 'Aggregating Nested Transformer' (NesT) w/ weights converted from official [Flax impl](https://github.com/google-research/nested-transformer). Contributed by [Alexander Soare](https://github.com/alexander-soare). + * `jx_nest_base` - 83.534, `jx_nest_small` - 83.120, `jx_nest_tiny` - 81.426 + +### June 23, 2021 +* Reproduce gMLP model training, `gmlp_s16_224` trained to 79.6 top-1, matching [paper](https://arxiv.org/abs/2105.08050). Hparams for this and other recent MLP training [here](https://gist.github.com/rwightman/d6c264a9001f9167e06c209f630b2cc6) + +### June 20, 2021 +* Release Vision Transformer 'AugReg' weights from [How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers](https://arxiv.org/abs/2106.10270) + * .npz weight loading support added, can load any of the 50K+ weights from the [AugReg series](https://console.cloud.google.com/storage/browser/vit_models/augreg) + * See [example notebook](https://colab.research.google.com/github/google-research/vision_transformer/blob/master/vit_jax_augreg.ipynb) from [official impl](https://github.com/google-research/vision_transformer/) for navigating the augreg weights + * Replaced all default weights w/ best AugReg variant (if possible). All AugReg 21k classifiers work. + * Highlights: `vit_large_patch16_384` (87.1 top-1), `vit_large_r50_s32_384` (86.2 top-1), `vit_base_patch16_384` (86.0 top-1) + * `vit_deit_*` renamed to just `deit_*` + * Remove my old small model, replace with DeiT compatible small w/ AugReg weights +* Add 1st training of my `gmixer_24_224` MLP /w GLU, 78.1 top-1 w/ 25M params. +* Add weights from official ResMLP release (https://github.com/facebookresearch/deit) +* Add `eca_nfnet_l2` weights from my 'lightweight' series. 84.7 top-1 at 384x384. +* Add distilled BiT 50x1 student and 152x2 Teacher weights from [Knowledge distillation: A good teacher is patient and consistent](https://arxiv.org/abs/2106.05237) +* NFNets and ResNetV2-BiT models work w/ Pytorch XLA now + * weight standardization uses F.batch_norm instead of std_mean (std_mean wasn't lowered) + * eps values adjusted, will be slight differences but should be quite close +* Improve test coverage and classifier interface of non-conv (vision transformer and mlp) models +* Cleanup a few classifier / flatten details for models w/ conv classifiers or early global pool +* Please report any regressions, this PR touched quite a few models. + +### June 8, 2021 +* Add first ResMLP weights, trained in PyTorch XLA on TPU-VM w/ my XLA branch. 24 block variant, 79.2 top-1. +* Add ResNet51-Q model w/ pretrained weights at 82.36 top-1. + * NFNet inspired block layout with quad layer stem and no maxpool + * Same param count (35.7M) and throughput as ResNetRS-50 but +1.5 top-1 @ 224x224 and +2.5 top-1 at 288x288 + +### May 25, 2021 +* Add LeViT, Visformer, ConViT (PR by Aman Arora), Twins (PR by paper authors) transformer models +* Add ResMLP and gMLP MLP vision models to the existing MLP Mixer impl +* Fix a number of torchscript issues with various vision transformer models +* Cleanup input_size/img_size override handling and improve testing / test coverage for all vision transformer and MLP models +* More flexible pos embedding resize (non-square) for ViT and TnT. Thanks [Alexander Soare](https://github.com/alexander-soare) +* Add `efficientnetv2_rw_m` model and weights (started training before official code). 84.8 top-1, 53M params. + +### May 14, 2021 +* Add EfficientNet-V2 official model defs w/ ported weights from official [Tensorflow/Keras](https://github.com/google/automl/tree/master/efficientnetv2) impl. + * 1k trained variants: `tf_efficientnetv2_s/m/l` + * 21k trained variants: `tf_efficientnetv2_s/m/l_in21k` + * 21k pretrained -> 1k fine-tuned: `tf_efficientnetv2_s/m/l_in21ft1k` + * v2 models w/ v1 scaling: `tf_efficientnetv2_b0` through `b3` + * Rename my prev V2 guess `efficientnet_v2s` -> `efficientnetv2_rw_s` + * Some blank `efficientnetv2_*` models in-place for future native PyTorch training + +### May 5, 2021 +* Add MLP-Mixer models and port pretrained weights from [Google JAX impl](https://github.com/google-research/vision_transformer/tree/linen) +* Add CaiT models and pretrained weights from [FB](https://github.com/facebookresearch/deit) +* Add ResNet-RS models and weights from [TF](https://github.com/tensorflow/tpu/tree/master/models/official/resnet/resnet_rs). Thanks [Aman Arora](https://github.com/amaarora) +* Add CoaT models and weights. Thanks [Mohammed Rizin](https://github.com/morizin) +* Add new ImageNet-21k weights & finetuned weights for TResNet, MobileNet-V3, ViT models. Thanks [mrT](https://github.com/mrT23) +* Add GhostNet models and weights. Thanks [Kai Han](https://github.com/iamhankai) +* Update ByoaNet attention modules + * Improve SA module inits + * Hack together experimental stand-alone Swin based attn module and `swinnet` + * Consistent '26t' model defs for experiments. +* Add improved Efficientnet-V2S (prelim model def) weights. 83.8 top-1. +* WandB logging support + +### April 13, 2021 +* Add Swin Transformer models and weights from https://github.com/microsoft/Swin-Transformer + +### April 12, 2021 +* Add ECA-NFNet-L1 (slimmed down F1 w/ SiLU, 41M params) trained with this code. 84% top-1 @ 320x320. Trained at 256x256. +* Add EfficientNet-V2S model (unverified model definition) weights. 83.3 top-1 @ 288x288. Only trained single res 224. Working on progressive training. +* Add ByoaNet model definition (Bring-your-own-attention) w/ SelfAttention block and corresponding SA/SA-like modules and model defs + * Lambda Networks - https://arxiv.org/abs/2102.08602 + * Bottleneck Transformers - https://arxiv.org/abs/2101.11605 + * Halo Nets - https://arxiv.org/abs/2103.12731 +* Adabelief optimizer contributed by Juntang Zhuang + +### April 1, 2021 +* Add snazzy `benchmark.py` script for bulk `timm` model benchmarking of train and/or inference +* Add Pooling-based Vision Transformer (PiT) models (from https://github.com/naver-ai/pit) + * Merged distilled variant into main for torchscript compatibility + * Some `timm` cleanup/style tweaks and weights have hub download support +* Cleanup Vision Transformer (ViT) models + * Merge distilled (DeiT) model into main so that torchscript can work + * Support updated weight init (defaults to old still) that closer matches original JAX impl (possibly better training from scratch) + * Separate hybrid model defs into different file and add several new model defs to fiddle with, support patch_size != 1 for hybrids + * Fix fine-tuning num_class changes (PiT and ViT) and pos_embed resizing (Vit) with distilled variants + * nn.Sequential for block stack (does not break downstream compat) +* TnT (Transformer-in-Transformer) models contributed by author (from https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT) +* Add RegNetY-160 weights from DeiT teacher model +* Add new NFNet-L0 w/ SE attn (rename `nfnet_l0b`->`nfnet_l0`) weights 82.75 top-1 @ 288x288 +* Some fixes/improvements for TFDS dataset wrapper + +### March 17, 2021 +* Add new ECA-NFNet-L0 (rename `nfnet_l0c`->`eca_nfnet_l0`) weights trained by myself. + * 82.6 top-1 @ 288x288, 82.8 @ 320x320, trained at 224x224 + * Uses SiLU activation, approx 2x faster than `dm_nfnet_f0` and 50% faster than `nfnet_f0s` w/ 1/3 param count +* Integrate [Hugging Face model hub](https://huggingface.co/models) into timm create_model and default_cfg handling for pretrained weight and config sharing (more on this soon!) +* Merge HardCoRe NAS models contributed by https://github.com/yoniaflalo +* Merge PyTorch trained EfficientNet-EL and pruned ES/EL variants contributed by [DeGirum](https://github.com/DeGirum) + + +### March 7, 2021 +* First 0.4.x PyPi release w/ NFNets (& related), ByoB (GPU-Efficient, RepVGG, etc). +* Change feature extraction for pre-activation nets (NFNets, ResNetV2) to return features before activation. +* Tested with PyTorch 1.8 release. Updated CI to use 1.8. +* Benchmarked several arch on RTX 3090, Titan RTX, and V100 across 1.7.1, 1.8, NGC 20.12, and 21.02. Some interesting performance variations to take note of https://gist.github.com/rwightman/bb59f9e245162cee0e38bd66bd8cd77f + +### Feb 18, 2021 +* Add pretrained weights and model variants for NFNet-F* models from [DeepMind Haiku impl](https://github.com/deepmind/deepmind-research/tree/master/nfnets). + * Models are prefixed with `dm_`. They require SAME padding conv, skipinit enabled, and activation gains applied in act fn. + * These models are big, expect to run out of GPU memory. With the GELU activiation + other options, they are roughly 1/2 the inference speed of my SiLU PyTorch optimized `s` variants. + * Original model results are based on pre-processing that is not the same as all other models so you'll see different results in the results csv (once updated). + * Matching the original pre-processing as closely as possible I get these results: + * `dm_nfnet_f6` - 86.352 + * `dm_nfnet_f5` - 86.100 + * `dm_nfnet_f4` - 85.834 + * `dm_nfnet_f3` - 85.676 + * `dm_nfnet_f2` - 85.178 + * `dm_nfnet_f1` - 84.696 + * `dm_nfnet_f0` - 83.464 + +### Feb 16, 2021 +* Add Adaptive Gradient Clipping (AGC) as per https://arxiv.org/abs/2102.06171. Integrated w/ PyTorch gradient clipping via mode arg that defaults to prev 'norm' mode. For backward arg compat, clip-grad arg must be specified to enable when using train.py. + * AGC w/ default clipping factor `--clip-grad .01 --clip-mode agc` + * PyTorch global norm of 1.0 (old behaviour, always norm), `--clip-grad 1.0` + * PyTorch value clipping of 10, `--clip-grad 10. --clip-mode value` + * AGC performance is definitely sensitive to the clipping factor. More experimentation needed to determine good values for smaller batch sizes and optimizers besides those in paper. So far I've found .001-.005 is necessary for stable RMSProp training w/ NFNet/NF-ResNet. + +### Feb 12, 2021 +* Update Normalization-Free nets to include new NFNet-F (https://arxiv.org/abs/2102.06171) model defs + +### Feb 10, 2021 +* First Normalization-Free model training experiments done, + * nf_resnet50 - 80.68 top-1 @ 288x288, 80.31 @ 256x256 + * nf_regnet_b1 - 79.30 @ 288x288, 78.75 @ 256x256 +* More model archs, incl a flexible ByobNet backbone ('Bring-your-own-blocks') + * GPU-Efficient-Networks (https://github.com/idstcv/GPU-Efficient-Networks), impl in `byobnet.py` + * RepVGG (https://github.com/DingXiaoH/RepVGG), impl in `byobnet.py` + * classic VGG (from torchvision, impl in `vgg.py`) +* Refinements to normalizer layer arg handling and normalizer+act layer handling in some models +* Default AMP mode changed to native PyTorch AMP instead of APEX. Issues not being fixed with APEX. Native works with `--channels-last` and `--torchscript` model training, APEX does not. +* Fix a few bugs introduced since last pypi release + +### Feb 8, 2021 +* Add several ResNet weights with ECA attention. 26t & 50t trained @ 256, test @ 320. 269d train @ 256, fine-tune @320, test @ 352. + * `ecaresnet26t` - 79.88 top-1 @ 320x320, 79.08 @ 256x256 + * `ecaresnet50t` - 82.35 top-1 @ 320x320, 81.52 @ 256x256 + * `ecaresnet269d` - 84.93 top-1 @ 352x352, 84.87 @ 320x320 +* Remove separate tiered (`t`) vs tiered_narrow (`tn`) ResNet model defs, all `tn` changed to `t` and `t` models removed (`seresnext26t_32x4d` only model w/ weights that was removed). +* Support model default_cfgs with separate train vs test resolution `test_input_size` and remove extra `_320` suffix ResNet model defs that were just for test. + +### Jan 30, 2021 +* Add initial "Normalization Free" NF-RegNet-B* and NF-ResNet model definitions based on [paper](https://arxiv.org/abs/2101.08692) + +### Jan 25, 2021 +* Add ResNetV2 Big Transfer (BiT) models w/ ImageNet-1k and 21k weights from https://github.com/google-research/big_transfer +* Add official R50+ViT-B/16 hybrid models + weights from https://github.com/google-research/vision_transformer +* ImageNet-21k ViT weights are added w/ model defs and representation layer (pre logits) support + * NOTE: ImageNet-21k classifier heads were zero'd in original weights, they are only useful for transfer learning +* Add model defs and weights for DeiT Vision Transformer models from https://github.com/facebookresearch/deit +* Refactor dataset classes into ImageDataset/IterableImageDataset + dataset specific parser classes +* Add Tensorflow-Datasets (TFDS) wrapper to allow use of TFDS image classification sets with train script + * Ex: `train.py /data/tfds --dataset tfds/oxford_iiit_pet --val-split test --model resnet50 -b 256 --amp --num-classes 37 --opt adamw --lr 3e-4 --weight-decay .001 --pretrained -j 2` +* Add improved .tar dataset parser that reads images from .tar, folder of .tar files, or .tar within .tar + * Run validation on full ImageNet-21k directly from tar w/ BiT model: `validate.py /data/fall11_whole.tar --model resnetv2_50x1_bitm_in21k --amp` +* Models in this update should be stable w/ possible exception of ViT/BiT, possibility of some regressions with train/val scripts and dataset handling + +### Jan 3, 2021 +* Add SE-ResNet-152D weights + * 256x256 val, 0.94 crop top-1 - 83.75 + * 320x320 val, 1.0 crop - 84.36 +* Update [results files](results/) + + +## Introduction + +Py**T**orch **Im**age **M**odels (`timm`) is a collection of image models, layers, utilities, optimizers, schedulers, data-loaders / augmentations, and reference training / validation scripts that aim to pull together a wide variety of SOTA models with ability to reproduce ImageNet training results. + +The work of many others is present here. I've tried to make sure all source material is acknowledged via links to github, arxiv papers, etc in the README, documentation, and code docstrings. Please let me know if I missed anything. + +## Models + +All model architecture families include variants with pretrained weights. There are specific model variants without any weights, it is NOT a bug. Help training new or better weights is always appreciated. Here are some example [training hparams](https://rwightman.github.io/pytorch-image-models/training_hparam_examples) to get you started. + +A full version of the list below with source links can be found in the [documentation](https://rwightman.github.io/pytorch-image-models/models/). + +* Aggregating Nested Transformers - https://arxiv.org/abs/2105.12723 +* Big Transfer ResNetV2 (BiT) - https://arxiv.org/abs/1912.11370 +* Bottleneck Transformers - https://arxiv.org/abs/2101.11605 +* CaiT (Class-Attention in Image Transformers) - https://arxiv.org/abs/2103.17239 +* CoaT (Co-Scale Conv-Attentional Image Transformers) - https://arxiv.org/abs/2104.06399 +* ConViT (Soft Convolutional Inductive Biases Vision Transformers)- https://arxiv.org/abs/2103.10697 +* CspNet (Cross-Stage Partial Networks) - https://arxiv.org/abs/1911.11929 +* DeiT (Vision Transformer) - https://arxiv.org/abs/2012.12877 +* DenseNet - https://arxiv.org/abs/1608.06993 +* DLA - https://arxiv.org/abs/1707.06484 +* DPN (Dual-Path Network) - https://arxiv.org/abs/1707.01629 +* EfficientNet (MBConvNet Family) + * EfficientNet NoisyStudent (B0-B7, L2) - https://arxiv.org/abs/1911.04252 + * EfficientNet AdvProp (B0-B8) - https://arxiv.org/abs/1911.09665 + * EfficientNet (B0-B7) - https://arxiv.org/abs/1905.11946 + * EfficientNet-EdgeTPU (S, M, L) - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html + * EfficientNet V2 - https://arxiv.org/abs/2104.00298 + * FBNet-C - https://arxiv.org/abs/1812.03443 + * MixNet - https://arxiv.org/abs/1907.09595 + * MNASNet B1, A1 (Squeeze-Excite), and Small - https://arxiv.org/abs/1807.11626 + * MobileNet-V2 - https://arxiv.org/abs/1801.04381 + * Single-Path NAS - https://arxiv.org/abs/1904.02877 +* GhostNet - https://arxiv.org/abs/1911.11907 +* gMLP - https://arxiv.org/abs/2105.08050 +* GPU-Efficient Networks - https://arxiv.org/abs/2006.14090 +* Halo Nets - https://arxiv.org/abs/2103.12731 +* HardCoRe-NAS - https://arxiv.org/abs/2102.11646 +* HRNet - https://arxiv.org/abs/1908.07919 +* Inception-V3 - https://arxiv.org/abs/1512.00567 +* Inception-ResNet-V2 and Inception-V4 - https://arxiv.org/abs/1602.07261 +* Lambda Networks - https://arxiv.org/abs/2102.08602 +* LeViT (Vision Transformer in ConvNet's Clothing) - https://arxiv.org/abs/2104.01136 +* MLP-Mixer - https://arxiv.org/abs/2105.01601 +* MobileNet-V3 (MBConvNet w/ Efficient Head) - https://arxiv.org/abs/1905.02244 +* NASNet-A - https://arxiv.org/abs/1707.07012 +* NFNet-F - https://arxiv.org/abs/2102.06171 +* NF-RegNet / NF-ResNet - https://arxiv.org/abs/2101.08692 +* PNasNet - https://arxiv.org/abs/1712.00559 +* Pooling-based Vision Transformer (PiT) - https://arxiv.org/abs/2103.16302 +* RegNet - https://arxiv.org/abs/2003.13678 +* RepVGG - https://arxiv.org/abs/2101.03697 +* ResMLP - https://arxiv.org/abs/2105.03404 +* ResNet/ResNeXt + * ResNet (v1b/v1.5) - https://arxiv.org/abs/1512.03385 + * ResNeXt - https://arxiv.org/abs/1611.05431 + * 'Bag of Tricks' / Gluon C, D, E, S variations - https://arxiv.org/abs/1812.01187 + * Weakly-supervised (WSL) Instagram pretrained / ImageNet tuned ResNeXt101 - https://arxiv.org/abs/1805.00932 + * Semi-supervised (SSL) / Semi-weakly Supervised (SWSL) ResNet/ResNeXts - https://arxiv.org/abs/1905.00546 + * ECA-Net (ECAResNet) - https://arxiv.org/abs/1910.03151v4 + * Squeeze-and-Excitation Networks (SEResNet) - https://arxiv.org/abs/1709.01507 + * ResNet-RS - https://arxiv.org/abs/2103.07579 +* Res2Net - https://arxiv.org/abs/1904.01169 +* ResNeSt - https://arxiv.org/abs/2004.08955 +* ReXNet - https://arxiv.org/abs/2007.00992 +* SelecSLS - https://arxiv.org/abs/1907.00837 +* Selective Kernel Networks - https://arxiv.org/abs/1903.06586 +* Swin Transformer - https://arxiv.org/abs/2103.14030 +* Transformer-iN-Transformer (TNT) - https://arxiv.org/abs/2103.00112 +* TResNet - https://arxiv.org/abs/2003.13630 +* Twins (Spatial Attention in Vision Transformers) - https://arxiv.org/pdf/2104.13840.pdf +* Vision Transformer - https://arxiv.org/abs/2010.11929 +* VovNet V2 and V1 - https://arxiv.org/abs/1911.06667 +* Xception - https://arxiv.org/abs/1610.02357 +* Xception (Modified Aligned, Gluon) - https://arxiv.org/abs/1802.02611 +* Xception (Modified Aligned, TF) - https://arxiv.org/abs/1802.02611 +* XCiT (Cross-Covariance Image Transformers) - https://arxiv.org/abs/2106.09681 + +## Features + +Several (less common) features that I often utilize in my projects are included. Many of their additions are the reason why I maintain my own set of models, instead of using others' via PIP: + +* All models have a common default configuration interface and API for + * accessing/changing the classifier - `get_classifier` and `reset_classifier` + * doing a forward pass on just the features - `forward_features` (see [documentation](https://rwightman.github.io/pytorch-image-models/feature_extraction/)) + * these makes it easy to write consistent network wrappers that work with any of the models +* All models support multi-scale feature map extraction (feature pyramids) via create_model (see [documentation](https://rwightman.github.io/pytorch-image-models/feature_extraction/)) + * `create_model(name, features_only=True, out_indices=..., output_stride=...)` + * `out_indices` creation arg specifies which feature maps to return, these indices are 0 based and generally correspond to the `C(i + 1)` feature level. + * `output_stride` creation arg controls output stride of the network by using dilated convolutions. Most networks are stride 32 by default. Not all networks support this. + * feature map channel counts, reduction level (stride) can be queried AFTER model creation via the `.feature_info` member +* All models have a consistent pretrained weight loader that adapts last linear if necessary, and from 3 to 1 channel input if desired +* High performance [reference training, validation, and inference scripts](https://rwightman.github.io/pytorch-image-models/scripts/) that work in several process/GPU modes: + * NVIDIA DDP w/ a single GPU per process, multiple processes with APEX present (AMP mixed-precision optional) + * PyTorch DistributedDataParallel w/ multi-gpu, single process (AMP disabled as it crashes when enabled) + * PyTorch w/ single GPU single process (AMP optional) +* A dynamic global pool implementation that allows selecting from average pooling, max pooling, average + max, or concat([average, max]) at model creation. All global pooling is adaptive average by default and compatible with pretrained weights. +* A 'Test Time Pool' wrapper that can wrap any of the included models and usually provides improved performance doing inference with input images larger than the training size. Idea adapted from original DPN implementation when I ported (https://github.com/cypw/DPNs) +* Learning rate schedulers + * Ideas adopted from + * [AllenNLP schedulers](https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers) + * [FAIRseq lr_scheduler](https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler) + * SGDR: Stochastic Gradient Descent with Warm Restarts (https://arxiv.org/abs/1608.03983) + * Schedulers include `step`, `cosine` w/ restarts, `tanh` w/ restarts, `plateau` +* Optimizers: + * `rmsprop_tf` adapted from PyTorch RMSProp by myself. Reproduces much improved Tensorflow RMSProp behaviour. + * `radam` by [Liyuan Liu](https://github.com/LiyuanLucasLiu/RAdam) (https://arxiv.org/abs/1908.03265) + * `novograd` by [Masashi Kimura](https://github.com/convergence-lab/novograd) (https://arxiv.org/abs/1905.11286) + * `lookahead` adapted from impl by [Liam](https://github.com/alphadl/lookahead.pytorch) (https://arxiv.org/abs/1907.08610) + * `fused` optimizers by name with [NVIDIA Apex](https://github.com/NVIDIA/apex/tree/master/apex/optimizers) installed + * `adamp` and `sgdp` by [Naver ClovAI](https://github.com/clovaai) (https://arxiv.org/abs/2006.08217) + * `adafactor` adapted from [FAIRSeq impl](https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py) (https://arxiv.org/abs/1804.04235) + * `adahessian` by [David Samuel](https://github.com/davda54/ada-hessian) (https://arxiv.org/abs/2006.00719) +* Random Erasing from [Zhun Zhong](https://github.com/zhunzhong07/Random-Erasing/blob/master/transforms.py) (https://arxiv.org/abs/1708.04896) +* Mixup (https://arxiv.org/abs/1710.09412) +* CutMix (https://arxiv.org/abs/1905.04899) +* AutoAugment (https://arxiv.org/abs/1805.09501) and RandAugment (https://arxiv.org/abs/1909.13719) ImageNet configurations modeled after impl for EfficientNet training (https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py) +* AugMix w/ JSD loss (https://arxiv.org/abs/1912.02781), JSD w/ clean + augmented mixing support works with AutoAugment and RandAugment as well +* SplitBachNorm - allows splitting batch norm layers between clean and augmented (auxiliary batch norm) data +* DropPath aka "Stochastic Depth" (https://arxiv.org/abs/1603.09382) +* DropBlock (https://arxiv.org/abs/1810.12890) +* Blur Pooling (https://arxiv.org/abs/1904.11486) +* Space-to-Depth by [mrT23](https://github.com/mrT23/TResNet/blob/master/src/models/tresnet/layers/space_to_depth.py) (https://arxiv.org/abs/1801.04590) -- original paper? +* Adaptive Gradient Clipping (https://arxiv.org/abs/2102.06171, https://github.com/deepmind/deepmind-research/tree/master/nfnets) +* An extensive selection of channel and/or spatial attention modules: + * Bottleneck Transformer - https://arxiv.org/abs/2101.11605 + * CBAM - https://arxiv.org/abs/1807.06521 + * Effective Squeeze-Excitation (ESE) - https://arxiv.org/abs/1911.06667 + * Efficient Channel Attention (ECA) - https://arxiv.org/abs/1910.03151 + * Gather-Excite (GE) - https://arxiv.org/abs/1810.12348 + * Global Context (GC) - https://arxiv.org/abs/1904.11492 + * Halo - https://arxiv.org/abs/2103.12731 + * Involution - https://arxiv.org/abs/2103.06255 + * Lambda Layer - https://arxiv.org/abs/2102.08602 + * Non-Local (NL) - https://arxiv.org/abs/1711.07971 + * Squeeze-and-Excitation (SE) - https://arxiv.org/abs/1709.01507 + * Selective Kernel (SK) - (https://arxiv.org/abs/1903.06586 + * Split (SPLAT) - https://arxiv.org/abs/2004.08955 + * Shifted Window (SWIN) - https://arxiv.org/abs/2103.14030 + +## Results + +Model validation results can be found in the [documentation](https://rwightman.github.io/pytorch-image-models/results/) and in the [results tables](results/README.md) + +## Getting Started (Documentation) + +My current [documentation](https://rwightman.github.io/pytorch-image-models/) for `timm` covers the basics. + +[timmdocs](https://fastai.github.io/timmdocs/) is quickly becoming a much more comprehensive set of documentation for `timm`. A big thanks to [Aman Arora](https://github.com/amaarora) for his efforts creating timmdocs. + +[paperswithcode](https://paperswithcode.com/lib/timm) is a good resource for browsing the models within `timm`. + +## Train, Validation, Inference Scripts + +The root folder of the repository contains reference train, validation, and inference scripts that work with the included models and other features of this repository. They are adaptable for other datasets and use cases with a little hacking. See [documentation](https://rwightman.github.io/pytorch-image-models/scripts/) for some basics and [training hparams](https://rwightman.github.io/pytorch-image-models/training_hparam_examples) for some train examples that produce SOTA ImageNet results. + +## Awesome PyTorch Resources + +One of the greatest assets of PyTorch is the community and their contributions. A few of my favourite resources that pair well with the models and components here are listed below. + +### Object Detection, Instance and Semantic Segmentation +* Detectron2 - https://github.com/facebookresearch/detectron2 +* Segmentation Models (Semantic) - https://github.com/qubvel/segmentation_models.pytorch +* EfficientDet (Obj Det, Semantic soon) - https://github.com/rwightman/efficientdet-pytorch + +### Computer Vision / Image Augmentation +* Albumentations - https://github.com/albumentations-team/albumentations +* Kornia - https://github.com/kornia/kornia + +### Knowledge Distillation +* RepDistiller - https://github.com/HobbitLong/RepDistiller +* torchdistill - https://github.com/yoshitomo-matsubara/torchdistill + +### Metric Learning +* PyTorch Metric Learning - https://github.com/KevinMusgrave/pytorch-metric-learning + +### Training / Frameworks +* fastai - https://github.com/fastai/fastai + +## Licenses + +### Code +The code here is licensed Apache 2.0. I've taken care to make sure any third party code included or adapted has compatible (permissive) licenses such as MIT, BSD, etc. I've made an effort to avoid any GPL / LGPL conflicts. That said, it is your responsibility to ensure you comply with licenses here and conditions of any dependent licenses. Where applicable, I've linked the sources/references for various components in docstrings. If you think I've missed anything please create an issue. + +### Pretrained Weights +So far all of the pretrained weights available here are pretrained on ImageNet with a select few that have some additional pretraining (see extra note below). ImageNet was released for non-commercial research purposes only (https://image-net.org/download). It's not clear what the implications of that are for the use of pretrained weights from that dataset. Any models I have trained with ImageNet are done for research purposes and one should assume that the original dataset license applies to the weights. It's best to seek legal advice if you intend to use the pretrained weights in a commercial product. + +#### Pretrained on more than ImageNet +Several weights included or references here were pretrained with proprietary datasets that I do not have access to. These include the Facebook WSL, SSL, SWSL ResNe(Xt) and the Google Noisy Student EfficientNet models. The Facebook models have an explicit non-commercial license (CC-BY-NC 4.0, https://github.com/facebookresearch/semi-supervised-ImageNet1K-models, https://github.com/facebookresearch/WSL-Images). The Google models do not appear to have any restriction beyond the Apache 2.0 license (and ImageNet concerns). In either case, you should contact Facebook or Google with any questions. + +## Citing + +### BibTeX + +```bibtex +@misc{rw2019timm, + author = {Ross Wightman}, + title = {PyTorch Image Models}, + year = {2019}, + publisher = {GitHub}, + journal = {GitHub repository}, + doi = {10.5281/zenodo.4414861}, + howpublished = {\url{https://github.com/rwightman/pytorch-image-models}} +} +``` + +### Latest DOI + +[![DOI](https://zenodo.org/badge/168799526.svg)](https://zenodo.org/badge/latestdoi/168799526) diff --git a/testbed/huggingface__pytorch-image-models/avg_checkpoints.py b/testbed/huggingface__pytorch-image-models/avg_checkpoints.py new file mode 100644 index 0000000000000000000000000000000000000000..1f7604b05f221f1ad95b4eb61f9936f92526770b --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/avg_checkpoints.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python3 +""" Checkpoint Averaging Script + +This script averages all model weights for checkpoints in specified path that match +the specified filter wildcard. All checkpoints must be from the exact same model. + +For any hope of decent results, the checkpoints should be from the same or child +(via resumes) training session. This can be viewed as similar to maintaining running +EMA (exponential moving average) of the model weights or performing SWA (stochastic +weight averaging), but post-training. + +Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman) +""" +import torch +import argparse +import os +import glob +import hashlib +from timm.models.helpers import load_state_dict + +parser = argparse.ArgumentParser(description='PyTorch Checkpoint Averager') +parser.add_argument('--input', default='', type=str, metavar='PATH', + help='path to base input folder containing checkpoints') +parser.add_argument('--filter', default='*.pth.tar', type=str, metavar='WILDCARD', + help='checkpoint filter (path wildcard)') +parser.add_argument('--output', default='./averaged.pth', type=str, metavar='PATH', + help='output filename') +parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true', + help='Force not using ema version of weights (if present)') +parser.add_argument('--no-sort', dest='no_sort', action='store_true', + help='Do not sort and select by checkpoint metric, also makes "n" argument irrelevant') +parser.add_argument('-n', type=int, default=10, metavar='N', + help='Number of checkpoints to average') + + +def checkpoint_metric(checkpoint_path): + if not checkpoint_path or not os.path.isfile(checkpoint_path): + return {} + print("=> Extracting metric from checkpoint '{}'".format(checkpoint_path)) + checkpoint = torch.load(checkpoint_path, map_location='cpu') + metric = None + if 'metric' in checkpoint: + metric = checkpoint['metric'] + return metric + + +def main(): + args = parser.parse_args() + # by default use the EMA weights (if present) + args.use_ema = not args.no_use_ema + # by default sort by checkpoint metric (if present) and avg top n checkpoints + args.sort = not args.no_sort + + if os.path.exists(args.output): + print("Error: Output filename ({}) already exists.".format(args.output)) + exit(1) + + pattern = args.input + if not args.input.endswith(os.path.sep) and not args.filter.startswith(os.path.sep): + pattern += os.path.sep + pattern += args.filter + checkpoints = glob.glob(pattern, recursive=True) + + if args.sort: + checkpoint_metrics = [] + for c in checkpoints: + metric = checkpoint_metric(c) + if metric is not None: + checkpoint_metrics.append((metric, c)) + checkpoint_metrics = list(sorted(checkpoint_metrics)) + checkpoint_metrics = checkpoint_metrics[-args.n:] + print("Selected checkpoints:") + [print(m, c) for m, c in checkpoint_metrics] + avg_checkpoints = [c for m, c in checkpoint_metrics] + else: + avg_checkpoints = checkpoints + print("Selected checkpoints:") + [print(c) for c in checkpoints] + + avg_state_dict = {} + avg_counts = {} + for c in avg_checkpoints: + new_state_dict = load_state_dict(c, args.use_ema) + if not new_state_dict: + print("Error: Checkpoint ({}) doesn't exist".format(args.checkpoint)) + continue + + for k, v in new_state_dict.items(): + if k not in avg_state_dict: + avg_state_dict[k] = v.clone().to(dtype=torch.float64) + avg_counts[k] = 1 + else: + avg_state_dict[k] += v.to(dtype=torch.float64) + avg_counts[k] += 1 + + for k, v in avg_state_dict.items(): + v.div_(avg_counts[k]) + + # float32 overflow seems unlikely based on weights seen to date, but who knows + float32_info = torch.finfo(torch.float32) + final_state_dict = {} + for k, v in avg_state_dict.items(): + v = v.clamp(float32_info.min, float32_info.max) + final_state_dict[k] = v.to(dtype=torch.float32) + + try: + torch.save(final_state_dict, args.output, _use_new_zipfile_serialization=False) + except: + torch.save(final_state_dict, args.output) + + with open(args.output, 'rb') as f: + sha_hash = hashlib.sha256(f.read()).hexdigest() + print("=> Saved state_dict to '{}, SHA256: {}'".format(args.output, sha_hash)) + + +if __name__ == '__main__': + main() diff --git a/testbed/huggingface__pytorch-image-models/benchmark.py b/testbed/huggingface__pytorch-image-models/benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..903bb817be6ba60dc3277ab3e66ae30d54e8b824 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/benchmark.py @@ -0,0 +1,481 @@ +#!/usr/bin/env python3 +""" Model Benchmark Script + +An inference and train step benchmark script for timm models. + +Hacked together by Ross Wightman (https://github.com/rwightman) +""" +import argparse +import os +import csv +import json +import time +import logging +import torch +import torch.nn as nn +import torch.nn.parallel +from collections import OrderedDict +from contextlib import suppress +from functools import partial + +from timm.models import create_model, is_model, list_models +from timm.optim import create_optimizer_v2 +from timm.data import resolve_data_config +from timm.utils import AverageMeter, setup_default_logging + + +has_apex = False +try: + from apex import amp + has_apex = True +except ImportError: + pass + +has_native_amp = False +try: + if getattr(torch.cuda.amp, 'autocast') is not None: + has_native_amp = True +except AttributeError: + pass + +torch.backends.cudnn.benchmark = True +_logger = logging.getLogger('validate') + + +parser = argparse.ArgumentParser(description='PyTorch Benchmark') + +# benchmark specific args +parser.add_argument('--model-list', metavar='NAME', default='', + help='txt file based list of model names to benchmark') +parser.add_argument('--bench', default='both', type=str, + help="Benchmark mode. One of 'inference', 'train', 'both'. Defaults to 'both'") +parser.add_argument('--detail', action='store_true', default=False, + help='Provide train fwd/bwd/opt breakdown detail if True. Defaults to False') +parser.add_argument('--results-file', default='', type=str, metavar='FILENAME', + help='Output csv file for validation results (summary)') +parser.add_argument('--num-warm-iter', default=10, type=int, + metavar='N', help='Number of warmup iterations (default: 10)') +parser.add_argument('--num-bench-iter', default=40, type=int, + metavar='N', help='Number of benchmark iterations (default: 40)') + +# common inference / train args +parser.add_argument('--model', '-m', metavar='NAME', default='resnet50', + help='model architecture (default: resnet50)') +parser.add_argument('-b', '--batch-size', default=256, type=int, + metavar='N', help='mini-batch size (default: 256)') +parser.add_argument('--img-size', default=None, type=int, + metavar='N', help='Input image dimension, uses model default if empty') +parser.add_argument('--input-size', default=None, nargs=3, type=int, + metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty') +parser.add_argument('--num-classes', type=int, default=None, + help='Number classes in dataset') +parser.add_argument('--gp', default=None, type=str, metavar='POOL', + help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.') +parser.add_argument('--channels-last', action='store_true', default=False, + help='Use channels_last memory layout') +parser.add_argument('--amp', action='store_true', default=False, + help='use PyTorch Native AMP for mixed precision training. Overrides --precision arg.') +parser.add_argument('--precision', default='float32', type=str, + help='Numeric precision. One of (amp, float32, float16, bfloat16, tf32)') +parser.add_argument('--torchscript', dest='torchscript', action='store_true', + help='convert model torchscript for inference') + + +# train optimizer parameters +parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', + help='Optimizer (default: "sgd"') +parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON', + help='Optimizer Epsilon (default: None, use opt default)') +parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA', + help='Optimizer Betas (default: None, use opt default)') +parser.add_argument('--momentum', type=float, default=0.9, metavar='M', + help='Optimizer momentum (default: 0.9)') +parser.add_argument('--weight-decay', type=float, default=0.0001, + help='weight decay (default: 0.0001)') +parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM', + help='Clip gradient norm (default: None, no clipping)') +parser.add_argument('--clip-mode', type=str, default='norm', + help='Gradient clipping mode. One of ("norm", "value", "agc")') + + +# model regularization / loss params that impact model or loss fn +parser.add_argument('--smoothing', type=float, default=0.1, + help='Label smoothing (default: 0.1)') +parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', + help='Dropout rate (default: 0.)') +parser.add_argument('--drop-path', type=float, default=None, metavar='PCT', + help='Drop path rate (default: None)') +parser.add_argument('--drop-block', type=float, default=None, metavar='PCT', + help='Drop block rate (default: None)') + + +def timestamp(sync=False): + return time.perf_counter() + + +def cuda_timestamp(sync=False, device=None): + if sync: + torch.cuda.synchronize(device=device) + return time.perf_counter() + + +def count_params(model: nn.Module): + return sum([m.numel() for m in model.parameters()]) + + +def resolve_precision(precision: str): + assert precision in ('amp', 'float16', 'bfloat16', 'float32') + use_amp = False + model_dtype = torch.float32 + data_dtype = torch.float32 + if precision == 'amp': + use_amp = True + elif precision == 'float16': + model_dtype = torch.float16 + data_dtype = torch.float16 + elif precision == 'bfloat16': + model_dtype = torch.bfloat16 + data_dtype = torch.bfloat16 + return use_amp, model_dtype, data_dtype + + +class BenchmarkRunner: + def __init__( + self, model_name, detail=False, device='cuda', torchscript=False, precision='float32', + num_warm_iter=10, num_bench_iter=50, **kwargs): + self.model_name = model_name + self.detail = detail + self.device = device + self.use_amp, self.model_dtype, self.data_dtype = resolve_precision(precision) + self.channels_last = kwargs.pop('channels_last', False) + self.amp_autocast = torch.cuda.amp.autocast if self.use_amp else suppress + + self.model = create_model( + model_name, + num_classes=kwargs.pop('num_classes', None), + in_chans=3, + global_pool=kwargs.pop('gp', 'fast'), + scriptable=torchscript) + self.model.to( + device=self.device, + dtype=self.model_dtype, + memory_format=torch.channels_last if self.channels_last else None) + self.num_classes = self.model.num_classes + self.param_count = count_params(self.model) + _logger.info('Model %s created, param count: %d' % (model_name, self.param_count)) + if torchscript: + self.model = torch.jit.script(self.model) + + data_config = resolve_data_config(kwargs, model=self.model, use_test_size=True) + self.input_size = data_config['input_size'] + self.batch_size = kwargs.pop('batch_size', 256) + + self.example_inputs = None + self.num_warm_iter = num_warm_iter + self.num_bench_iter = num_bench_iter + self.log_freq = num_bench_iter // 5 + if 'cuda' in self.device: + self.time_fn = partial(cuda_timestamp, device=self.device) + else: + self.time_fn = timestamp + + def _init_input(self): + self.example_inputs = torch.randn( + (self.batch_size,) + self.input_size, device=self.device, dtype=self.data_dtype) + if self.channels_last: + self.example_inputs = self.example_inputs.contiguous(memory_format=torch.channels_last) + + +class InferenceBenchmarkRunner(BenchmarkRunner): + + def __init__(self, model_name, device='cuda', torchscript=False, **kwargs): + super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs) + self.model.eval() + + def run(self): + def _step(): + t_step_start = self.time_fn() + with self.amp_autocast(): + output = self.model(self.example_inputs) + t_step_end = self.time_fn(True) + return t_step_end - t_step_start + + _logger.info( + f'Running inference benchmark on {self.model_name} for {self.num_bench_iter} steps w/ ' + f'input size {self.input_size} and batch size {self.batch_size}.') + + with torch.no_grad(): + self._init_input() + + for _ in range(self.num_warm_iter): + _step() + + total_step = 0. + num_samples = 0 + t_run_start = self.time_fn() + for i in range(self.num_bench_iter): + delta_fwd = _step() + total_step += delta_fwd + num_samples += self.batch_size + num_steps = i + 1 + if num_steps % self.log_freq == 0: + _logger.info( + f"Infer [{num_steps}/{self.num_bench_iter}]." + f" {num_samples / total_step:0.2f} samples/sec." + f" {1000 * total_step / num_steps:0.3f} ms/step.") + t_run_end = self.time_fn(True) + t_run_elapsed = t_run_end - t_run_start + + results = dict( + samples_per_sec=round(num_samples / t_run_elapsed, 2), + step_time=round(1000 * total_step / self.num_bench_iter, 3), + batch_size=self.batch_size, + img_size=self.input_size[-1], + param_count=round(self.param_count / 1e6, 2), + ) + + _logger.info( + f"Inference benchmark of {self.model_name} done. " + f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/step") + + return results + + +class TrainBenchmarkRunner(BenchmarkRunner): + + def __init__(self, model_name, device='cuda', torchscript=False, **kwargs): + super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs) + self.model.train() + + if kwargs.pop('smoothing', 0) > 0: + self.loss = nn.CrossEntropyLoss().to(self.device) + else: + self.loss = nn.CrossEntropyLoss().to(self.device) + self.target_shape = tuple() + + self.optimizer = create_optimizer_v2( + self.model, + opt=kwargs.pop('opt', 'sgd'), + lr=kwargs.pop('lr', 1e-4)) + + def _gen_target(self, batch_size): + return torch.empty( + (batch_size,) + self.target_shape, device=self.device, dtype=torch.long).random_(self.num_classes) + + def run(self): + def _step(detail=False): + self.optimizer.zero_grad() # can this be ignored? + t_start = self.time_fn() + t_fwd_end = t_start + t_bwd_end = t_start + with self.amp_autocast(): + output = self.model(self.example_inputs) + if isinstance(output, tuple): + output = output[0] + if detail: + t_fwd_end = self.time_fn(True) + target = self._gen_target(output.shape[0]) + self.loss(output, target).backward() + if detail: + t_bwd_end = self.time_fn(True) + self.optimizer.step() + t_end = self.time_fn(True) + if detail: + delta_fwd = t_fwd_end - t_start + delta_bwd = t_bwd_end - t_fwd_end + delta_opt = t_end - t_bwd_end + return delta_fwd, delta_bwd, delta_opt + else: + delta_step = t_end - t_start + return delta_step + + _logger.info( + f'Running train benchmark on {self.model_name} for {self.num_bench_iter} steps w/ ' + f'input size {self.input_size} and batch size {self.batch_size}.') + + self._init_input() + + for _ in range(self.num_warm_iter): + _step() + + t_run_start = self.time_fn() + if self.detail: + total_fwd = 0. + total_bwd = 0. + total_opt = 0. + num_samples = 0 + for i in range(self.num_bench_iter): + delta_fwd, delta_bwd, delta_opt = _step(True) + num_samples += self.batch_size + total_fwd += delta_fwd + total_bwd += delta_bwd + total_opt += delta_opt + num_steps = (i + 1) + if num_steps % self.log_freq == 0: + total_step = total_fwd + total_bwd + total_opt + _logger.info( + f"Train [{num_steps}/{self.num_bench_iter}]." + f" {num_samples / total_step:0.2f} samples/sec." + f" {1000 * total_fwd / num_steps:0.3f} ms/step fwd," + f" {1000 * total_bwd / num_steps:0.3f} ms/step bwd," + f" {1000 * total_opt / num_steps:0.3f} ms/step opt." + ) + total_step = total_fwd + total_bwd + total_opt + t_run_elapsed = self.time_fn() - t_run_start + results = dict( + samples_per_sec=round(num_samples / t_run_elapsed, 2), + step_time=round(1000 * total_step / self.num_bench_iter, 3), + fwd_time=round(1000 * total_fwd / self.num_bench_iter, 3), + bwd_time=round(1000 * total_bwd / self.num_bench_iter, 3), + opt_time=round(1000 * total_opt / self.num_bench_iter, 3), + batch_size=self.batch_size, + img_size=self.input_size[-1], + param_count=round(self.param_count / 1e6, 2), + ) + else: + total_step = 0. + num_samples = 0 + for i in range(self.num_bench_iter): + delta_step = _step(False) + num_samples += self.batch_size + total_step += delta_step + num_steps = (i + 1) + if num_steps % self.log_freq == 0: + _logger.info( + f"Train [{num_steps}/{self.num_bench_iter}]." + f" {num_samples / total_step:0.2f} samples/sec." + f" {1000 * total_step / num_steps:0.3f} ms/step.") + t_run_elapsed = self.time_fn() - t_run_start + results = dict( + samples_per_sec=round(num_samples / t_run_elapsed, 2), + step_time=round(1000 * total_step / self.num_bench_iter, 3), + batch_size=self.batch_size, + img_size=self.input_size[-1], + param_count=round(self.param_count / 1e6, 2), + ) + + _logger.info( + f"Train benchmark of {self.model_name} done. " + f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/sample") + + return results + + +def decay_batch_exp(batch_size, factor=0.5, divisor=16): + out_batch_size = batch_size * factor + if out_batch_size > divisor: + out_batch_size = (out_batch_size + 1) // divisor * divisor + else: + out_batch_size = batch_size - 1 + return max(0, int(out_batch_size)) + + +def _try_run(model_name, bench_fn, initial_batch_size, bench_kwargs): + batch_size = initial_batch_size + results = dict() + while batch_size >= 1: + torch.cuda.empty_cache() + try: + bench = bench_fn(model_name=model_name, batch_size=batch_size, **bench_kwargs) + results = bench.run() + return results + except RuntimeError as e: + print(f'Error: {str(e)} while running benchmark. Reducing batch size to {batch_size} for retry.') + batch_size = decay_batch_exp(batch_size) + return results + + +def benchmark(args): + if args.amp: + _logger.warning("Overriding precision to 'amp' since --amp flag set.") + args.precision = 'amp' + _logger.info(f'Benchmarking in {args.precision} precision. ' + f'{"NHWC" if args.channels_last else "NCHW"} layout. ' + f'torchscript {"enabled" if args.torchscript else "disabled"}') + + bench_kwargs = vars(args).copy() + bench_kwargs.pop('amp') + model = bench_kwargs.pop('model') + batch_size = bench_kwargs.pop('batch_size') + + bench_fns = (InferenceBenchmarkRunner,) + prefixes = ('infer',) + if args.bench == 'both': + bench_fns = ( + InferenceBenchmarkRunner, + TrainBenchmarkRunner + ) + prefixes = ('infer', 'train') + elif args.bench == 'train': + bench_fns = TrainBenchmarkRunner, + prefixes = 'train', + + model_results = OrderedDict(model=model) + for prefix, bench_fn in zip(prefixes, bench_fns): + run_results = _try_run(model, bench_fn, initial_batch_size=batch_size, bench_kwargs=bench_kwargs) + if prefix: + run_results = {'_'.join([prefix, k]): v for k, v in run_results.items()} + model_results.update(run_results) + param_count = model_results.pop('infer_param_count', model_results.pop('train_param_count', 0)) + model_results.setdefault('param_count', param_count) + model_results.pop('train_param_count', 0) + return model_results + + +def main(): + setup_default_logging() + args = parser.parse_args() + model_cfgs = [] + model_names = [] + + if args.model_list: + args.model = '' + with open(args.model_list) as f: + model_names = [line.rstrip() for line in f] + model_cfgs = [(n, None) for n in model_names] + elif args.model == 'all': + # validate all models in a list of names with pretrained checkpoints + args.pretrained = True + model_names = list_models(pretrained=True, exclude_filters=['*in21k']) + model_cfgs = [(n, None) for n in model_names] + elif not is_model(args.model): + # model name doesn't exist, try as wildcard filter + model_names = list_models(args.model) + model_cfgs = [(n, None) for n in model_names] + + if len(model_cfgs): + results_file = args.results_file or './benchmark.csv' + _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names))) + results = [] + try: + for m, _ in model_cfgs: + if not m: + continue + args.model = m + r = benchmark(args) + results.append(r) + except KeyboardInterrupt as e: + pass + sort_key = 'train_samples_per_sec' if 'train' in args.bench else 'infer_samples_per_sec' + results = sorted(results, key=lambda x: x[sort_key], reverse=True) + if len(results): + write_results(results_file, results) + + import json + json_str = json.dumps(results, indent=4) + print(json_str) + else: + benchmark(args) + + +def write_results(results_file, results): + with open(results_file, mode='w') as cf: + dw = csv.DictWriter(cf, fieldnames=results[0].keys()) + dw.writeheader() + for r in results: + dw.writerow(r) + cf.flush() + + +if __name__ == '__main__': + main() diff --git a/testbed/huggingface__pytorch-image-models/clean_checkpoint.py b/testbed/huggingface__pytorch-image-models/clean_checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..a8edcc915a881c743c77f4509eb3833c286493c5 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/clean_checkpoint.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 +""" Checkpoint Cleaning Script + +Takes training checkpoints with GPU tensors, optimizer state, extra dict keys, etc. +and outputs a CPU tensor checkpoint with only the `state_dict` along with SHA256 +calculation for model zoo compatibility. + +Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman) +""" +import torch +import argparse +import os +import hashlib +import shutil +from collections import OrderedDict + +parser = argparse.ArgumentParser(description='PyTorch Checkpoint Cleaner') +parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', + help='path to latest checkpoint (default: none)') +parser.add_argument('--output', default='', type=str, metavar='PATH', + help='output path') +parser.add_argument('--use-ema', dest='use_ema', action='store_true', + help='use ema version of weights if present') +parser.add_argument('--clean-aux-bn', dest='clean_aux_bn', action='store_true', + help='remove auxiliary batch norm layers (from SplitBN training) from checkpoint') + +_TEMP_NAME = './_checkpoint.pth' + + +def main(): + args = parser.parse_args() + + if os.path.exists(args.output): + print("Error: Output filename ({}) already exists.".format(args.output)) + exit(1) + + # Load an existing checkpoint to CPU, strip everything but the state_dict and re-save + if args.checkpoint and os.path.isfile(args.checkpoint): + print("=> Loading checkpoint '{}'".format(args.checkpoint)) + checkpoint = torch.load(args.checkpoint, map_location='cpu') + + new_state_dict = OrderedDict() + if isinstance(checkpoint, dict): + state_dict_key = 'state_dict_ema' if args.use_ema else 'state_dict' + if state_dict_key in checkpoint: + state_dict = checkpoint[state_dict_key] + else: + state_dict = checkpoint + else: + assert False + for k, v in state_dict.items(): + if args.clean_aux_bn and 'aux_bn' in k: + # If all aux_bn keys are removed, the SplitBN layers will end up as normal and + # load with the unmodified model using BatchNorm2d. + continue + name = k[7:] if k.startswith('module') else k + new_state_dict[name] = v + print("=> Loaded state_dict from '{}'".format(args.checkpoint)) + + try: + torch.save(new_state_dict, _TEMP_NAME, _use_new_zipfile_serialization=False) + except: + torch.save(new_state_dict, _TEMP_NAME) + + with open(_TEMP_NAME, 'rb') as f: + sha_hash = hashlib.sha256(f.read()).hexdigest() + + if args.output: + checkpoint_root, checkpoint_base = os.path.split(args.output) + checkpoint_base = os.path.splitext(checkpoint_base)[0] + else: + checkpoint_root = '' + checkpoint_base = os.path.splitext(args.checkpoint)[0] + final_filename = '-'.join([checkpoint_base, sha_hash[:8]]) + '.pth' + shutil.move(_TEMP_NAME, os.path.join(checkpoint_root, final_filename)) + print("=> Saved state_dict to '{}, SHA256: {}'".format(final_filename, sha_hash)) + else: + print("Error: Checkpoint ({}) doesn't exist".format(args.checkpoint)) + + +if __name__ == '__main__': + main() diff --git a/testbed/huggingface__pytorch-image-models/convert/convert_nest_flax.py b/testbed/huggingface__pytorch-image-models/convert/convert_nest_flax.py new file mode 100644 index 0000000000000000000000000000000000000000..cda4d34f9b11bddd793f8d39e245f6b1cf52ec74 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/convert/convert_nest_flax.py @@ -0,0 +1,109 @@ +""" +Convert weights from https://github.com/google-research/nested-transformer +NOTE: You'll need https://github.com/google/CommonLoopUtils, not included in requirements.txt +""" + +import sys + +import numpy as np +import torch + +from clu import checkpoint + + +arch_depths = { + 'nest_base': [2, 2, 20], + 'nest_small': [2, 2, 20], + 'nest_tiny': [2, 2, 8], +} + + +def convert_nest(checkpoint_path, arch): + """ + Expects path to checkpoint which is a dir containing 4 files like in each of these folders + - https://console.cloud.google.com/storage/browser/gresearch/nest-checkpoints + `arch` is needed to + Returns a state dict that can be used with `torch.nn.Module.load_state_dict` + Hint: Follow timm.models.nest.Nest.__init__ and + https://github.com/google-research/nested-transformer/blob/main/models/nest_net.py + """ + assert arch in ['nest_base', 'nest_small', 'nest_tiny'], "Your `arch` is not supported" + + flax_dict = checkpoint.load_state_dict(checkpoint_path)['optimizer']['target'] + state_dict = {} + + # Patch embedding + state_dict['patch_embed.proj.weight'] = torch.tensor( + flax_dict['PatchEmbedding_0']['Conv_0']['kernel']).permute(3, 2, 0, 1) + state_dict['patch_embed.proj.bias'] = torch.tensor(flax_dict['PatchEmbedding_0']['Conv_0']['bias']) + + # Positional embeddings + posemb_keys = [k for k in flax_dict.keys() if k.startswith('PositionEmbedding')] + for i, k in enumerate(posemb_keys): + state_dict[f'levels.{i}.pos_embed'] = torch.tensor(flax_dict[k]['pos_embedding']) + + # Transformer encoders + depths = arch_depths[arch] + for level in range(len(depths)): + for layer in range(depths[level]): + global_layer_ix = sum(depths[:level]) + layer + # Norms + for i in range(2): + state_dict[f'levels.{level}.transformer_encoder.{layer}.norm{i+1}.weight'] = torch.tensor( + flax_dict[f'EncoderNDBlock_{global_layer_ix}'][f'LayerNorm_{i}']['scale']) + state_dict[f'levels.{level}.transformer_encoder.{layer}.norm{i+1}.bias'] = torch.tensor( + flax_dict[f'EncoderNDBlock_{global_layer_ix}'][f'LayerNorm_{i}']['bias']) + # Attention qkv + w_q = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_0']['kernel'] + w_kv = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_1']['kernel'] + # Pay attention to dims here (maybe get pen and paper) + w_kv = np.concatenate(np.split(w_kv, 2, -1), 1) + w_qkv = np.concatenate([w_q, w_kv], 1) + state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.qkv.weight'] = torch.tensor(w_qkv).flatten(1).permute(1,0) + b_q = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_0']['bias'] + b_kv = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_1']['bias'] + # Pay attention to dims here (maybe get pen and paper) + b_kv = np.concatenate(np.split(b_kv, 2, -1), 0) + b_qkv = np.concatenate([b_q, b_kv], 0) + state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.qkv.bias'] = torch.tensor(b_qkv).reshape(-1) + # Attention proj + w_proj = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['proj_kernel'] + w_proj = torch.tensor(w_proj).permute(2, 1, 0).flatten(1) + state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.proj.weight'] = w_proj + state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.proj.bias'] = torch.tensor( + flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['bias']) + # MLP + for i in range(2): + state_dict[f'levels.{level}.transformer_encoder.{layer}.mlp.fc{i+1}.weight'] = torch.tensor( + flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MlpBlock_0'][f'Dense_{i}']['kernel']).permute(1, 0) + state_dict[f'levels.{level}.transformer_encoder.{layer}.mlp.fc{i+1}.bias'] = torch.tensor( + flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MlpBlock_0'][f'Dense_{i}']['bias']) + + # Block aggregations (ConvPool) + for level in range(1, len(depths)): + # Convs + state_dict[f'levels.{level}.pool.conv.weight'] = torch.tensor( + flax_dict[f'ConvPool_{level-1}']['Conv_0']['kernel']).permute(3, 2, 0, 1) + state_dict[f'levels.{level}.pool.conv.bias'] = torch.tensor( + flax_dict[f'ConvPool_{level-1}']['Conv_0']['bias']) + # Norms + state_dict[f'levels.{level}.pool.norm.weight'] = torch.tensor( + flax_dict[f'ConvPool_{level-1}']['LayerNorm_0']['scale']) + state_dict[f'levels.{level}.pool.norm.bias'] = torch.tensor( + flax_dict[f'ConvPool_{level-1}']['LayerNorm_0']['bias']) + + # Final norm + state_dict[f'norm.weight'] = torch.tensor(flax_dict['LayerNorm_0']['scale']) + state_dict[f'norm.bias'] = torch.tensor(flax_dict['LayerNorm_0']['bias']) + + # Classifier + state_dict['head.weight'] = torch.tensor(flax_dict['Dense_0']['kernel']).permute(1, 0) + state_dict['head.bias'] = torch.tensor(flax_dict['Dense_0']['bias']) + + return state_dict + + +if __name__ == '__main__': + variant = sys.argv[1] # base, small, or tiny + state_dict = convert_nest(f'./nest-{variant[0]}_imagenet', f'nest_{variant}') + torch.save(state_dict, f'./jx_nest_{variant}.pth') \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/distributed_train.sh b/testbed/huggingface__pytorch-image-models/distributed_train.sh new file mode 100644 index 0000000000000000000000000000000000000000..1985669e25b858964ce04b9796865b78d9870663 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/distributed_train.sh @@ -0,0 +1,5 @@ +#!/bin/bash +NUM_PROC=$1 +shift +python3 -m torch.distributed.launch --nproc_per_node=$NUM_PROC train.py "$@" + diff --git a/testbed/huggingface__pytorch-image-models/docs/archived_changes.md b/testbed/huggingface__pytorch-image-models/docs/archived_changes.md new file mode 100644 index 0000000000000000000000000000000000000000..f8d88fd78f0831be14e641f2366cf3e5b7b5f665 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/archived_changes.md @@ -0,0 +1,205 @@ +# Archived Changes + +### Dec 18, 2020 +* Add ResNet-101D, ResNet-152D, and ResNet-200D weights trained @ 256x256 + * 256x256 val, 0.94 crop (top-1) - 101D (82.33), 152D (83.08), 200D (83.25) + * 288x288 val, 1.0 crop - 101D (82.64), 152D (83.48), 200D (83.76) + * 320x320 val, 1.0 crop - 101D (83.00), 152D (83.66), 200D (84.01) + +### Dec 7, 2020 +* Simplify EMA module (ModelEmaV2), compatible with fully torchscripted models +* Misc fixes for SiLU ONNX export, default_cfg missing from Feature extraction models, Linear layer w/ AMP + torchscript +* PyPi release @ 0.3.2 (needed by EfficientDet) + + +### Oct 30, 2020 +* Test with PyTorch 1.7 and fix a small top-n metric view vs reshape issue. +* Convert newly added 224x224 Vision Transformer weights from official JAX repo. 81.8 top-1 for B/16, 83.1 L/16. +* Support PyTorch 1.7 optimized, native SiLU (aka Swish) activation. Add mapping to 'silu' name, custom swish will eventually be deprecated. +* Fix regression for loading pretrained classifier via direct model entrypoint functions. Didn't impact create_model() factory usage. +* PyPi release @ 0.3.0 version! + +### Oct 26, 2020 +* Update Vision Transformer models to be compatible with official code release at https://github.com/google-research/vision_transformer +* Add Vision Transformer weights (ImageNet-21k pretrain) for 384x384 base and large models converted from official jax impl + * ViT-B/16 - 84.2 + * ViT-B/32 - 81.7 + * ViT-L/16 - 85.2 + * ViT-L/32 - 81.5 + +### Oct 21, 2020 +* Weights added for Vision Transformer (ViT) models. 77.86 top-1 for 'small' and 79.35 for 'base'. Thanks to [Christof](https://www.kaggle.com/christofhenkel) for training the base model w/ lots of GPUs. + +### Oct 13, 2020 +* Initial impl of Vision Transformer models. Both patch and hybrid (CNN backbone) variants. Currently trying to train... +* Adafactor and AdaHessian (FP32 only, no AMP) optimizers +* EdgeTPU-M (`efficientnet_em`) model trained in PyTorch, 79.3 top-1 +* Pip release, doc updates pending a few more changes... + +### Sept 18, 2020 +* New ResNet 'D' weights. 72.7 (top-1) ResNet-18-D, 77.1 ResNet-34-D, 80.5 ResNet-50-D +* Added a few untrained defs for other ResNet models (66D, 101D, 152D, 200/200D) + +### Sept 3, 2020 +* New weights + * Wide-ResNet50 - 81.5 top-1 (vs 78.5 torchvision) + * SEResNeXt50-32x4d - 81.3 top-1 (vs 79.1 cadene) +* Support for native Torch AMP and channels_last memory format added to train/validate scripts (`--channels-last`, `--native-amp` vs `--apex-amp`) +* Models tested with channels_last on latest NGC 20.08 container. AdaptiveAvgPool in attn layers changed to mean((2,3)) to work around bug with NHWC kernel. + +### Aug 12, 2020 +* New/updated weights from training experiments + * EfficientNet-B3 - 82.1 top-1 (vs 81.6 for official with AA and 81.9 for AdvProp) + * RegNetY-3.2GF - 82.0 top-1 (78.9 from official ver) + * CSPResNet50 - 79.6 top-1 (76.6 from official ver) +* Add CutMix integrated w/ Mixup. See [pull request](https://github.com/rwightman/pytorch-image-models/pull/218) for some usage examples +* Some fixes for using pretrained weights with `in_chans` != 3 on several models. + +### Aug 5, 2020 +Universal feature extraction, new models, new weights, new test sets. +* All models support the `features_only=True` argument for `create_model` call to return a network that extracts feature maps from the deepest layer at each stride. +* New models + * CSPResNet, CSPResNeXt, CSPDarkNet, DarkNet + * ReXNet + * (Modified Aligned) Xception41/65/71 (a proper port of TF models) +* New trained weights + * SEResNet50 - 80.3 top-1 + * CSPDarkNet53 - 80.1 top-1 + * CSPResNeXt50 - 80.0 top-1 + * DPN68b - 79.2 top-1 + * EfficientNet-Lite0 (non-TF ver) - 75.5 (submitted by [@hal-314](https://github.com/hal-314)) +* Add 'real' labels for ImageNet and ImageNet-Renditions test set, see [`results/README.md`](results/README.md) +* Test set ranking/top-n diff script by [@KushajveerSingh](https://github.com/KushajveerSingh) +* Train script and loader/transform tweaks to punch through more aug arguments +* README and documentation overhaul. See initial (WIP) documentation at https://rwightman.github.io/pytorch-image-models/ +* adamp and sgdp optimizers added by [@hellbell](https://github.com/hellbell) + +### June 11, 2020 +Bunch of changes: +* DenseNet models updated with memory efficient addition from torchvision (fixed a bug), blur pooling and deep stem additions +* VoVNet V1 and V2 models added, 39 V2 variant (ese_vovnet_39b) trained to 79.3 top-1 +* Activation factory added along with new activations: + * select act at model creation time for more flexibility in using activations compatible with scripting or tracing (ONNX export) + * hard_mish (experimental) added with memory-efficient grad, along with ME hard_swish + * context mgr for setting exportable/scriptable/no_jit states +* Norm + Activation combo layers added with initial trial support in DenseNet and VoVNet along with impl of EvoNorm and InplaceAbn wrapper that fit the interface +* Torchscript works for all but two of the model types as long as using Pytorch 1.5+, tests added for this +* Some import cleanup and classifier reset changes, all models will have classifier reset to nn.Identity on reset_classifer(0) call +* Prep for 0.1.28 pip release + +### May 12, 2020 +* Add ResNeSt models (code adapted from https://github.com/zhanghang1989/ResNeSt, paper https://arxiv.org/abs/2004.08955)) + +### May 3, 2020 +* Pruned EfficientNet B1, B2, and B3 (https://arxiv.org/abs/2002.08258) contributed by [Yonathan Aflalo](https://github.com/yoniaflalo) + +### May 1, 2020 +* Merged a number of execellent contributions in the ResNet model family over the past month + * BlurPool2D and resnetblur models initiated by [Chris Ha](https://github.com/VRandme), I trained resnetblur50 to 79.3. + * TResNet models and SpaceToDepth, AntiAliasDownsampleLayer layers by [mrT23](https://github.com/mrT23) + * ecaresnet (50d, 101d, light) models and two pruned variants using pruning as per (https://arxiv.org/abs/2002.08258) by [Yonathan Aflalo](https://github.com/yoniaflalo) +* 200 pretrained models in total now with updated results csv in results folder + +### April 5, 2020 +* Add some newly trained MobileNet-V2 models trained with latest h-params, rand augment. They compare quite favourably to EfficientNet-Lite + * 3.5M param MobileNet-V2 100 @ 73% + * 4.5M param MobileNet-V2 110d @ 75% + * 6.1M param MobileNet-V2 140 @ 76.5% + * 5.8M param MobileNet-V2 120d @ 77.3% + +### March 18, 2020 +* Add EfficientNet-Lite models w/ weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite) +* Add RandAugment trained ResNeXt-50 32x4d weights with 79.8 top-1. Trained by [Andrew Lavin](https://github.com/andravin) (see Training section for hparams) + +### April 5, 2020 +* Add some newly trained MobileNet-V2 models trained with latest h-params, rand augment. They compare quite favourably to EfficientNet-Lite + * 3.5M param MobileNet-V2 100 @ 73% + * 4.5M param MobileNet-V2 110d @ 75% + * 6.1M param MobileNet-V2 140 @ 76.5% + * 5.8M param MobileNet-V2 120d @ 77.3% + +### March 18, 2020 +* Add EfficientNet-Lite models w/ weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite) +* Add RandAugment trained ResNeXt-50 32x4d weights with 79.8 top-1. Trained by [Andrew Lavin](https://github.com/andravin) (see Training section for hparams) + +### Feb 29, 2020 +* New MobileNet-V3 Large weights trained from stratch with this code to 75.77% top-1 +* IMPORTANT CHANGE - default weight init changed for all MobilenetV3 / EfficientNet / related models + * overall results similar to a bit better training from scratch on a few smaller models tried + * performance early in training seems consistently improved but less difference by end + * set `fix_group_fanout=False` in `_init_weight_goog` fn if you need to reproducte past behaviour +* Experimental LR noise feature added applies a random perturbation to LR each epoch in specified range of training + +### Feb 18, 2020 +* Big refactor of model layers and addition of several attention mechanisms. Several additions motivated by 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268): + * Move layer/module impl into `layers` subfolder/module of `models` and organize in a more granular fashion + * ResNet downsample paths now properly support dilation (output stride != 32) for avg_pool ('D' variant) and 3x3 (SENets) networks + * Add Selective Kernel Nets on top of ResNet base, pretrained weights + * skresnet18 - 73% top-1 + * skresnet34 - 76.9% top-1 + * skresnext50_32x4d (equiv to SKNet50) - 80.2% top-1 + * ECA and CECA (circular padding) attention layer contributed by [Chris Ha](https://github.com/VRandme) + * CBAM attention experiment (not the best results so far, may remove) + * Attention factory to allow dynamically selecting one of SE, ECA, CBAM in the `.se` position for all ResNets + * Add DropBlock and DropPath (formerly DropConnect for EfficientNet/MobileNetv3) support to all ResNet variants +* Full dataset results updated that incl NoisyStudent weights and 2 of the 3 SK weights + +### Feb 12, 2020 +* Add EfficientNet-L2 and B0-B7 NoisyStudent weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet) + +### Feb 6, 2020 +* Add RandAugment trained EfficientNet-ES (EdgeTPU-Small) weights with 78.1 top-1. Trained by [Andrew Lavin](https://github.com/andravin) (see Training section for hparams) + +### Feb 1/2, 2020 +* Port new EfficientNet-B8 (RandAugment) weights, these are different than the B8 AdvProp, different input normalization. +* Update results csv files on all models for ImageNet validation and three other test sets +* Push PyPi package update + +### Jan 31, 2020 +* Update ResNet50 weights with a new 79.038 result from further JSD / AugMix experiments. Full command line for reproduction in training section below. + +### Jan 11/12, 2020 +* Master may be a bit unstable wrt to training, these changes have been tested but not all combos +* Implementations of AugMix added to existing RA and AA. Including numerous supporting pieces like JSD loss (Jensen-Shannon divergence + CE), and AugMixDataset +* SplitBatchNorm adaptation layer added for implementing Auxiliary BN as per AdvProp paper +* ResNet-50 AugMix trained model w/ 79% top-1 added +* `seresnext26tn_32x4d` - 77.99 top-1, 93.75 top-5 added to tiered experiment, higher img/s than 't' and 'd' + +### Jan 3, 2020 +* Add RandAugment trained EfficientNet-B0 weight with 77.7 top-1. Trained by [Michael Klachko](https://github.com/michaelklachko) with this code and recent hparams (see Training section) +* Add `avg_checkpoints.py` script for post training weight averaging and update all scripts with header docstrings and shebangs. + +### Dec 30, 2019 +* Merge [Dushyant Mehta's](https://github.com/mehtadushy) PR for SelecSLS (Selective Short and Long Range Skip Connections) networks. Good GPU memory consumption and throughput. Original: https://github.com/mehtadushy/SelecSLS-Pytorch + +### Dec 28, 2019 +* Add new model weights and training hparams (see Training Hparams section) + * `efficientnet_b3` - 81.5 top-1, 95.7 top-5 at default res/crop, 81.9, 95.8 at 320x320 1.0 crop-pct + * trained with RandAugment, ended up with an interesting but less than perfect result (see training section) + * `seresnext26d_32x4d`- 77.6 top-1, 93.6 top-5 + * deep stem (32, 32, 64), avgpool downsample + * stem/dowsample from bag-of-tricks paper + * `seresnext26t_32x4d`- 78.0 top-1, 93.7 top-5 + * deep tiered stem (24, 48, 64), avgpool downsample (a modified 'D' variant) + * stem sizing mods from Jeremy Howard and fastai devs discussing ResNet architecture experiments + +### Dec 23, 2019 +* Add RandAugment trained MixNet-XL weights with 80.48 top-1. +* `--dist-bn` argument added to train.py, will distribute BN stats between nodes after each train epoch, before eval + +### Dec 4, 2019 +* Added weights from the first training from scratch of an EfficientNet (B2) with my new RandAugment implementation. Much better than my previous B2 and very close to the official AdvProp ones (80.4 top-1, 95.08 top-5). + +### Nov 29, 2019 +* Brought EfficientNet and MobileNetV3 up to date with my https://github.com/rwightman/gen-efficientnet-pytorch code. Torchscript and ONNX export compat excluded. + * AdvProp weights added + * Official TF MobileNetv3 weights added +* EfficientNet and MobileNetV3 hook based 'feature extraction' classes added. Will serve as basis for using models as backbones in obj detection/segmentation tasks. Lots more to be done here... +* HRNet classification models and weights added from https://github.com/HRNet/HRNet-Image-Classification +* Consistency in global pooling, `reset_classifer`, and `forward_features` across models + * `forward_features` always returns unpooled feature maps now +* Reasonable chance I broke something... let me know + +### Nov 22, 2019 +* Add ImageNet training RandAugment implementation alongside AutoAugment. PyTorch Transform compatible format, using PIL. Currently training two EfficientNet models from scratch with promising results... will update. +* `drop-connect` cmd line arg finally added to `train.py`, no need to hack model fns. Works for efficientnet/mobilenetv3 based models, ignored otherwise. \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/changes.md b/testbed/huggingface__pytorch-image-models/docs/changes.md new file mode 100644 index 0000000000000000000000000000000000000000..6ff50756395602f831024c938d5ef88e2a4efc5b --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/changes.md @@ -0,0 +1,130 @@ +# Recent Changes + +### June 8, 2021 +* Add first ResMLP weights, trained in PyTorch XLA on TPU-VM w/ my XLA branch. 24 block variant, 79.2 top-1. +* Add ResNet51-Q model w/ pretrained weights at 82.36 top-1. + * NFNet inspired block layout with quad layer stem and no maxpool + * Same param count (35.7M) and throughput as ResNetRS-50 but +1.5 top-1 @ 224x224 and +2.5 top-1 at 288x288 + +### May 25, 2021 +* Add LeViT, Visformer, Convit (PR by Aman Arora), Twins (PR by paper authors) transformer models +* Cleanup input_size/img_size override handling and testing for all vision transformer models +* Add `efficientnetv2_rw_m` model and weights (started training before official code). 84.8 top-1, 53M params. + +### May 14, 2021 +* Add EfficientNet-V2 official model defs w/ ported weights from official [Tensorflow/Keras](https://github.com/google/automl/tree/master/efficientnetv2) impl. + * 1k trained variants: `tf_efficientnetv2_s/m/l` + * 21k trained variants: `tf_efficientnetv2_s/m/l_in21k` + * 21k pretrained -> 1k fine-tuned: `tf_efficientnetv2_s/m/l_in21ft1k` + * v2 models w/ v1 scaling: `tf_efficientnetv2_b0` through `b3` + * Rename my prev V2 guess `efficientnet_v2s` -> `efficientnetv2_rw_s` + * Some blank `efficientnetv2_*` models in-place for future native PyTorch training + +### May 5, 2021 +* Add MLP-Mixer models and port pretrained weights from [Google JAX impl](https://github.com/google-research/vision_transformer/tree/linen) +* Add CaiT models and pretrained weights from [FB](https://github.com/facebookresearch/deit) +* Add ResNet-RS models and weights from [TF](https://github.com/tensorflow/tpu/tree/master/models/official/resnet/resnet_rs). Thanks [Aman Arora](https://github.com/amaarora) +* Add CoaT models and weights. Thanks [Mohammed Rizin](https://github.com/morizin) +* Add new ImageNet-21k weights & finetuned weights for TResNet, MobileNet-V3, ViT models. Thanks [mrT](https://github.com/mrT23) +* Add GhostNet models and weights. Thanks [Kai Han](https://github.com/iamhankai) +* Update ByoaNet attention modles + * Improve SA module inits + * Hack together experimental stand-alone Swin based attn module and `swinnet` + * Consistent '26t' model defs for experiments. +* Add improved Efficientnet-V2S (prelim model def) weights. 83.8 top-1. +* WandB logging support + +### April 13, 2021 +* Add Swin Transformer models and weights from https://github.com/microsoft/Swin-Transformer + +### April 12, 2021 +* Add ECA-NFNet-L1 (slimmed down F1 w/ SiLU, 41M params) trained with this code. 84% top-1 @ 320x320. Trained at 256x256. +* Add EfficientNet-V2S model (unverified model definition) weights. 83.3 top-1 @ 288x288. Only trained single res 224. Working on progressive training. +* Add ByoaNet model definition (Bring-your-own-attention) w/ SelfAttention block and corresponding SA/SA-like modules and model defs + * Lambda Networks - https://arxiv.org/abs/2102.08602 + * Bottleneck Transformers - https://arxiv.org/abs/2101.11605 + * Halo Nets - https://arxiv.org/abs/2103.12731 +* Adabelief optimizer contributed by Juntang Zhuang + +### April 1, 2021 +* Add snazzy `benchmark.py` script for bulk `timm` model benchmarking of train and/or inference +* Add Pooling-based Vision Transformer (PiT) models (from https://github.com/naver-ai/pit) + * Merged distilled variant into main for torchscript compatibility + * Some `timm` cleanup/style tweaks and weights have hub download support +* Cleanup Vision Transformer (ViT) models + * Merge distilled (DeiT) model into main so that torchscript can work + * Support updated weight init (defaults to old still) that closer matches original JAX impl (possibly better training from scratch) + * Separate hybrid model defs into different file and add several new model defs to fiddle with, support patch_size != 1 for hybrids + * Fix fine-tuning num_class changes (PiT and ViT) and pos_embed resizing (Vit) with distilled variants + * nn.Sequential for block stack (does not break downstream compat) +* TnT (Transformer-in-Transformer) models contributed by author (from https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT) +* Add RegNetY-160 weights from DeiT teacher model +* Add new NFNet-L0 w/ SE attn (rename `nfnet_l0b`->`nfnet_l0`) weights 82.75 top-1 @ 288x288 +* Some fixes/improvements for TFDS dataset wrapper + +### March 7, 2021 +* First 0.4.x PyPi release w/ NFNets (& related), ByoB (GPU-Efficient, RepVGG, etc). +* Change feature extraction for pre-activation nets (NFNets, ResNetV2) to return features before activation. + +### Feb 18, 2021 +* Add pretrained weights and model variants for NFNet-F* models from [DeepMind Haiku impl](https://github.com/deepmind/deepmind-research/tree/master/nfnets). + * Models are prefixed with `dm_`. They require SAME padding conv, skipinit enabled, and activation gains applied in act fn. + * These models are big, expect to run out of GPU memory. With the GELU activiation + other options, they are roughly 1/2 the inference speed of my SiLU PyTorch optimized `s` variants. + * Original model results are based on pre-processing that is not the same as all other models so you'll see different results in the results csv (once updated). + * Matching the original pre-processing as closely as possible I get these results: + * `dm_nfnet_f6` - 86.352 + * `dm_nfnet_f5` - 86.100 + * `dm_nfnet_f4` - 85.834 + * `dm_nfnet_f3` - 85.676 + * `dm_nfnet_f2` - 85.178 + * `dm_nfnet_f1` - 84.696 + * `dm_nfnet_f0` - 83.464 + +### Feb 16, 2021 +* Add Adaptive Gradient Clipping (AGC) as per https://arxiv.org/abs/2102.06171. Integrated w/ PyTorch gradient clipping via mode arg that defaults to prev 'norm' mode. For backward arg compat, clip-grad arg must be specified to enable when using train.py. + * AGC w/ default clipping factor `--clip-grad .01 --clip-mode agc` + * PyTorch global norm of 1.0 (old behaviour, always norm), `--clip-grad 1.0` + * PyTorch value clipping of 10, `--clip-grad 10. --clip-mode value` + * AGC performance is definitely sensitive to the clipping factor. More experimentation needed to determine good values for smaller batch sizes and optimizers besides those in paper. So far I've found .001-.005 is necessary for stable RMSProp training w/ NFNet/NF-ResNet. + +### Feb 12, 2021 +* Update Normalization-Free nets to include new NFNet-F (https://arxiv.org/abs/2102.06171) model defs + +### Feb 10, 2021 +* More model archs, incl a flexible ByobNet backbone ('Bring-your-own-blocks') + * GPU-Efficient-Networks (https://github.com/idstcv/GPU-Efficient-Networks), impl in `byobnet.py` + * RepVGG (https://github.com/DingXiaoH/RepVGG), impl in `byobnet.py` + * classic VGG (from torchvision, impl in `vgg`) +* Refinements to normalizer layer arg handling and normalizer+act layer handling in some models +* Default AMP mode changed to native PyTorch AMP instead of APEX. Issues not being fixed with APEX. Native works with `--channels-last` and `--torchscript` model training, APEX does not. +* Fix a few bugs introduced since last pypi release + +### Feb 8, 2021 +* Add several ResNet weights with ECA attention. 26t & 50t trained @ 256, test @ 320. 269d train @ 256, fine-tune @320, test @ 352. + * `ecaresnet26t` - 79.88 top-1 @ 320x320, 79.08 @ 256x256 + * `ecaresnet50t` - 82.35 top-1 @ 320x320, 81.52 @ 256x256 + * `ecaresnet269d` - 84.93 top-1 @ 352x352, 84.87 @ 320x320 +* Remove separate tiered (`t`) vs tiered_narrow (`tn`) ResNet model defs, all `tn` changed to `t` and `t` models removed (`seresnext26t_32x4d` only model w/ weights that was removed). +* Support model default_cfgs with separate train vs test resolution `test_input_size` and remove extra `_320` suffix ResNet model defs that were just for test. + +### Jan 30, 2021 +* Add initial "Normalization Free" NF-RegNet-B* and NF-ResNet model definitions based on [paper](https://arxiv.org/abs/2101.08692) + +### Jan 25, 2021 +* Add ResNetV2 Big Transfer (BiT) models w/ ImageNet-1k and 21k weights from https://github.com/google-research/big_transfer +* Add official R50+ViT-B/16 hybrid models + weights from https://github.com/google-research/vision_transformer +* ImageNet-21k ViT weights are added w/ model defs and representation layer (pre logits) support + * NOTE: ImageNet-21k classifier heads were zero'd in original weights, they are only useful for transfer learning +* Add model defs and weights for DeiT Vision Transformer models from https://github.com/facebookresearch/deit +* Refactor dataset classes into ImageDataset/IterableImageDataset + dataset specific parser classes +* Add Tensorflow-Datasets (TFDS) wrapper to allow use of TFDS image classification sets with train script + * Ex: `train.py /data/tfds --dataset tfds/oxford_iiit_pet --val-split test --model resnet50 -b 256 --amp --num-classes 37 --opt adamw --lr 3e-4 --weight-decay .001 --pretrained -j 2` +* Add improved .tar dataset parser that reads images from .tar, folder of .tar files, or .tar within .tar + * Run validation on full ImageNet-21k directly from tar w/ BiT model: `validate.py /data/fall11_whole.tar --model resnetv2_50x1_bitm_in21k --amp` +* Models in this update should be stable w/ possible exception of ViT/BiT, possibility of some regressions with train/val scripts and dataset handling + +### Jan 3, 2021 +* Add SE-ResNet-152D weights + * 256x256 val, 0.94 crop top-1 - 83.75 + * 320x320 val, 1.0 crop - 84.36 +* Update results files diff --git a/testbed/huggingface__pytorch-image-models/docs/feature_extraction.md b/testbed/huggingface__pytorch-image-models/docs/feature_extraction.md new file mode 100644 index 0000000000000000000000000000000000000000..b41c15597bfa855a15dfff62aadbc763d0c27427 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/feature_extraction.md @@ -0,0 +1,173 @@ +# Feature Extraction + +All of the models in `timm` have consistent mechanisms for obtaining various types of features from the model for tasks besides classification. + +## Penultimate Layer Features (Pre-Classifier Features) + +The features from the penultimate model layer can be obtained in several ways without requiring model surgery (although feel free to do surgery). One must first decide if they want pooled or un-pooled features. + +### Unpooled + +There are three ways to obtain unpooled features. + +Without modifying the network, one can call `model.forward_features(input)` on any model instead of the usual `model(input)`. This will bypass the head classifier and global pooling for networks. + +If one wants to explicitly modify the network to return unpooled features, they can either create the model without a classifier and pooling, or remove it later. Both paths remove the parameters associated with the classifier from the network. + +#### forward_features() +```python hl_lines="3 6" +import torch +import timm +m = timm.create_model('xception41', pretrained=True) +o = m(torch.randn(2, 3, 299, 299)) +print(f'Original shape: {o.shape}') +o = m.forward_features(torch.randn(2, 3, 299, 299)) +print(f'Unpooled shape: {o.shape}') +``` +Output: +```text +Original shape: torch.Size([2, 1000]) +Unpooled shape: torch.Size([2, 2048, 10, 10]) +``` + +#### Create with no classifier and pooling +```python hl_lines="3" +import torch +import timm +m = timm.create_model('resnet50', pretrained=True, num_classes=0, global_pool='') +o = m(torch.randn(2, 3, 224, 224)) +print(f'Unpooled shape: {o.shape}') +``` +Output: +```text +Unpooled shape: torch.Size([2, 2048, 7, 7]) +``` + +#### Remove it later +```python hl_lines="3 6" +import torch +import timm +m = timm.create_model('densenet121', pretrained=True) +o = m(torch.randn(2, 3, 224, 224)) +print(f'Original shape: {o.shape}') +m.reset_classifier(0, '') +o = m(torch.randn(2, 3, 224, 224)) +print(f'Unpooled shape: {o.shape}') +``` +Output: +```text +Original shape: torch.Size([2, 1000]) +Unpooled shape: torch.Size([2, 1024, 7, 7]) +``` + +### Pooled + +To modify the network to return pooled features, one can use `forward_features()` and pool/flatten the result themselves, or modify the network like above but keep pooling intact. + +#### Create with no classifier +```python hl_lines="3" +import torch +import timm +m = timm.create_model('resnet50', pretrained=True, num_classes=0) +o = m(torch.randn(2, 3, 224, 224)) +print(f'Pooled shape: {o.shape}') +``` +Output: +```text +Pooled shape: torch.Size([2, 2048]) +``` + +#### Remove it later +```python hl_lines="3 6" +import torch +import timm +m = timm.create_model('ese_vovnet19b_dw', pretrained=True) +o = m(torch.randn(2, 3, 224, 224)) +print(f'Original shape: {o.shape}') +m.reset_classifier(0) +o = m(torch.randn(2, 3, 224, 224)) +print(f'Pooled shape: {o.shape}') +``` +Output: +```text +Pooled shape: torch.Size([2, 1024]) +``` + + +## Multi-scale Feature Maps (Feature Pyramid) + +Object detection, segmentation, keypoint, and a variety of dense pixel tasks require access to feature maps from the backbone network at multiple scales. This is often done by modifying the original classification network. Since each network varies quite a bit in structure, it's not uncommon to see only a few backbones supported in any given obj detection or segmentation library. + +`timm` allows a consistent interface for creating any of the included models as feature backbones that output feature maps for selected levels. + +A feature backbone can be created by adding the argument `features_only=True` to any `create_model` call. By default 5 strides will be output from most models (not all have that many), with the first starting at 2 (some start at 1 or 4). + +### Create a feature map extraction model +```python hl_lines="3" +import torch +import timm +m = timm.create_model('resnest26d', features_only=True, pretrained=True) +o = m(torch.randn(2, 3, 224, 224)) +for x in o: + print(x.shape) +``` +Output: +```text +torch.Size([2, 64, 112, 112]) +torch.Size([2, 256, 56, 56]) +torch.Size([2, 512, 28, 28]) +torch.Size([2, 1024, 14, 14]) +torch.Size([2, 2048, 7, 7]) +``` + +### Query the feature information + +After a feature backbone has been created, it can be queried to provide channel or resolution reduction information to the downstream heads without requiring static config or hardcoded constants. The `.feature_info` attribute is a class encapsulating the information about the feature extraction points. + +```python hl_lines="3 4" +import torch +import timm +m = timm.create_model('regnety_032', features_only=True, pretrained=True) +print(f'Feature channels: {m.feature_info.channels()}') +o = m(torch.randn(2, 3, 224, 224)) +for x in o: + print(x.shape) +``` +Output: +```text +Feature channels: [32, 72, 216, 576, 1512] +torch.Size([2, 32, 112, 112]) +torch.Size([2, 72, 56, 56]) +torch.Size([2, 216, 28, 28]) +torch.Size([2, 576, 14, 14]) +torch.Size([2, 1512, 7, 7]) +``` + +### Select specific feature levels or limit the stride + +There are to additional creation arguments impacting the output features. + +* `out_indices` selects which indices to output +* `output_stride` limits the feature output stride of the network (also works in classification mode BTW) + +`out_indices` is supported by all models, but not all models have the same index to feature stride mapping. Look at the code or check feature_info to compare. The out indices generally correspond to the `C(i+1)th` feature level (a `2^(i+1)` reduction). For most models, index 0 is the stride 2 features, and index 4 is stride 32. + +`output_stride` is achieved by converting layers to use dilated convolutions. Doing so is not always straightforward, some networks only support `output_stride=32`. + +```python hl_lines="3 4 5" +import torch +import timm +m = timm.create_model('ecaresnet101d', features_only=True, output_stride=8, out_indices=(2, 4), pretrained=True) +print(f'Feature channels: {m.feature_info.channels()}') +print(f'Feature reduction: {m.feature_info.reduction()}') +o = m(torch.randn(2, 3, 320, 320)) +for x in o: + print(x.shape) +``` +Output: +```text +Feature channels: [512, 2048] +Feature reduction: [8, 8] +torch.Size([2, 512, 40, 40]) +torch.Size([2, 2048, 40, 40]) +``` diff --git a/testbed/huggingface__pytorch-image-models/docs/index.md b/testbed/huggingface__pytorch-image-models/docs/index.md new file mode 100644 index 0000000000000000000000000000000000000000..95f7df642a17fcb4fe40662a956dbfeac5611032 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/index.md @@ -0,0 +1,80 @@ +# Getting Started + +## Welcome + +Welcome to the `timm` documentation, a lean set of docs that covers the basics of `timm`. + +For a more comprehensive set of docs (currently under development), please visit [timmdocs](https://fastai.github.io/timmdocs/) by [Aman Arora](https://github.com/amaarora). + +## Install + +The library can be installed with pip: + +``` +pip install timm +``` + +I update the PyPi (pip) packages when I'm confident there are no significant model regressions from previous releases. If you want to pip install the bleeding edge from GitHub, use: +``` +pip install git+https://github.com/rwightman/pytorch-image-models.git +``` + +!!! info "Conda Environment" + All development and testing has been done in Conda Python 3 environments on Linux x86-64 systems, specifically Python 3.6.x, 3.7.x., 3.8.x., 3.9 + + Little to no care has been taken to be Python 2.x friendly and will not support it. If you run into any challenges running on Windows, or other OS, I'm definitely open to looking into those issues so long as it's in a reproducible (read Conda) environment. + + PyTorch versions 1.4, 1.5.x, 1.6, 1.7.x, and 1.8 have been tested with this code. + + I've tried to keep the dependencies minimal, the setup is as per the PyTorch default install instructions for Conda: + ``` + conda create -n torch-env + conda activate torch-env + conda install pytorch torchvision cudatoolkit=11.1 -c pytorch -c conda-forge + conda install pyyaml + ``` + +## Load a Pretrained Model + +Pretrained models can be loaded using `timm.create_model` + +```python +import timm + +m = timm.create_model('mobilenetv3_large_100', pretrained=True) +m.eval() +``` + +## List Models with Pretrained Weights +```python +import timm +from pprint import pprint +model_names = timm.list_models(pretrained=True) +pprint(model_names) +>>> ['adv_inception_v3', + 'cspdarknet53', + 'cspresnext50', + 'densenet121', + 'densenet161', + 'densenet169', + 'densenet201', + 'densenetblur121d', + 'dla34', + 'dla46_c', +... +] +``` + +## List Model Architectures by Wildcard +```python +import timm +from pprint import pprint +model_names = timm.list_models('*resne*t*') +pprint(model_names) +>>> ['cspresnet50', + 'cspresnet50d', + 'cspresnet50w', + 'cspresnext50', +... +] +``` diff --git a/testbed/huggingface__pytorch-image-models/docs/models.md b/testbed/huggingface__pytorch-image-models/docs/models.md new file mode 100644 index 0000000000000000000000000000000000000000..fd43805ea990101174b17102b11b30bb14ddafb5 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models.md @@ -0,0 +1,171 @@ +# Model Summaries + +The model architectures included come from a wide variety of sources. Sources, including papers, original impl ("reference code") that I rewrote / adapted, and PyTorch impl that I leveraged directly ("code") are listed below. + +Most included models have pretrained weights. The weights are either: + +1. from their original sources +2. ported by myself from their original impl in a different framework (e.g. Tensorflow models) +3. trained from scratch using the included training script + +The validation results for the pretrained weights are [here](results.md) + +A more exciting view (with pretty pictures) of the models within `timm` can be found at [paperswithcode](https://paperswithcode.com/lib/timm). + +## Big Transfer ResNetV2 (BiT) [[resnetv2.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnetv2.py)] +* Paper: `Big Transfer (BiT): General Visual Representation Learning` - https://arxiv.org/abs/1912.11370 +* Reference code: https://github.com/google-research/big_transfer + +## Cross-Stage Partial Networks [[cspnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/cspnet.py)] +* Paper: `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 +* Reference impl: https://github.com/WongKinYiu/CrossStagePartialNetworks + +## DenseNet [[densenet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/densenet.py)] +* Paper: `Densely Connected Convolutional Networks` - https://arxiv.org/abs/1608.06993 +* Code: https://github.com/pytorch/vision/tree/master/torchvision/models + +## DLA [[dla.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dla.py)] +* Paper: https://arxiv.org/abs/1707.06484 +* Code: https://github.com/ucbdrive/dla + +## Dual-Path Networks [[dpn.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dpn.py)] +* Paper: `Dual Path Networks` - https://arxiv.org/abs/1707.01629 +* My PyTorch code: https://github.com/rwightman/pytorch-dpn-pretrained +* Reference code: https://github.com/cypw/DPNs + +## GPU-Efficient Networks [[byobnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/byobnet.py)] +* Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 +* Reference code: https://github.com/idstcv/GPU-Efficient-Networks + +## HRNet [[hrnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/hrnet.py)] +* Paper: `Deep High-Resolution Representation Learning for Visual Recognition` - https://arxiv.org/abs/1908.07919 +* Code: https://github.com/HRNet/HRNet-Image-Classification + +## Inception-V3 [[inception_v3.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_v3.py)] +* Paper: `Rethinking the Inception Architecture for Computer Vision` - https://arxiv.org/abs/1512.00567 +* Code: https://github.com/pytorch/vision/tree/master/torchvision/models + +## Inception-V4 [[inception_v4.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_v4.py)] +* Paper: `Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning` - https://arxiv.org/abs/1602.07261 +* Code: https://github.com/Cadene/pretrained-models.pytorch +* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets + +## Inception-ResNet-V2 [[inception_resnet_v2.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_resnet_v2.py)] +* Paper: `Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning` - https://arxiv.org/abs/1602.07261 +* Code: https://github.com/Cadene/pretrained-models.pytorch +* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets + +## NASNet-A [[nasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/nasnet.py)] +* Papers: `Learning Transferable Architectures for Scalable Image Recognition` - https://arxiv.org/abs/1707.07012 +* Code: https://github.com/Cadene/pretrained-models.pytorch +* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet + +## PNasNet-5 [[pnasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/pnasnet.py)] +* Papers: `Progressive Neural Architecture Search` - https://arxiv.org/abs/1712.00559 +* Code: https://github.com/Cadene/pretrained-models.pytorch +* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet + +## EfficientNet [[efficientnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py)] + +* Papers: + * EfficientNet NoisyStudent (B0-B7, L2) - https://arxiv.org/abs/1911.04252 + * EfficientNet AdvProp (B0-B8) - https://arxiv.org/abs/1911.09665 + * EfficientNet (B0-B7) - https://arxiv.org/abs/1905.11946 + * EfficientNet-EdgeTPU (S, M, L) - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html + * MixNet - https://arxiv.org/abs/1907.09595 + * MNASNet B1, A1 (Squeeze-Excite), and Small - https://arxiv.org/abs/1807.11626 + * MobileNet-V2 - https://arxiv.org/abs/1801.04381 + * FBNet-C - https://arxiv.org/abs/1812.03443 + * Single-Path NAS - https://arxiv.org/abs/1904.02877 +* My PyTorch code: https://github.com/rwightman/gen-efficientnet-pytorch +* Reference code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + +## MobileNet-V3 [[mobilenetv3.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py)] +* Paper: `Searching for MobileNetV3` - https://arxiv.org/abs/1905.02244 +* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet + +## RegNet [[regnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/regnet.py)] +* Paper: `Designing Network Design Spaces` - https://arxiv.org/abs/2003.13678 +* Reference code: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py + +## RepVGG [[byobnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/byobnet.py)] +* Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 +* Reference code: https://github.com/DingXiaoH/RepVGG + +## ResNet, ResNeXt [[resnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnet.py)] + +* ResNet (V1B) + * Paper: `Deep Residual Learning for Image Recognition` - https://arxiv.org/abs/1512.03385 + * Code: https://github.com/pytorch/vision/tree/master/torchvision/models +* ResNeXt + * Paper: `Aggregated Residual Transformations for Deep Neural Networks` - https://arxiv.org/abs/1611.05431 + * Code: https://github.com/pytorch/vision/tree/master/torchvision/models +* 'Bag of Tricks' / Gluon C, D, E, S ResNet variants + * Paper: `Bag of Tricks for Image Classification with CNNs` - https://arxiv.org/abs/1812.01187 + * Code: https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/resnetv1b.py +* Instagram pretrained / ImageNet tuned ResNeXt101 + * Paper: `Exploring the Limits of Weakly Supervised Pretraining` - https://arxiv.org/abs/1805.00932 + * Weights: https://pytorch.org/hub/facebookresearch_WSL-Images_resnext (NOTE: CC BY-NC 4.0 License, NOT commercial friendly) +* Semi-supervised (SSL) / Semi-weakly Supervised (SWSL) ResNet and ResNeXts + * Paper: `Billion-scale semi-supervised learning for image classification` - https://arxiv.org/abs/1905.00546 + * Weights: https://github.com/facebookresearch/semi-supervised-ImageNet1K-models (NOTE: CC BY-NC 4.0 License, NOT commercial friendly) +* Squeeze-and-Excitation Networks + * Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 + * Code: Added to ResNet base, this is current version going forward, old `senet.py` is being deprecated +* ECAResNet (ECA-Net) + * Paper: `ECA-Net: Efficient Channel Attention for Deep CNN` - https://arxiv.org/abs/1910.03151v4 + * Code: Added to ResNet base, ECA module contributed by @VRandme, reference https://github.com/BangguWu/ECANet + +## Res2Net [[res2net.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/res2net.py)] +* Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 +* Code: https://github.com/gasvn/Res2Net + +## ResNeSt [[resnest.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnest.py)] +* Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955 +* Code: https://github.com/zhanghang1989/ResNeSt + +## ReXNet [[rexnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/rexnet.py)] +* Paper: `ReXNet: Diminishing Representational Bottleneck on CNN` - https://arxiv.org/abs/2007.00992 +* Code: https://github.com/clovaai/rexnet + +## Selective-Kernel Networks [[sknet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/sknet.py)] +* Paper: `Selective-Kernel Networks` - https://arxiv.org/abs/1903.06586 +* Code: https://github.com/implus/SKNet, https://github.com/clovaai/assembled-cnn + +## SelecSLS [[selecsls.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/selecsls.py)] +* Paper: `XNect: Real-time Multi-Person 3D Motion Capture with a Single RGB Camera` - https://arxiv.org/abs/1907.00837 +* Code: https://github.com/mehtadushy/SelecSLS-Pytorch + +## Squeeze-and-Excitation Networks [[senet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/senet.py)] +NOTE: I am deprecating this version of the networks, the new ones are part of `resnet.py` + +* Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 +* Code: https://github.com/Cadene/pretrained-models.pytorch + +## TResNet [[tresnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/tresnet.py)] +* Paper: `TResNet: High Performance GPU-Dedicated Architecture` - https://arxiv.org/abs/2003.13630 +* Code: https://github.com/mrT23/TResNet + +## VGG [[vgg.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vgg.py)] +* Paper: `Very Deep Convolutional Networks For Large-Scale Image Recognition` - https://arxiv.org/pdf/1409.1556.pdf +* Reference code: https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py + +## Vision Transformer [[vision_transformer.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py)] +* Paper: `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 +* Reference code and pretrained weights: https://github.com/google-research/vision_transformer + +## VovNet V2 and V1 [[vovnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vovnet.py)] +* Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 +* Reference code: https://github.com/youngwanLEE/vovnet-detectron2 + +## Xception [[xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/xception.py)] +* Paper: `Xception: Deep Learning with Depthwise Separable Convolutions` - https://arxiv.org/abs/1610.02357 +* Code: https://github.com/Cadene/pretrained-models.pytorch + +## Xception (Modified Aligned, Gluon) [[gluon_xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/gluon_xception.py)] +* Paper: `Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation` - https://arxiv.org/abs/1802.02611 +* Reference code: https://github.com/dmlc/gluon-cv/tree/master/gluoncv/model_zoo, https://github.com/jfzhang95/pytorch-deeplab-xception/ + +## Xception (Modified Aligned, TF) [[aligned_xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/aligned_xception.py)] +* Paper: `Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation` - https://arxiv.org/abs/1802.02611 +* Reference code: https://github.com/tensorflow/models/tree/master/research/deeplab diff --git a/testbed/huggingface__pytorch-image-models/docs/models/.templates/models/advprop.md b/testbed/huggingface__pytorch-image-models/docs/models/.templates/models/advprop.md new file mode 100644 index 0000000000000000000000000000000000000000..c204d871af812b30da394db7ae80bae91ba2ff36 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/.templates/models/advprop.md @@ -0,0 +1,457 @@ +# AdvProp (EfficientNet) + +**AdvProp** is an adversarial training scheme which treats adversarial examples as additional examples, to prevent overfitting. Key to the method is the usage of a separate auxiliary batch norm for adversarial examples, as they have different underlying distributions to normal examples. + +The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{xie2020adversarial, + title={Adversarial Examples Improve Image Recognition}, + author={Cihang Xie and Mingxing Tan and Boqing Gong and Jiang Wang and Alan Yuille and Quoc V. Le}, + year={2020}, + eprint={1911.09665}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/testbed/huggingface__pytorch-image-models/docs/models/.templates/models/big-transfer.md b/testbed/huggingface__pytorch-image-models/docs/models/.templates/models/big-transfer.md new file mode 100644 index 0000000000000000000000000000000000000000..b593b41aae9fef15cdbc15b2db8470d00e8fed87 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/.templates/models/big-transfer.md @@ -0,0 +1,295 @@ +# Big Transfer (BiT) + +**Big Transfer (BiT)** is a type of pretraining recipe that pre-trains on a large supervised source dataset, and fine-tunes the weights on the target task. Models are trained on the JFT-300M dataset. The finetuned models contained in this collection are finetuned on ImageNet. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{kolesnikov2020big, + title={Big Transfer (BiT): General Visual Representation Learning}, + author={Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby}, + year={2020}, + eprint={1912.11370}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/testbed/huggingface__pytorch-image-models/docs/models/.templates/models/csp-darknet.md b/testbed/huggingface__pytorch-image-models/docs/models/.templates/models/csp-darknet.md new file mode 100644 index 0000000000000000000000000000000000000000..b6ab42d1271078427773f644136b3bbc6642f8cb --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/.templates/models/csp-darknet.md @@ -0,0 +1,81 @@ +# CSP-DarkNet + +**CSPDarknet53** is a convolutional neural network and backbone for object detection that uses [DarkNet-53](https://paperswithcode.com/method/darknet-53). It employs a CSPNet strategy to partition the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split and merge strategy allows for more gradient flow through the network. + +This CNN is used as the backbone for [YOLOv4](https://paperswithcode.com/method/yolov4). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{bochkovskiy2020yolov4, + title={YOLOv4: Optimal Speed and Accuracy of Object Detection}, + author={Alexey Bochkovskiy and Chien-Yao Wang and Hong-Yuan Mark Liao}, + year={2020}, + eprint={2004.10934}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/testbed/huggingface__pytorch-image-models/docs/models/.templates/models/dpn.md b/testbed/huggingface__pytorch-image-models/docs/models/.templates/models/dpn.md new file mode 100644 index 0000000000000000000000000000000000000000..460ee78ff504efb5a26e91d7f19f2f99ffc7116e --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/.templates/models/dpn.md @@ -0,0 +1,256 @@ +# Dual Path Network (DPN) + +A **Dual Path Network (DPN)** is a convolutional neural network which presents a new topology of connection paths internally. The intuition is that [ResNets](https://paperswithcode.com/method/resnet) enables feature re-usage while DenseNet enables new feature exploration, and both are important for learning good representations. To enjoy the benefits from both path topologies, Dual Path Networks share common features while maintaining the flexibility to explore new features through dual path architectures. + +The principal building block is an [DPN Block](https://paperswithcode.com/method/dpn-block). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{chen2017dual, + title={Dual Path Networks}, + author={Yunpeng Chen and Jianan Li and Huaxin Xiao and Xiaojie Jin and Shuicheng Yan and Jiashi Feng}, + year={2017}, + eprint={1707.01629}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/testbed/huggingface__pytorch-image-models/docs/models/gloun-senet.md b/testbed/huggingface__pytorch-image-models/docs/models/gloun-senet.md new file mode 100644 index 0000000000000000000000000000000000000000..833307857d1375732bb3f4ef0712ad37c009bf9a --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/gloun-senet.md @@ -0,0 +1,124 @@ +# (Gluon) SENet + +A **SENet** is a convolutional neural network architecture that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('gluon_senet154', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `gluon_senet154`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('gluon_senet154', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/gloun-seresnext.md b/testbed/huggingface__pytorch-image-models/docs/models/gloun-seresnext.md new file mode 100644 index 0000000000000000000000000000000000000000..bef4b119449de660592b9108528adcfc0d09c551 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/gloun-seresnext.md @@ -0,0 +1,197 @@ +# (Gluon) SE-ResNeXt + +**SE ResNeXt** is a variant of a [ResNext](https://www.paperswithcode.com/method/resnext) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('gluon_seresnext101_32x4d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `gluon_seresnext101_32x4d`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('gluon_seresnext101_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/inception-resnet-v2.md b/testbed/huggingface__pytorch-image-models/docs/models/inception-resnet-v2.md new file mode 100644 index 0000000000000000000000000000000000000000..9cb597ebedc123dcf60cd740b780db113f19a2fa --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/inception-resnet-v2.md @@ -0,0 +1,133 @@ +# Inception ResNet v2 + +**Inception-ResNet-v2** is a convolutional neural architecture that builds on the Inception family of architectures but incorporates [residual connections](https://paperswithcode.com/method/residual-connection) (replacing the filter concatenation stage of the Inception architecture). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('inception_resnet_v2', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `inception_resnet_v2`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('inception_resnet_v2', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{szegedy2016inceptionv4, + title={Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning}, + author={Christian Szegedy and Sergey Ioffe and Vincent Vanhoucke and Alex Alemi}, + year={2016}, + eprint={1602.07261}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/inception-v3.md b/testbed/huggingface__pytorch-image-models/docs/models/inception-v3.md new file mode 100644 index 0000000000000000000000000000000000000000..d51c52470c85575f77167a8a7aca3518ce72f2b3 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/inception-v3.md @@ -0,0 +1,146 @@ +# Inception v3 + +**Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), Factorized 7 x 7 convolutions, and the use of an [auxiliary classifer](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the sidehead). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('inception_v3', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `inception_v3`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('inception_v3', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/SzegedyVISW15, + author = {Christian Szegedy and + Vincent Vanhoucke and + Sergey Ioffe and + Jonathon Shlens and + Zbigniew Wojna}, + title = {Rethinking the Inception Architecture for Computer Vision}, + journal = {CoRR}, + volume = {abs/1512.00567}, + year = {2015}, + url = {http://arxiv.org/abs/1512.00567}, + archivePrefix = {arXiv}, + eprint = {1512.00567}, + timestamp = {Mon, 13 Aug 2018 16:49:07 +0200}, + biburl = {https://dblp.org/rec/journals/corr/SzegedyVISW15.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/legacy-se-resnet.md b/testbed/huggingface__pytorch-image-models/docs/models/legacy-se-resnet.md new file mode 100644 index 0000000000000000000000000000000000000000..78e4a126b0340a98632cefd56b27bc667399e4d6 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/legacy-se-resnet.md @@ -0,0 +1,318 @@ +# (Legacy) SE-ResNet + +**SE ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('legacy_seresnet101', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `legacy_seresnet101`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('legacy_seresnet101', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/legacy-senet.md b/testbed/huggingface__pytorch-image-models/docs/models/legacy-senet.md new file mode 100644 index 0000000000000000000000000000000000000000..c3a49da5caffedfe4ce7f36a3b39ddf65a21af7e --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/legacy-senet.md @@ -0,0 +1,135 @@ +# (Legacy) SENet + +A **SENet** is a convolutional neural network architecture that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +The weights from this model were ported from Gluon. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('legacy_senet154', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `legacy_senet154`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('legacy_senet154', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/mobilenet-v2.md b/testbed/huggingface__pytorch-image-models/docs/models/mobilenet-v2.md new file mode 100644 index 0000000000000000000000000000000000000000..d6532e2fbf81e9cbf91ab725cf9d6a720702ac63 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/mobilenet-v2.md @@ -0,0 +1,271 @@ +# MobileNet v2 + +**MobileNetV2** is a convolutional neural network architecture that seeks to perform well on mobile devices. It is based on an [inverted residual structure](https://paperswithcode.com/method/inverted-residual-block) where the residual connections are between the bottleneck layers. The intermediate expansion layer uses lightweight depthwise convolutions to filter features as a source of non-linearity. As a whole, the architecture of MobileNetV2 contains the initial fully convolution layer with 32 filters, followed by 19 residual bottleneck layers. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('mobilenetv2_100', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `mobilenetv2_100`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('mobilenetv2_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1801-04381, + author = {Mark Sandler and + Andrew G. Howard and + Menglong Zhu and + Andrey Zhmoginov and + Liang{-}Chieh Chen}, + title = {Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification, + Detection and Segmentation}, + journal = {CoRR}, + volume = {abs/1801.04381}, + year = {2018}, + url = {http://arxiv.org/abs/1801.04381}, + archivePrefix = {arXiv}, + eprint = {1801.04381}, + timestamp = {Tue, 12 Jan 2021 15:30:06 +0100}, + biburl = {https://dblp.org/rec/journals/corr/abs-1801-04381.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/nasnet.md b/testbed/huggingface__pytorch-image-models/docs/models/nasnet.md new file mode 100644 index 0000000000000000000000000000000000000000..76b29fc9febe5a6a5ec96f9967f59511e7f2ca82 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/nasnet.md @@ -0,0 +1,131 @@ +# NASNet + +**NASNet** is a type of convolutional neural network discovered through neural architecture search. The building blocks consist of normal and reduction cells. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('nasnetalarge', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `nasnetalarge`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('nasnetalarge', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{zoph2018learning, + title={Learning Transferable Architectures for Scalable Image Recognition}, + author={Barret Zoph and Vijay Vasudevan and Jonathon Shlens and Quoc V. Le}, + year={2018}, + eprint={1707.07012}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/noisy-student.md b/testbed/huggingface__pytorch-image-models/docs/models/noisy-student.md new file mode 100644 index 0000000000000000000000000000000000000000..5a92d9282f735627aebb2a21e6912b8acafb3043 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/noisy-student.md @@ -0,0 +1,571 @@ +# Noisy Student (EfficientNet) + +**Noisy Student Training** is a semi-supervised learning approach. It extends the idea of self-training +and distillation with the use of equal-or-larger student models and noise added to the student during learning. It has three main steps: + +1. train a teacher model on labeled images +2. use the teacher to generate pseudo labels on unlabeled images +3. train a student model on the combination of labeled images and pseudo labeled images. + +The algorithm is iterated a few times by treating the student as a teacher to relabel the unlabeled data and training a new student. + +Noisy Student Training seeks to improve on self-training and distillation in two ways. First, it makes the student larger than, or at least equal to, the teacher so the student can better learn from a larger dataset. Second, it adds noise to the student so the noised student is forced to learn harder from the pseudo labels. To noise the student, it uses input noise such as RandAugment data augmentation, and model noise such as dropout and stochastic depth during training. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('tf_efficientnet_b0_ns', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `tf_efficientnet_b0_ns`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('tf_efficientnet_b0_ns', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{xie2020selftraining, + title={Self-training with Noisy Student improves ImageNet classification}, + author={Qizhe Xie and Minh-Thang Luong and Eduard Hovy and Quoc V. Le}, + year={2020}, + eprint={1911.04252}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/pnasnet.md b/testbed/huggingface__pytorch-image-models/docs/models/pnasnet.md new file mode 100644 index 0000000000000000000000000000000000000000..52dfd946da3ff3d778781f3753f4bf323c3aab0b --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/pnasnet.md @@ -0,0 +1,132 @@ +# PNASNet + +**Progressive Neural Architecture Search**, or **PNAS**, is a method for learning the structure of convolutional neural networks (CNNs). It uses a sequential model-based optimization (SMBO) strategy, where we search the space of cell structures, starting with simple (shallow) models and progressing to complex ones, pruning out unpromising structures as we go. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('pnasnet5large', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `pnasnet5large`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('pnasnet5large', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{liu2018progressive, + title={Progressive Neural Architecture Search}, + author={Chenxi Liu and Barret Zoph and Maxim Neumann and Jonathon Shlens and Wei Hua and Li-Jia Li and Li Fei-Fei and Alan Yuille and Jonathan Huang and Kevin Murphy}, + year={2018}, + eprint={1712.00559}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/res2net.md b/testbed/huggingface__pytorch-image-models/docs/models/res2net.md new file mode 100644 index 0000000000000000000000000000000000000000..4fb0a6b7e26553eec49135f18500d4214e770d13 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/res2net.md @@ -0,0 +1,321 @@ +# Res2Net + +**Res2Net** is an image model that employs a variation on bottleneck residual blocks, [Res2Net Blocks](https://paperswithcode.com/method/res2net-block). The motivation is to be able to represent features at multiple scales. This is achieved through a novel building block for CNNs that constructs hierarchical residual-like connections within one single residual block. This represents multi-scale features at a granular level and increases the range of receptive fields for each network layer. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('res2net101_26w_4s', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `res2net101_26w_4s`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('res2net101_26w_4s', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{Gao_2021, + title={Res2Net: A New Multi-Scale Backbone Architecture}, + volume={43}, + ISSN={1939-3539}, + url={http://dx.doi.org/10.1109/TPAMI.2019.2938758}, + DOI={10.1109/tpami.2019.2938758}, + number={2}, + journal={IEEE Transactions on Pattern Analysis and Machine Intelligence}, + publisher={Institute of Electrical and Electronics Engineers (IEEE)}, + author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip}, + year={2021}, + month={Feb}, + pages={652–662} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/resnest.md b/testbed/huggingface__pytorch-image-models/docs/models/resnest.md new file mode 100644 index 0000000000000000000000000000000000000000..7b2bf0ae6653fb7d6afb740a3899cd189e575de4 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/resnest.md @@ -0,0 +1,469 @@ +# ResNeSt + +A **ResNeSt** is a variant on a [ResNet](https://paperswithcode.com/method/resnet), which instead stacks [Split-Attention blocks](https://paperswithcode.com/method/split-attention). The cardinal group representations are then concatenated along the channel dimension: $V = \text{Concat}${$V^{1},V^{2},\cdots{V}^{K}$}. As in standard residual blocks, the final output $Y$ of otheur Split-Attention block is produced using a shortcut connection: $Y=V+X$, if the input and output feature-map share the same shape. For blocks with a stride, an appropriate transformation $\mathcal{T}$ is applied to the shortcut connection to align the output shapes: $Y=V+\mathcal{T}(X)$. For example, $\mathcal{T}$ can be strided convolution or combined convolution-with-pooling. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('resnest101e', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `resnest101e`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('resnest101e', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{zhang2020resnest, + title={ResNeSt: Split-Attention Networks}, + author={Hang Zhang and Chongruo Wu and Zhongyue Zhang and Yi Zhu and Haibin Lin and Zhi Zhang and Yue Sun and Tong He and Jonas Mueller and R. Manmatha and Mu Li and Alexander Smola}, + year={2020}, + eprint={2004.08955}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/resnet-d.md b/testbed/huggingface__pytorch-image-models/docs/models/resnet-d.md new file mode 100644 index 0000000000000000000000000000000000000000..323dc629b8e2c28d89003f9ceeb44d7cdb38aa72 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/resnet-d.md @@ -0,0 +1,324 @@ +# ResNet-D + +**ResNet-D** is a modification on the [ResNet](https://paperswithcode.com/method/resnet) architecture that utilises an [average pooling](https://paperswithcode.com/method/average-pooling) tweak for downsampling. The motivation is that in the unmodified ResNet, the [1×1 convolution](https://paperswithcode.com/method/1x1-convolution) for the downsampling block ignores 3/4 of input feature maps, so this is modified so no information will be ignored + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('resnet101d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `resnet101d`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('resnet101d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{he2018bag, + title={Bag of Tricks for Image Classification with Convolutional Neural Networks}, + author={Tong He and Zhi Zhang and Hang Zhang and Zhongyue Zhang and Junyuan Xie and Mu Li}, + year={2018}, + eprint={1812.01187}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/resnet.md b/testbed/huggingface__pytorch-image-models/docs/models/resnet.md new file mode 100644 index 0000000000000000000000000000000000000000..f770d3bea547bdff80ce7737e1219d8e4e53057d --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/resnet.md @@ -0,0 +1,439 @@ +# ResNet + +**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) ontop of each other to form network: e.g. a ResNet-50 has fifty layers using these blocks. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('resnet18', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `resnet18`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('resnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/HeZRS15, + author = {Kaiming He and + Xiangyu Zhang and + Shaoqing Ren and + Jian Sun}, + title = {Deep Residual Learning for Image Recognition}, + journal = {CoRR}, + volume = {abs/1512.03385}, + year = {2015}, + url = {http://arxiv.org/abs/1512.03385}, + archivePrefix = {arXiv}, + eprint = {1512.03385}, + timestamp = {Wed, 17 Apr 2019 17:23:45 +0200}, + biburl = {https://dblp.org/rec/journals/corr/HeZRS15.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/resnext.md b/testbed/huggingface__pytorch-image-models/docs/models/resnext.md new file mode 100644 index 0000000000000000000000000000000000000000..5d6451c2bc820bbeecd6ff3d42ca776e09292e4c --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/resnext.md @@ -0,0 +1,244 @@ +# ResNeXt + +A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('resnext101_32x8d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `resnext101_32x8d`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('resnext101_32x8d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/XieGDTH16, + author = {Saining Xie and + Ross B. Girshick and + Piotr Doll{\'{a}}r and + Zhuowen Tu and + Kaiming He}, + title = {Aggregated Residual Transformations for Deep Neural Networks}, + journal = {CoRR}, + volume = {abs/1611.05431}, + year = {2016}, + url = {http://arxiv.org/abs/1611.05431}, + archivePrefix = {arXiv}, + eprint = {1611.05431}, + timestamp = {Mon, 13 Aug 2018 16:45:58 +0200}, + biburl = {https://dblp.org/rec/journals/corr/XieGDTH16.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/rexnet.md b/testbed/huggingface__pytorch-image-models/docs/models/rexnet.md new file mode 100644 index 0000000000000000000000000000000000000000..cb009f83d4d012247aaf45102d7a7ed2fab4dce4 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/rexnet.md @@ -0,0 +1,258 @@ +# RexNet + +**Rank Expansion Networks** (ReXNets) follow a set of new design principles for designing bottlenecks in image classification models. Authors refine each layer by 1) expanding the input channel size of the convolution layer and 2) replacing the [ReLU6s](https://www.paperswithcode.com/method/relu6). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('rexnet_100', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `rexnet_100`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('rexnet_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{han2020rexnet, + title={ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network}, + author={Dongyoon Han and Sangdoo Yun and Byeongho Heo and YoungJoon Yoo}, + year={2020}, + eprint={2007.00992}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/se-resnet.md b/testbed/huggingface__pytorch-image-models/docs/models/se-resnet.md new file mode 100644 index 0000000000000000000000000000000000000000..206b9394f6666c1ff9811f1e5c2cded42028bf27 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/se-resnet.md @@ -0,0 +1,183 @@ +# SE-ResNet + +**SE ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('seresnet152d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `seresnet152d`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('seresnet152d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/selecsls.md b/testbed/huggingface__pytorch-image-models/docs/models/selecsls.md new file mode 100644 index 0000000000000000000000000000000000000000..741275db656e3f6e6da98ba854124be9ddc2477e --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/selecsls.md @@ -0,0 +1,197 @@ +# SelecSLS + +**SelecSLS** uses novel selective long and short range skip connections to improve the information flow allowing for a drastically faster network without compromising accuracy. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('selecsls42b', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `selecsls42b`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('selecsls42b', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{Mehta_2020, + title={XNect}, + volume={39}, + ISSN={1557-7368}, + url={http://dx.doi.org/10.1145/3386569.3392410}, + DOI={10.1145/3386569.3392410}, + number={4}, + journal={ACM Transactions on Graphics}, + publisher={Association for Computing Machinery (ACM)}, + author={Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Elgharib, Mohamed and Fua, Pascal and Seidel, Hans-Peter and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian}, + year={2020}, + month={Jul} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/seresnext.md b/testbed/huggingface__pytorch-image-models/docs/models/seresnext.md new file mode 100644 index 0000000000000000000000000000000000000000..2a85a842bb568d6b8e66de1381e9bd89f08e104f --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/seresnext.md @@ -0,0 +1,228 @@ +# SE-ResNeXt + +**SE ResNeXt** is a variant of a [ResNext](https://www.paperswithcode.com/method/resneXt) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('seresnext26d_32x4d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `seresnext26d_32x4d`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('seresnext26d_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/skresnet.md b/testbed/huggingface__pytorch-image-models/docs/models/skresnet.md new file mode 100644 index 0000000000000000000000000000000000000000..fd2dcd7557a2873f58fd04ed338efbe3bac4153f --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/skresnet.md @@ -0,0 +1,173 @@ +# SK-ResNet + +**SK ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs a [Selective Kernel](https://paperswithcode.com/method/selective-kernel) unit. In general, all the large kernel convolutions in the original bottleneck blocks in ResNet are replaced by the proposed [SK convolutions](https://paperswithcode.com/method/selective-kernel-convolution), enabling the network to choose appropriate receptive field sizes in an adaptive manner. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('skresnet18', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `skresnet18`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('skresnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{li2019selective, + title={Selective Kernel Networks}, + author={Xiang Li and Wenhai Wang and Xiaolin Hu and Jian Yang}, + year={2019}, + eprint={1903.06586}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/skresnext.md b/testbed/huggingface__pytorch-image-models/docs/models/skresnext.md new file mode 100644 index 0000000000000000000000000000000000000000..582a8d557284b13e868e2ce70a81b2d7309f76fb --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/skresnext.md @@ -0,0 +1,131 @@ +# SK-ResNeXt + +**SK ResNeXt** is a variant of a [ResNeXt](https://www.paperswithcode.com/method/resnext) that employs a [Selective Kernel](https://paperswithcode.com/method/selective-kernel) unit. In general, all the large kernel convolutions in the original bottleneck blocks in ResNext are replaced by the proposed [SK convolutions](https://paperswithcode.com/method/selective-kernel-convolution), enabling the network to choose appropriate receptive field sizes in an adaptive manner. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('skresnext50_32x4d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `skresnext50_32x4d`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('skresnext50_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{li2019selective, + title={Selective Kernel Networks}, + author={Xiang Li and Wenhai Wang and Xiaolin Hu and Jian Yang}, + year={2019}, + eprint={1903.06586}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/spnasnet.md b/testbed/huggingface__pytorch-image-models/docs/models/spnasnet.md new file mode 100644 index 0000000000000000000000000000000000000000..6b54f2f7e5ff0897ee2f0dbcfe16d84a8631c486 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/spnasnet.md @@ -0,0 +1,123 @@ +# SPNASNet + +**Single-Path NAS** is a novel differentiable NAS method for designing hardware-efficient ConvNets in less than 4 hours. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('spnasnet_100', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `spnasnet_100`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('spnasnet_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{stamoulis2019singlepath, + title={Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours}, + author={Dimitrios Stamoulis and Ruizhou Ding and Di Wang and Dimitrios Lymberopoulos and Bodhi Priyantha and Jie Liu and Diana Marculescu}, + year={2019}, + eprint={1904.02877}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/ssl-resnet.md b/testbed/huggingface__pytorch-image-models/docs/models/ssl-resnet.md new file mode 100644 index 0000000000000000000000000000000000000000..02bc8d285ab19a28b64c6131934ef19ffe77f7f0 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/ssl-resnet.md @@ -0,0 +1,192 @@ +# SSL ResNet + +**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) ontop of each other to form network: e.g. a ResNet-50 has fifty layers using these blocks. + +The model in this collection utilises semi-supervised learning to improve the performance of the model. The approach brings important gains to standard architectures for image, video and fine-grained classification. + +Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('ssl_resnet18', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `ssl_resnet18`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('ssl_resnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1905-00546, + author = {I. Zeki Yalniz and + Herv{\'{e}} J{\'{e}}gou and + Kan Chen and + Manohar Paluri and + Dhruv Mahajan}, + title = {Billion-scale semi-supervised learning for image classification}, + journal = {CoRR}, + volume = {abs/1905.00546}, + year = {2019}, + url = {http://arxiv.org/abs/1905.00546}, + archivePrefix = {arXiv}, + eprint = {1905.00546}, + timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/ssl-resnext.md b/testbed/huggingface__pytorch-image-models/docs/models/ssl-resnext.md new file mode 100644 index 0000000000000000000000000000000000000000..878ed003085462243de4a88c1012717cb4af29e7 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/ssl-resnext.md @@ -0,0 +1,278 @@ +# SSL ResNeXT + +A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. + +The model in this collection utilises semi-supervised learning to improve the performance of the model. The approach brings important gains to standard architectures for image, video and fine-grained classification. + +Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('ssl_resnext101_32x16d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `ssl_resnext101_32x16d`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('ssl_resnext101_32x16d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1905-00546, + author = {I. Zeki Yalniz and + Herv{\'{e}} J{\'{e}}gou and + Kan Chen and + Manohar Paluri and + Dhruv Mahajan}, + title = {Billion-scale semi-supervised learning for image classification}, + journal = {CoRR}, + volume = {abs/1905.00546}, + year = {2019}, + url = {http://arxiv.org/abs/1905.00546}, + archivePrefix = {arXiv}, + eprint = {1905.00546}, + timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/swsl-resnet.md b/testbed/huggingface__pytorch-image-models/docs/models/swsl-resnet.md new file mode 100644 index 0000000000000000000000000000000000000000..4c935aed523b24649cb9794f6c07df73d64dbf3a --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/swsl-resnet.md @@ -0,0 +1,192 @@ +# SWSL ResNet + +**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) ontop of each other to form network: e.g. a ResNet-50 has fifty layers using these blocks. + +The models in this collection utilise semi-weakly supervised learning to improve the performance of the model. The approach brings important gains to standard architectures for image, video and fine-grained classification. + +Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('swsl_resnet18', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `swsl_resnet18`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('swsl_resnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1905-00546, + author = {I. Zeki Yalniz and + Herv{\'{e}} J{\'{e}}gou and + Kan Chen and + Manohar Paluri and + Dhruv Mahajan}, + title = {Billion-scale semi-supervised learning for image classification}, + journal = {CoRR}, + volume = {abs/1905.00546}, + year = {2019}, + url = {http://arxiv.org/abs/1905.00546}, + archivePrefix = {arXiv}, + eprint = {1905.00546}, + timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/swsl-resnext.md b/testbed/huggingface__pytorch-image-models/docs/models/swsl-resnext.md new file mode 100644 index 0000000000000000000000000000000000000000..1c0ce03122f246cbe2d251115be220020ea16899 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/swsl-resnext.md @@ -0,0 +1,278 @@ +# SWSL ResNeXt + +A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. + +The models in this collection utilise semi-weakly supervised learning to improve the performance of the model. The approach brings important gains to standard architectures for image, video and fine-grained classification. + +Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('swsl_resnext101_32x16d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `swsl_resnext101_32x16d`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('swsl_resnext101_32x16d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1905-00546, + author = {I. Zeki Yalniz and + Herv{\'{e}} J{\'{e}}gou and + Kan Chen and + Manohar Paluri and + Dhruv Mahajan}, + title = {Billion-scale semi-supervised learning for image classification}, + journal = {CoRR}, + volume = {abs/1905.00546}, + year = {2019}, + url = {http://arxiv.org/abs/1905.00546}, + archivePrefix = {arXiv}, + eprint = {1905.00546}, + timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/tf-efficientnet-lite.md b/testbed/huggingface__pytorch-image-models/docs/models/tf-efficientnet-lite.md new file mode 100644 index 0000000000000000000000000000000000000000..6c0ad9158e51a47823928a0044ce1180bbb1d9f5 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/tf-efficientnet-lite.md @@ -0,0 +1,256 @@ +# (Tensorflow) EfficientNet Lite + +**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient $\phi$ to uniformly scales network width, depth, and resolution in a principled way. + +The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. + +The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2). + +EfficientNet-Lite makes EfficientNet more suitable for mobile devices by introducing [ReLU6](https://paperswithcode.com/method/relu6) activation functions and removing [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation). + +The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('tf_efficientnet_lite0', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `tf_efficientnet_lite0`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('tf_efficientnet_lite0', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{tan2020efficientnet, + title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}, + author={Mingxing Tan and Quoc V. Le}, + year={2020}, + eprint={1905.11946}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/tf-efficientnet.md b/testbed/huggingface__pytorch-image-models/docs/models/tf-efficientnet.md new file mode 100644 index 0000000000000000000000000000000000000000..39a981ee1cf38d99d882a5777c1b41b147aa4e14 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/tf-efficientnet.md @@ -0,0 +1,663 @@ +# (Tensorflow) EfficientNet + +**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient $\phi$ to uniformly scales network width, depth, and resolution in a principled way. + +The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. + +The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). + +The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('tf_efficientnet_b0', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `tf_efficientnet_b0`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('tf_efficientnet_b0', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{tan2020efficientnet, + title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}, + author={Mingxing Tan and Quoc V. Le}, + year={2020}, + eprint={1905.11946}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/tf-inception-v3.md b/testbed/huggingface__pytorch-image-models/docs/models/tf-inception-v3.md new file mode 100644 index 0000000000000000000000000000000000000000..cbba35c8d13ca45be6bcede48e37e65c24f5c259 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/tf-inception-v3.md @@ -0,0 +1,148 @@ +# (Tensorflow) Inception v3 + +**Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), Factorized 7 x 7 convolutions, and the use of an [auxiliary classifer](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the sidehead). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module). + +The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('tf_inception_v3', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `tf_inception_v3`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('tf_inception_v3', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/SzegedyVISW15, + author = {Christian Szegedy and + Vincent Vanhoucke and + Sergey Ioffe and + Jonathon Shlens and + Zbigniew Wojna}, + title = {Rethinking the Inception Architecture for Computer Vision}, + journal = {CoRR}, + volume = {abs/1512.00567}, + year = {2015}, + url = {http://arxiv.org/abs/1512.00567}, + archivePrefix = {arXiv}, + eprint = {1512.00567}, + timestamp = {Mon, 13 Aug 2018 16:49:07 +0200}, + biburl = {https://dblp.org/rec/journals/corr/SzegedyVISW15.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/tf-mixnet.md b/testbed/huggingface__pytorch-image-models/docs/models/tf-mixnet.md new file mode 100644 index 0000000000000000000000000000000000000000..862c09d11e54bef907028492aabc9f4228aaee22 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/tf-mixnet.md @@ -0,0 +1,194 @@ +# (Tensorflow) MixNet + +**MixNet** is a type of convolutional neural network discovered via AutoML that utilises [MixConvs](https://paperswithcode.com/method/mixconv) instead of regular [depthwise convolutions](https://paperswithcode.com/method/depthwise-convolution). + +The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('tf_mixnet_l', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `tf_mixnet_l`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('tf_mixnet_l', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{tan2019mixconv, + title={MixConv: Mixed Depthwise Convolutional Kernels}, + author={Mingxing Tan and Quoc V. Le}, + year={2019}, + eprint={1907.09595}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/tf-mobilenet-v3.md b/testbed/huggingface__pytorch-image-models/docs/models/tf-mobilenet-v3.md new file mode 100644 index 0000000000000000000000000000000000000000..51946e8acfc30042ff9795fcf9ce3ca90d63f3b9 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/tf-mobilenet-v3.md @@ -0,0 +1,381 @@ +# (Tensorflow) MobileNet v3 + +**MobileNetV3** is a convolutional neural network that is designed for mobile phone CPUs. The network design includes the use of a [hard swish activation](https://paperswithcode.com/method/hard-swish) and [squeeze-and-excitation](https://paperswithcode.com/method/squeeze-and-excitation-block) modules in the [MBConv blocks](https://paperswithcode.com/method/inverted-residual-block). + +The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `tf_mobilenetv3_large_075`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1905-02244, + author = {Andrew Howard and + Mark Sandler and + Grace Chu and + Liang{-}Chieh Chen and + Bo Chen and + Mingxing Tan and + Weijun Wang and + Yukun Zhu and + Ruoming Pang and + Vijay Vasudevan and + Quoc V. Le and + Hartwig Adam}, + title = {Searching for MobileNetV3}, + journal = {CoRR}, + volume = {abs/1905.02244}, + year = {2019}, + url = {http://arxiv.org/abs/1905.02244}, + archivePrefix = {arXiv}, + eprint = {1905.02244}, + timestamp = {Tue, 12 Jan 2021 15:30:06 +0100}, + biburl = {https://dblp.org/rec/journals/corr/abs-1905-02244.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/tresnet.md b/testbed/huggingface__pytorch-image-models/docs/models/tresnet.md new file mode 100644 index 0000000000000000000000000000000000000000..89d01dec88c1b801162e4a74c3716f73b4a9712a --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/tresnet.md @@ -0,0 +1,352 @@ +# TResNet + +A **TResNet** is a variant on a [ResNet](https://paperswithcode.com/method/resnet) that aim to boost accuracy while maintaining GPU training and inference efficiency. They contain several design tricks including a SpaceToDepth stem, [Anti-Alias downsampling](https://paperswithcode.com/method/anti-alias-downsampling), In-Place Activated BatchNorm, Blocks selection and [squeeze-and-excitation layers](https://paperswithcode.com/method/squeeze-and-excitation-block). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('tresnet_l', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `tresnet_l`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('tresnet_l', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{ridnik2020tresnet, + title={TResNet: High Performance GPU-Dedicated Architecture}, + author={Tal Ridnik and Hussam Lawen and Asaf Noy and Emanuel Ben Baruch and Gilad Sharir and Itamar Friedman}, + year={2020}, + eprint={2003.13630}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/vision-transformer.md b/testbed/huggingface__pytorch-image-models/docs/models/vision-transformer.md new file mode 100644 index 0000000000000000000000000000000000000000..8e631977db2eca8dc9827df1b8dc1cbca71a2e0d --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/vision-transformer.md @@ -0,0 +1,380 @@ +# Vision Transformer (ViT) + +The **Vision Transformer** is a model for image classification that employs a Transformer-like architecture over patches of the image. This includes the use of [Multi-Head Attention](https://paperswithcode.com/method/multi-head-attention), [Scaled Dot-Product Attention](https://paperswithcode.com/method/scaled) and other architectural features seen in the [Transformer](https://paperswithcode.com/method/transformer) architecture traditionally used for NLP. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('vit_base_patch16_224', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `vit_base_patch16_224`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('vit_base_patch16_224', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{dosovitskiy2020image, + title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale}, + author={Alexey Dosovitskiy and Lucas Beyer and Alexander Kolesnikov and Dirk Weissenborn and Xiaohua Zhai and Thomas Unterthiner and Mostafa Dehghani and Matthias Minderer and Georg Heigold and Sylvain Gelly and Jakob Uszkoreit and Neil Houlsby}, + year={2020}, + eprint={2010.11929}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/wide-resnet.md b/testbed/huggingface__pytorch-image-models/docs/models/wide-resnet.md new file mode 100644 index 0000000000000000000000000000000000000000..98a166550a0231465a40385ec4feb3e06f471f1d --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/wide-resnet.md @@ -0,0 +1,163 @@ +# Wide ResNet + +**Wide Residual Networks** are a variant on [ResNets](https://paperswithcode.com/method/resnet) where we decrease depth and increase the width of residual networks. This is achieved through the use of [wide residual blocks](https://paperswithcode.com/method/wide-residual-block). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('wide_resnet101_2', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `wide_resnet101_2`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('wide_resnet101_2', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/ZagoruykoK16, + author = {Sergey Zagoruyko and + Nikos Komodakis}, + title = {Wide Residual Networks}, + journal = {CoRR}, + volume = {abs/1605.07146}, + year = {2016}, + url = {http://arxiv.org/abs/1605.07146}, + archivePrefix = {arXiv}, + eprint = {1605.07146}, + timestamp = {Mon, 13 Aug 2018 16:46:42 +0200}, + biburl = {https://dblp.org/rec/journals/corr/ZagoruykoK16.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/models/xception.md b/testbed/huggingface__pytorch-image-models/docs/models/xception.md new file mode 100644 index 0000000000000000000000000000000000000000..1c7abff82773ee99e943c4bf79afeac82eb327e2 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/models/xception.md @@ -0,0 +1,224 @@ +# Xception + +**Xception** is a convolutional neural network architecture that relies solely on [depthwise separable convolution layers](https://paperswithcode.com/method/depthwise-separable-convolution). + +The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('xception', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `xception`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('xception', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/ZagoruykoK16, +@misc{chollet2017xception, + title={Xception: Deep Learning with Depthwise Separable Convolutions}, + author={François Chollet}, + year={2017}, + eprint={1610.02357}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/results.md b/testbed/huggingface__pytorch-image-models/docs/results.md new file mode 100644 index 0000000000000000000000000000000000000000..a06ff33278af3b8d1e42000834859fa880e5e50c --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/results.md @@ -0,0 +1,67 @@ +# Results + +CSV files containing an ImageNet-1K and out-of-distribution (OOD) test set validation results for all models with pretrained weights is located in the repository [results folder](https://github.com/rwightman/pytorch-image-models/tree/master/results). + +## Self-trained Weights + +The table below includes ImageNet-1k validation results of model weights that I've trained myself. It is not updated as frequently as the csv results outputs linked above. + +|Model | Acc@1 (Err) | Acc@5 (Err) | Param # (M) | Interpolation | Image Size | +|---|---|---|---|---|---| +| efficientnet_b3a | 82.242 (17.758) | 96.114 (3.886) | 12.23 | bicubic | 320 (1.0 crop) | +| efficientnet_b3 | 82.076 (17.924) | 96.020 (3.980) | 12.23 | bicubic | 300 | +| regnet_32 | 82.002 (17.998) | 95.906 (4.094) | 19.44 | bicubic | 224 | +| skresnext50d_32x4d | 81.278 (18.722) | 95.366 (4.634) | 27.5 | bicubic | 288 (1.0 crop) | +| seresnext50d_32x4d | 81.266 (18.734) | 95.620 (4.380) | 27.6 | bicubic | 224 | +| efficientnet_b2a | 80.608 (19.392) | 95.310 (4.690) | 9.11 | bicubic | 288 (1.0 crop) | +| resnet50d | 80.530 (19.470) | 95.160 (4.840) | 25.6 | bicubic | 224 | +| mixnet_xl | 80.478 (19.522) | 94.932 (5.068) | 11.90 | bicubic | 224 | +| efficientnet_b2 | 80.402 (19.598) | 95.076 (4.924) | 9.11 | bicubic | 260 | +| seresnet50 | 80.274 (19.726) | 95.070 (4.930) | 28.1 | bicubic | 224 | +| skresnext50d_32x4d | 80.156 (19.844) | 94.642 (5.358) | 27.5 | bicubic | 224 | +| cspdarknet53 | 80.058 (19.942) | 95.084 (4.916) | 27.6 | bicubic | 256 | +| cspresnext50 | 80.040 (19.960) | 94.944 (5.056) | 20.6 | bicubic | 224 | +| resnext50_32x4d | 79.762 (20.238) | 94.600 (5.400) | 25 | bicubic | 224 | +| resnext50d_32x4d | 79.674 (20.326) | 94.868 (5.132) | 25.1 | bicubic | 224 | +| cspresnet50 | 79.574 (20.426) | 94.712 (5.288) | 21.6 | bicubic | 256 | +| ese_vovnet39b | 79.320 (20.680) | 94.710 (5.290) | 24.6 | bicubic | 224 | +| resnetblur50 | 79.290 (20.710) | 94.632 (5.368) | 25.6 | bicubic | 224 | +| dpn68b | 79.216 (20.784) | 94.414 (5.586) | 12.6 | bicubic | 224 | +| resnet50 | 79.038 (20.962) | 94.390 (5.610) | 25.6 | bicubic | 224 | +| mixnet_l | 78.976 (21.024 | 94.184 (5.816) | 7.33 | bicubic | 224 | +| efficientnet_b1 | 78.692 (21.308) | 94.086 (5.914) | 7.79 | bicubic | 240 | +| efficientnet_es | 78.066 (21.934) | 93.926 (6.074) | 5.44 | bicubic | 224 | +| seresnext26t_32x4d | 77.998 (22.002) | 93.708 (6.292) | 16.8 | bicubic | 224 | +| seresnext26tn_32x4d | 77.986 (22.014) | 93.746 (6.254) | 16.8 | bicubic | 224 | +| efficientnet_b0 | 77.698 (22.302) | 93.532 (6.468) | 5.29 | bicubic | 224 | +| seresnext26d_32x4d | 77.602 (22.398) | 93.608 (6.392) | 16.8 | bicubic | 224 | +| mobilenetv2_120d | 77.294 (22.706 | 93.502 (6.498) | 5.8 | bicubic | 224 | +| mixnet_m | 77.256 (22.744) | 93.418 (6.582) | 5.01 | bicubic | 224 | +| resnet34d | 77.116 (22.884) | 93.382 (6.618) | 21.8 | bicubic | 224 | +| seresnext26_32x4d | 77.104 (22.896) | 93.316 (6.684) | 16.8 | bicubic | 224 | +| skresnet34 | 76.912 (23.088) | 93.322 (6.678) | 22.2 | bicubic | 224 | +| ese_vovnet19b_dw | 76.798 (23.202) | 93.268 (6.732) | 6.5 | bicubic | 224 | +| resnet26d | 76.68 (23.32) | 93.166 (6.834) | 16 | bicubic | 224 | +| densenetblur121d | 76.576 (23.424) | 93.190 (6.810) | 8.0 | bicubic | 224 | +| mobilenetv2_140 | 76.524 (23.476) | 92.990 (7.010) | 6.1 | bicubic | 224 | +| mixnet_s | 75.988 (24.012) | 92.794 (7.206) | 4.13 | bicubic | 224 | +| mobilenetv3_large_100 | 75.766 (24.234) | 92.542 (7.458) | 5.5 | bicubic | 224 | +| mobilenetv3_rw | 75.634 (24.366) | 92.708 (7.292) | 5.5 | bicubic | 224 | +| mnasnet_a1 | 75.448 (24.552) | 92.604 (7.396) | 3.89 | bicubic | 224 | +| resnet26 | 75.292 (24.708) | 92.57 (7.43) | 16 | bicubic | 224 | +| fbnetc_100 | 75.124 (24.876) | 92.386 (7.614) | 5.6 | bilinear | 224 | +| resnet34 | 75.110 (24.890) | 92.284 (7.716) | 22 | bilinear | 224 | +| mobilenetv2_110d | 75.052 (24.948) | 92.180 (7.820) | 4.5 | bicubic | 224 | +| seresnet34 | 74.808 (25.192) | 92.124 (7.876) | 22 | bilinear | 224 | +| mnasnet_b1 | 74.658 (25.342) | 92.114 (7.886) | 4.38 | bicubic | 224 | +| spnasnet_100 | 74.084 (25.916) | 91.818 (8.182) | 4.42 | bilinear | 224 | +| skresnet18 | 73.038 (26.962) | 91.168 (8.832) | 11.9 | bicubic | 224 | +| mobilenetv2_100 | 72.978 (27.022) | 91.016 (8.984) | 3.5 | bicubic | 224 | +| resnet18d | 72.260 (27.740) | 90.696 (9.304) | 11.7 | bicubic | 224 | +| seresnet18 | 71.742 (28.258) | 90.334 (9.666) | 11.8 | bicubic | 224 | + +## Ported and Other Weights + +For weights ported from other deep learning frameworks (Tensorflow, MXNet GluonCV) or copied from other PyTorch sources, please see the full results tables for ImageNet and various OOD test sets at in the [results tables](https://github.com/rwightman/pytorch-image-models/tree/master/results). + +Model code .py files contain links to original sources of models and weights. diff --git a/testbed/huggingface__pytorch-image-models/docs/scripts.md b/testbed/huggingface__pytorch-image-models/docs/scripts.md new file mode 100644 index 0000000000000000000000000000000000000000..f48eec0d75cd7b882c18278279cfeb3ffce709ef --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/scripts.md @@ -0,0 +1,27 @@ +# Scripts +A train, validation, inference, and checkpoint cleaning script included in the github root folder. Scripts are not currently packaged in the pip release. + +The training and validation scripts evolved from early versions of the [PyTorch Imagenet Examples](https://github.com/pytorch/examples). I have added significant functionality over time, including CUDA specific performance enhancements based on +[NVIDIA's APEX Examples](https://github.com/NVIDIA/apex/tree/master/examples). + +## Training Script + +The variety of training args is large and not all combinations of options (or even options) have been fully tested. For the training dataset folder, specify the folder to the base that contains a `train` and `validation` folder. + +To train an SE-ResNet34 on ImageNet, locally distributed, 4 GPUs, one process per GPU w/ cosine schedule, random-erasing prob of 50% and per-pixel random value: + +`./distributed_train.sh 4 /data/imagenet --model seresnet34 --sched cosine --epochs 150 --warmup-epochs 5 --lr 0.4 --reprob 0.5 --remode pixel --batch-size 256 --amp -j 4` + +NOTE: It is recommended to use PyTorch 1.7+ w/ PyTorch native AMP and DDP instead of APEX AMP. `--amp` defaults to native AMP as of timm ver 0.4.3. `--apex-amp` will force use of APEX components if they are installed. + +## Validation / Inference Scripts + +Validation and inference scripts are similar in usage. One outputs metrics on a validation set and the other outputs topk class ids in a csv. Specify the folder containing validation images, not the base as in training script. + +To validate with the model's pretrained weights (if they exist): + +`python validate.py /imagenet/validation/ --model seresnext26_32x4d --pretrained` + +To run inference from a checkpoint: + +`python inference.py /imagenet/validation/ --model mobilenetv3_large_100 --checkpoint ./output/train/model_best.pth.tar` \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/docs/training_hparam_examples.md b/testbed/huggingface__pytorch-image-models/docs/training_hparam_examples.md new file mode 100644 index 0000000000000000000000000000000000000000..c2afc2b1081079523138432771316f270b3cb065 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/docs/training_hparam_examples.md @@ -0,0 +1,47 @@ +# Training Examples + +## EfficientNet-B2 with RandAugment - 80.4 top-1, 95.1 top-5 +These params are for dual Titan RTX cards with NVIDIA Apex installed: + +`./distributed_train.sh 2 /imagenet/ --model efficientnet_b2 -b 128 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .97 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.3 --drop-connect 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .016` + +## MixNet-XL with RandAugment - 80.5 top-1, 94.9 top-5 +This params are for dual Titan RTX cards with NVIDIA Apex installed: + +`./distributed_train.sh 2 /imagenet/ --model mixnet_xl -b 128 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .969 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.3 --drop-connect 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.3 --amp --lr .016 --dist-bn reduce` + +## SE-ResNeXt-26-D and SE-ResNeXt-26-T +These hparams (or similar) work well for a wide range of ResNet architecture, generally a good idea to increase the epoch # as the model size increases... ie approx 180-200 for ResNe(X)t50, and 220+ for larger. Increase batch size and LR proportionally for better GPUs or with AMP enabled. These params were for 2 1080Ti cards: + +`./distributed_train.sh 2 /imagenet/ --model seresnext26t_32x4d --lr 0.1 --warmup-epochs 5 --epochs 160 --weight-decay 1e-4 --sched cosine --reprob 0.4 --remode pixel -b 112` + +## EfficientNet-B3 with RandAugment - 81.5 top-1, 95.7 top-5 +The training of this model started with the same command line as EfficientNet-B2 w/ RA above. After almost three weeks of training the process crashed. The results weren't looking amazing so I resumed the training several times with tweaks to a few params (increase RE prob, decrease rand-aug, increase ema-decay). Nothing looked great. I ended up averaging the best checkpoints from all restarts. The result is mediocre at default res/crop but oddly performs much better with a full image test crop of 1.0. + +## EfficientNet-B0 with RandAugment - 77.7 top-1, 95.3 top-5 +[Michael Klachko](https://github.com/michaelklachko) achieved these results with the command line for B2 adapted for larger batch size, with the recommended B0 dropout rate of 0.2. + +`./distributed_train.sh 2 /imagenet/ --model efficientnet_b0 -b 384 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .97 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-connect 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .048` + +## ResNet50 with JSD loss and RandAugment (clean + 2x RA augs) - 79.04 top-1, 94.39 top-5 + +Trained on two older 1080Ti cards, this took a while. Only slightly, non statistically better ImageNet validation result than my first good AugMix training of 78.99. However, these weights are more robust on tests with ImageNetV2, ImageNet-Sketch, etc. Unlike my first AugMix runs, I've enabled SplitBatchNorm, disabled random erasing on the clean split, and cranked up random erasing prob on the 2 augmented paths. + +`./distributed_train.sh 2 /imagenet -b 64 --model resnet50 --sched cosine --epochs 200 --lr 0.05 --amp --remode pixel --reprob 0.6 --aug-splits 3 --aa rand-m9-mstd0.5-inc1 --resplit --split-bn --jsd --dist-bn reduce` + +## EfficientNet-ES (EdgeTPU-Small) with RandAugment - 78.066 top-1, 93.926 top-5 +Trained by [Andrew Lavin](https://github.com/andravin) with 8 V100 cards. Model EMA was not used, final checkpoint is the average of 8 best checkpoints during training. + +`./distributed_train.sh 8 /imagenet --model efficientnet_es -b 128 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .97 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-connect 0.2 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .064` + +## MobileNetV3-Large-100 - 75.766 top-1, 92,542 top-5 + +`./distributed_train.sh 2 /imagenet/ --model mobilenetv3_large_100 -b 512 --sched step --epochs 600 --decay-epochs 2.4 --decay-rate .973 --opt rmsproptf --opt-eps .001 -j 7 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-connect 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .064 --lr-noise 0.42 0.9` + + +## ResNeXt-50 32x4d w/ RandAugment - 79.762 top-1, 94.60 top-5 +These params will also work well for SE-ResNeXt-50 and SK-ResNeXt-50 and likely 101. I used them for the SK-ResNeXt-50 32x4d that I trained with 2 GPU using a slightly higher LR per effective batch size (lr=0.18, b=192 per GPU). The cmd line below are tuned for 8 GPU training. + + +`./distributed_train.sh 8 /imagenet --model resnext50_32x4d --lr 0.6 --warmup-epochs 5 --epochs 240 --weight-decay 1e-4 --sched cosine --reprob 0.4 --recount 3 --remode pixel --aa rand-m7-mstd0.5-inc1 -b 192 -j 6 --amp --dist-bn reduce` + diff --git a/testbed/huggingface__pytorch-image-models/hubconf.py b/testbed/huggingface__pytorch-image-models/hubconf.py new file mode 100644 index 0000000000000000000000000000000000000000..70fed79a27a254dbe5fc32a6ffb41f2fded08ef2 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/hubconf.py @@ -0,0 +1,4 @@ +dependencies = ['torch'] +from timm.models import registry + +globals().update(registry._model_entrypoints) diff --git a/testbed/huggingface__pytorch-image-models/mkdocs.yml b/testbed/huggingface__pytorch-image-models/mkdocs.yml new file mode 100644 index 0000000000000000000000000000000000000000..a72436c67dfac977c00b49130c737dd3d935f7da --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/mkdocs.yml @@ -0,0 +1,46 @@ +site_name: 'Pytorch Image Models' +site_description: 'Pretained Image Recognition Models' +repo_name: 'rwightman/pytorch-image-models' +repo_url: 'https://github.com/rwightman/pytorch-image-models' +nav: + - index.md + - models.md + - ... | models/*.md + - results.md + - scripts.md + - training_hparam_examples.md + - feature_extraction.md + - changes.md + - archived_changes.md +theme: + name: 'material' + feature: + tabs: false +extra_javascript: + - 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-MML-AM_CHTML' + - https://cdnjs.cloudflare.com/ajax/libs/tablesort/5.2.1/tablesort.min.js + - javascripts/tables.js +markdown_extensions: + - codehilite: + linenums: true + - admonition + - pymdownx.arithmatex + - pymdownx.betterem: + smart_enable: all + - pymdownx.caret + - pymdownx.critic + - pymdownx.details + - pymdownx.emoji: + emoji_generator: !!python/name:pymdownx.emoji.to_svg + - pymdownx.inlinehilite + - pymdownx.magiclink + - pymdownx.mark + - pymdownx.smartsymbols + - pymdownx.superfences + - pymdownx.tasklist: + custom_checkbox: true + - pymdownx.tilde + - mdx_truly_sane_lists +plugins: + - search + - awesome-pages diff --git a/testbed/huggingface__pytorch-image-models/notebooks/EffResNetComparison.ipynb b/testbed/huggingface__pytorch-image-models/notebooks/EffResNetComparison.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..9df4bf9e1f89e04f44ba64036780031033b2c825 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/notebooks/EffResNetComparison.ipynb @@ -0,0 +1,1478 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "EffResNetComparison", + "version": "0.3.2", + "provenance": [], + "collapsed_sections": [], + "include_colab_link": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github", + "colab_type": "text" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7AUmKc2yMHz0", + "colab_type": "text" + }, + "source": [ + "# EfficientNets vs ResNets in PyTorch: On Why I Won't Be Tossing My ResNets\n", + "\n", + "First off, I want to be clear that I am not panning EfficientNets (https://arxiv.org/abs/1905.11946) here. They are unprecident in their parameter and FLOP efficiency. Thanks Mingxing Tan, Quoc V. Le, and the Google Brain team for releasing the code and weights.\n", + "\n", + "I dug into the EfficientNet paper the day it was released. I had recently implemented MobileNet-v3 and MNasNet architectures in PyTorch and EfficientNets have a lot in common with those models. After defining new model definitions strings, adding the depth scaling, and hacking together some weight porting code they were alive. \n", + "\n", + "First impressions were positive, \"Wow, that's some impressive accuracy for so few parameters (and such small checkpoints)''. After spending more time with the models, training them, running numerous validations, etc. some realities sank in. These models are less efficient in actual use than I'd expected. I started doing more detailed comparisons with familiar ResNet models and that's how this notebook came to be...\n", + "\n", + "## Objectives\n", + "A few points I'm hoping to illustrate in this notebook:\n", + "\n", + "1. The efficiencies of EfficientNets may not translate to better real-world performance on all frameworks and hardware platforms. Your trusty old ResNets may be just as good for your NN framework of choice running on an NVIDIA GPU. What consumes less resources in Tensorflow with an XLA optimized graph on a TPU, may end up being more resource hungry in PyTorch running with a CUDA backend.\n", + "\n", + "2. The story of a ResNet-50 does not end with a top-1 of 76.3% on ImageNet-1k. Neither do the other ResNe(X)t networks end with the results of the original papers or the pretrained weights of canonical Caffe, Tensorflow, or PyTorch implementations. Many papers compare shiny new architectures trained with recent techniques (or algorithmically searched hyper-parameters) to ResNet baselines that aren't given the same training effort. A ResNet-50 can be trained to well over 78% on ImageNet -- better than an 'original' ResNet-152 -- a 35M parameter difference! I've selected better pretrained models to compare against the EfficientNets. \n", + "\n", + "3. Most PyTorch implementations of EfficientNet that I'm aware of are using the Tensorflow ported weights, like my 'tf_efficientnet_b*' models. These ported weights requires explicit padding ops to match the behaviour of Tensorflow 'SAME' padding. This padding adds a runtime penalty (about 2% for forward) and a memory penalty (reducing max batch sizes by roughly 15-20%). I've natively trained the B0 through B2 models in PyTorch now, but haven't made progress on B3 and up (very slow to train).\n", + "\n", + "4. There are some nifty inference tricks, like test time pooling, that can breathe life into old models and allow them to be used outside of their standard resolutions without retraining. A few ResNets were run with TTP here at resolutions similar to the EffNet models as a comparison.\n", + "\n", + "## Considerations\n", + "\n", + "A few additional considerations:\n", + "* I'm only running the numbers on validation here to keep the Colab notebook sane. I have trained with all of the architectures, the relative differences in throughtput and memory usage/batch size limits fit my experience training as well.\n", + "\n", + "* This comparison is for PyTorch 1.0/1.1 with a CUDA backend. Future versions of PyTorch, CUDA, or the PyTorch XLA TPU backend may change things significantly. I'm hoping to compare these models with the PyTorch XLA impl at some point. Not sure if it's ready yet?\n", + "\n", + "* The analysis is for the ImageNet classification task. The extra resolution in all EfficientNet > b0 is arguably less beneficial for this task than say fine-grained classification, segmentation, object detection and other more interesting tasks. Since the input resolution is responsible for a large amount of the GPU memory use, and ResNets for those other tasks are also run at higher res, the comparisons made do highly depend on the task.\n", + "\n", + "## What's TIMM and where are the models?\n", + "\n", + "The `timm` module use here is a PyPi packaging of my PyTorch Image Models \n", + "- https://github.com/rwightman/pytorch-image-models\n", + "\n", + "Stand alone version of the EfficientNet, MobileNet-V3, MNasNet, etc can also be found at \n", + "- https://github.com/rwightman/gen-efficientnet-pytorch" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "0f8AXYsjtKs5", + "colab_type": "code", + "outputId": "c8a180e8-8b39-4905-aa46-f82c58b974a0", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 224 + } + }, + "source": [ + "# Install necessary modules\n", + "!pip install timm" + ], + "execution_count": 1, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Collecting timm\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/1e/87/7de9e1175bda1151de177198bb2e99ac78cf0bdf97309b19f6d22b215b79/timm-0.1.6-py3-none-any.whl (83kB)\n", + "\u001b[K |████████████████████████████████| 92kB 28.0MB/s \n", + "\u001b[?25hRequirement already satisfied: torchvision in /usr/local/lib/python3.6/dist-packages (from timm) (0.3.0)\n", + "Requirement already satisfied: torch>=1.0 in /usr/local/lib/python3.6/dist-packages (from timm) (1.1.0)\n", + "Requirement already satisfied: pillow>=4.1.1 in /usr/local/lib/python3.6/dist-packages (from torchvision->timm) (4.3.0)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from torchvision->timm) (1.16.4)\n", + "Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from torchvision->timm) (1.12.0)\n", + "Requirement already satisfied: olefile in /usr/local/lib/python3.6/dist-packages (from pillow>=4.1.1->torchvision->timm) (0.46)\n", + "Installing collected packages: timm\n", + "Successfully installed timm-0.1.6\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1qh-__YFuWrS", + "colab_type": "text" + }, + "source": [ + "" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "_GEzMzggMxBw", + "colab_type": "code", + "outputId": "183aad75-69aa-4e00-c1bc-06f5b40baecf", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 306 + } + }, + "source": [ + "# For our convenience, take a peek at what we're working with\n", + "!nvidia-smi" + ], + "execution_count": 2, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Mon Jul 1 20:17:45 2019 \n", + "+-----------------------------------------------------------------------------+\n", + "| NVIDIA-SMI 418.67 Driver Version: 410.79 CUDA Version: 10.0 |\n", + "|-------------------------------+----------------------+----------------------+\n", + "| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n", + "| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n", + "|===============================+======================+======================|\n", + "| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n", + "| N/A 44C P8 15W / 70W | 0MiB / 15079MiB | 0% Default |\n", + "+-------------------------------+----------------------+----------------------+\n", + " \n", + "+-----------------------------------------------------------------------------+\n", + "| Processes: GPU Memory |\n", + "| GPU PID Type Process name Usage |\n", + "|=============================================================================|\n", + "| No running processes found |\n", + "+-----------------------------------------------------------------------------+\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "_69zvVb7v4cw", + "colab_type": "code", + "outputId": "3ca2e609-6c50-47e2-823d-d0e9a07f985f", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 51 + } + }, + "source": [ + "# Import the core modules, check which GPU we end up with and scale batch size accordingly\n", + "import torch\n", + "\n", + "# Flipping this on/off will change the memory dynamics, since I usually\n", + "# validate and train with it on, will leave it on by default\n", + "torch.backends.cudnn.benchmark = True\n", + "\n", + "import timm\n", + "from timm.data import *\n", + "from timm.utils import *\n", + "\n", + "import pynvml\n", + "from collections import OrderedDict\n", + "import logging\n", + "import time\n", + "\n", + "def log_gpu_memory():\n", + " handle = pynvml.nvmlDeviceGetHandleByIndex(0)\n", + " info = pynvml.nvmlDeviceGetMemoryInfo(handle)\n", + " info.free = round(info.free / 1024**2)\n", + " info.used = round(info.used / 1024**2)\n", + " logging.info('GPU memory free: {}, memory used: {}'.format(info.free, info.used))\n", + " return info.used\n", + "\n", + "def get_gpu_memory_total():\n", + " handle = pynvml.nvmlDeviceGetHandleByIndex(0)\n", + " info = pynvml.nvmlDeviceGetMemoryInfo(handle)\n", + " info.total = round(info.total / 1024**2)\n", + " return info.total\n", + " \n", + "pynvml.nvmlInit()\n", + "setup_default_logging()\n", + "log_gpu_memory()\n", + "\n", + "total_gpu_mem = get_gpu_memory_total()\n", + "if total_gpu_mem > 12300:\n", + " logging.info('Running on a T4 GPU or other with > 12GB memory, setting batch size to 128')\n", + " batch_size = 128\n", + "else:\n", + " logging.info('Running on a K80 GPU or other with < 12GB memory, batch size set to 80')\n", + " batch_size = 80" + ], + "execution_count": 3, + "outputs": [ + { + "output_type": "stream", + "text": [ + "GPU memory free: 15080, memory used: 0\n", + "Running on a T4 GPU or other with > 12GB memory, setting batch size to 128\n" + ], + "name": "stderr" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OVQORlCtNEkX", + "colab_type": "text" + }, + "source": [ + "# ImageNet-'V2' Validation\n", + "\n", + "If you're not aware, ImageNet-V2 (https://github.com/modestyachts/ImageNetV2) is a useful collection of 3 ImageNet-like validation sets that have been collected more recently, 10 years after the original ImageNet.\n", + "\n", + "Aside from being conveniently smaller and easier to deploy in a notebook, it's a useful test set to compare how models might generalize beyond the original ImageNet-1k data. We're going to use the 'Matched Frequency' version of the dataset. There is a markedly lower accuracy rate across the board for this test set. It's very interesting to see how different models fall relative to each other. I've included an analysis of those differences at the bottom.\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "IfBJUXdPxa2C", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Download and extract the dataset (note it's not actually a gz like the file says)\n", + "if not os.path.exists('./imagenetv2-matched-frequency'):\n", + " !curl -s https://s3-us-west-2.amazonaws.com/imagenetv2public/imagenetv2-matched-frequency.tar.gz | tar x\n", + "dataset = Dataset('./imagenetv2-matched-frequency/')\n", + "for i in range(len(dataset)): # warmup\n", + " dummy = dataset[i]" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "yPPC-A50wUji", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# A basic validation routine with timing and accuracy metrics\n", + "\n", + "def validate(model, loader):\n", + " batch_time = AverageMeter()\n", + " losses = AverageMeter()\n", + " top1 = AverageMeter()\n", + " top5 = AverageMeter()\n", + "\n", + " model.eval()\n", + " #torch.cuda.reset_max_memory_allocated()\n", + " #torch.cuda.reset_max_memory_cached()\n", + " gpu_used_baseline = log_gpu_memory()\n", + " gpu_used = 0\n", + " start = end = time.time()\n", + " num_batches = len(loader)\n", + " log_iter = round(0.25 * num_batches)\n", + " with torch.no_grad():\n", + " for i, (input, target) in enumerate(loader):\n", + " target = target.cuda()\n", + " input = input.cuda()\n", + "\n", + " output = model(input)\n", + "\n", + " prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n", + " top1.update(prec1.item(), input.size(0))\n", + " top5.update(prec5.item(), input.size(0))\n", + "\n", + " batch_time.update(time.time() - end)\n", + " end = time.time()\n", + "\n", + " if i and i % log_iter == 0:\n", + " if gpu_used == 0:\n", + " gpu_used = log_gpu_memory()\n", + " logging.info(\n", + " 'Test: [{0:>4d}/{1}] '\n", + " 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '\n", + " 'Rate: {rate_avg:.3f} img/sec '\n", + " 'Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '\n", + " 'Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(\n", + " i, len(loader), batch_time=batch_time,\n", + " rate_avg=input.size(0) / batch_time.avg,\n", + " loss=losses, top1=top1, top5=top5))\n", + " gpu_used = gpu_used - gpu_used_baseline\n", + " # These measures are less consistent than method being used wrt\n", + " # where the batch sizes can be pushed for each model\n", + " #gpu_used = torch.cuda.max_memory_allocated()\n", + " #gpu_cached = torch.cuda.max_memory_cached()\n", + " elapsed = time.time() - start\n", + " results = OrderedDict(\n", + " top1=round(top1.avg, 3), top1_err=round(100 - top1.avg, 3),\n", + " top5=round(top5.avg, 3), top5_err=round(100 - top5.avg, 3),\n", + " rate=len(loader.dataset) / elapsed, gpu_used=gpu_used,\n", + " )\n", + "\n", + " logging.info(' * Prec@1 {:.3f} ({:.3f}) Prec@5 {:.3f} ({:.3f}) Rate {:.3f}'.format(\n", + " results['top1'], results['top1_err'], results['top5'],\n", + " results['top5_err'], results['rate']))\n", + "\n", + " return results\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9hj8cy16Wnju", + "colab_type": "text" + }, + "source": [ + "# Model Selection\n", + "\n", + "As per the intro, one of the goals here is to compare EfficientNets with a more capable set of baseline models. I've gone through the various models included in my collection and selected several that I feel are more appropriate matches based on their Top-1 scores from much better training setups than originals.\n", + "\n", + "Here we will split them into 4 lists for analysis and charting:\n", + "* EfficientNet models with natively trained PyTorch weights and no padding hacks\n", + "* EfficientNet models with weights ported from Tensorflow and SAME padding hack\n", + "* ResNe(X)t (or DPN) models at 224x224 native resoultion with weights from myself, Gluon model zoo, or Facebook Instagram trained models\n", + "* ResNe(X)t models at non-native resolutions with Test Time Pooling enabled\n", + "\n", + "Note: I realize it's not entirely fair to include the IG ResNext model since it's not technically trained purely on ImageNet like the others. But, it's a truly impressive model, and actually quite a bit easier to work with in PyTorch than even the B4 EfficientNet." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "DCQg0hky5lVm", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Define the models and arguments that will be used for comparisons\n", + "\n", + "# include original ImageNet-1k validation results for comparison against ImageNet-V2 here\n", + "orig_top1 = dict(\n", + " efficientnet_b0=76.912,\n", + " efficientnet_b1=78.692,\n", + " efficientnet_b2=79.760,\n", + " tf_efficientnet_b1=78.554,\n", + " tf_efficientnet_b2=79.606,\n", + " tf_efficientnet_b3=80.874,\n", + " tf_efficientnet_b4=82.604,\n", + " dpn68b=77.514,\n", + " seresnext26_32x4d=77.104,\n", + " resnet50=78.486,\n", + " gluon_seresnext50_32x4d=79.912,\n", + " gluon_seresnext101_32x4d=80.902,\n", + " ig_resnext101_32x8d=82.688,\n", + ")\n", + "\n", + "models_effnet = [\n", + " dict(model_name='efficientnet_b0'),\n", + " dict(model_name='efficientnet_b1'),\n", + " dict(model_name='efficientnet_b2'),\n", + "]\n", + "\n", + "models_effnet_tf = [\n", + " dict(model_name='tf_efficientnet_b2'), # overlapping between TF non-TF for comparison\n", + " dict(model_name='tf_efficientnet_b3'),\n", + " dict(model_name='tf_efficientnet_b4'),\n", + "]\n", + "\n", + "models_resnet = [\n", + " dict(model_name='dpn68b'), # b0, yes, not a ResNet, need to find a better b0 comparison\n", + " #dict(model_name='seresnext26_32x4d'), # b0, not the best b0 comparison either, a little slow\n", + " dict(model_name='resnet50'), # b1\n", + " dict(model_name='gluon_seresnext50_32x4d'), # b2-b3\n", + " dict(model_name='gluon_seresnext101_32x4d'), # b3\n", + " dict(model_name='ig_resnext101_32x8d'), # b4\n", + "]\n", + "\n", + "models_resnet_ttp = [\n", + " dict(model_name='resnet50', input_size=(3, 240, 240), ttp=True),\n", + " dict(model_name='resnet50', input_size=(3, 260, 260), ttp=True),\n", + " dict(model_name='gluon_seresnext50_32x4d', input_size=(3, 260, 260), ttp=True),\n", + " dict(model_name='gluon_seresnext50_32x4d', input_size=(3, 300, 300), ttp=True),\n", + " dict(model_name='gluon_seresnext101_32x4d', input_size=(3, 260, 260), ttp=True),\n", + " dict(model_name='gluon_seresnext101_32x4d', input_size=(3, 300, 300), ttp=True),\n", + " dict(model_name='ig_resnext101_32x8d', input_size=(3, 300, 300), ttp=True),\n", + "]" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "PPloo-oE545b", + "colab_type": "text" + }, + "source": [ + "# Model Runner\n", + "\n", + "The runner creates each model, a matching data loader, and runs the validation. It uses several features of my image collection module for this.\n", + "\n", + "Test time pooling is enabled here if requested in the model_args. The pooling is implemented as a module the wraps the base network. It's important to set the crop factor for the images to 1.0 when enabling pooling." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "BX_CKBnM8XNO", + "colab_type": "code", + "colab": {} + }, + "source": [ + "from timm.models import TestTimePoolHead\n", + "\n", + "def model_runner(model_args):\n", + " model_name = model_args['model_name']\n", + " pretrained = True\n", + " checkpoint_path = ''\n", + " if 'model_url' in model_args and model_args['model_url']:\n", + " !wget -q {model_args['model_url']}\n", + " checkpoint_path = './' + os.path.basename(model_args['model_url'])\n", + " logging.info('Downloaded checkpoint {} from specified URL'.format(checkpoint_path))\n", + " pretrained = False\n", + " \n", + " model = timm.create_model(\n", + " model_name,\n", + " num_classes=1000,\n", + " in_chans=3,\n", + " pretrained=pretrained,\n", + " checkpoint_path=checkpoint_path)\n", + "\n", + " data_config = timm.data.resolve_data_config(model_args, model=model, verbose=True)\n", + " \n", + " ttp = False\n", + " if 'ttp' in model_args and model_args['ttp']:\n", + " ttp = True\n", + " logging.info('Applying test time pooling to model')\n", + " model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size'])\n", + " \n", + " model_key = [model_name, str(data_config['input_size'][-1])]\n", + " if ttp:\n", + " model_key += ['ttp']\n", + " model_key = '-'.join(model_key)\n", + " param_count = sum([m.numel() for m in model.parameters()])\n", + " logging.info('Model {} created, param count: {}. Running...'.format(model_key, param_count))\n", + "\n", + " model = model.cuda()\n", + "\n", + " loader = create_loader(\n", + " dataset,\n", + " input_size=data_config['input_size'],\n", + " batch_size=batch_size,\n", + " use_prefetcher=True,\n", + " interpolation='bicubic',\n", + " mean=data_config['mean'],\n", + " std=data_config['std'],\n", + " crop_pct=1.0 if ttp else data_config['crop_pct'],\n", + " num_workers=2)\n", + "\n", + " result = validate(model, loader)\n", + " \n", + " logging.info('Model {} done.\\n'.format(model_key))\n", + " result['param_count'] = param_count / 1e6\n", + " # add extra non-metric keys for comparisons \n", + " result['model_name'] = model_name\n", + " result['input_size'] = data_config['input_size']\n", + " result['ttp'] = ttp\n", + "\n", + " del model\n", + " del loader\n", + " torch.cuda.empty_cache()\n", + " \n", + " return model_key, result" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "xx-j8Z-z_EGo", + "colab_type": "code", + "outputId": "8c6571b5-131e-419d-b9e6-2366a45cda8e", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + } + }, + "source": [ + "# Run validation on all the models, get a coffee (or two)\n", + "results_effnet = {}\n", + "results_effnet_tf = {}\n", + "results_resnet = {}\n", + "results_resnet_ttp = {}\n", + "\n", + "logging.info('Running validation on native PyTorch EfficientNet models')\n", + "for ma in models_effnet:\n", + " mk, mr = model_runner(ma)\n", + " results_effnet[mk] = mr\n", + " \n", + "logging.info('Running validation on ported Tensorflow EfficientNet models')\n", + "for ma in models_effnet_tf:\n", + " mk, mr = model_runner(ma)\n", + " results_effnet_tf[mk] = mr\n", + " \n", + "logging.info('Running validation on ResNe(X)t models')\n", + "for ma in models_resnet:\n", + " mk, mr = model_runner(ma)\n", + " results_resnet[mk] = mr\n", + " \n", + "logging.info('Running validation on ResNe(X)t models w/ Test Time Pooling enabled')\n", + "for ma in models_resnet_ttp:\n", + " mk, mr = model_runner(ma)\n", + " results_resnet_ttp[mk] = mr\n", + " \n", + "results = {**results_effnet, **results_effnet_tf, **results_resnet, **results_resnet_ttp}" + ], + "execution_count": 8, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Running validation on native PyTorch EfficientNet models\n", + "Downloading: \"https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0-d6904d92.pth\" to /root/.cache/torch/checkpoints/efficientnet_b0-d6904d92.pth\n", + "100%|██████████| 21376958/21376958 [00:02<00:00, 8676444.76it/s]\n", + "Data processing configuration for current model + dataset:\n", + "\tinput_size: (3, 224, 224)\n", + "\tinterpolation: bicubic\n", + "\tmean: (0.485, 0.456, 0.406)\n", + "\tstd: (0.229, 0.224, 0.225)\n", + "\tcrop_pct: 0.875\n", + "Model efficientnet_b0-224 created, param count: 5288548. Running...\n", + "GPU memory free: 14276, memory used: 804\n", + "GPU memory free: 11346, memory used: 3734\n", + "Test: [ 20/79] Time: 0.190 (0.805) Rate: 159.098 img/sec Prec@1: 64.8438 (69.6801) Prec@5: 87.5000 (88.9509)\n", + "Test: [ 40/79] Time: 0.194 (0.800) Rate: 159.972 img/sec Prec@1: 51.5625 (68.8072) Prec@5: 79.6875 (88.5671)\n", + "Test: [ 60/79] Time: 0.186 (0.790) Rate: 162.028 img/sec Prec@1: 60.9375 (66.1501) Prec@5: 83.5938 (86.6035)\n", + " * Prec@1 64.580 (35.420) Prec@5 85.890 (14.110) Rate 165.732\n", + "Model efficientnet_b0-224 done.\n", + "\n", + "Downloading: \"https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth\" to /root/.cache/torch/checkpoints/efficientnet_b1-533bc792.pth\n", + "100%|██████████| 31502706/31502706 [00:03<00:00, 9936470.52it/s] \n", + "Data processing configuration for current model + dataset:\n", + "\tinput_size: (3, 240, 240)\n", + "\tinterpolation: bicubic\n", + "\tmean: (0.485, 0.456, 0.406)\n", + "\tstd: (0.229, 0.224, 0.225)\n", + "\tcrop_pct: 0.882\n", + "Model efficientnet_b1-240 created, param count: 7794184. Running...\n", + "GPU memory free: 14260, memory used: 820\n", + "GPU memory free: 10890, memory used: 4190\n", + "Test: [ 20/79] Time: 0.311 (0.919) Rate: 139.286 img/sec Prec@1: 69.5312 (73.9583) Prec@5: 86.7188 (90.7366)\n", + "Test: [ 40/79] Time: 0.310 (0.878) Rate: 145.851 img/sec Prec@1: 58.5938 (72.1799) Prec@5: 81.2500 (89.9200)\n", + "Test: [ 60/79] Time: 0.312 (0.867) Rate: 147.679 img/sec Prec@1: 67.1875 (69.0958) Prec@5: 81.2500 (87.9867)\n", + " * Prec@1 67.550 (32.450) Prec@5 87.290 (12.710) Rate 151.628\n", + "Model efficientnet_b1-240 done.\n", + "\n", + "Downloading: \"https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2-cf78dc4d.pth\" to /root/.cache/torch/checkpoints/efficientnet_b2-cf78dc4d.pth\n", + "100%|██████████| 36788101/36788101 [00:03<00:00, 11752398.17it/s]\n", + "Data processing configuration for current model + dataset:\n", + "\tinput_size: (3, 260, 260)\n", + "\tinterpolation: bicubic\n", + "\tmean: (0.485, 0.456, 0.406)\n", + "\tstd: (0.229, 0.224, 0.225)\n", + "\tcrop_pct: 0.89\n", + "Model efficientnet_b2-260 created, param count: 9109994. Running...\n", + "GPU memory free: 14258, memory used: 822\n", + "GPU memory free: 10266, memory used: 4814\n", + "Test: [ 20/79] Time: 0.416 (0.941) Rate: 136.036 img/sec Prec@1: 68.7500 (72.9539) Prec@5: 88.2812 (91.0714)\n", + "Test: [ 40/79] Time: 0.429 (0.914) Rate: 140.068 img/sec Prec@1: 58.5938 (71.9893) Prec@5: 82.0312 (90.4535)\n", + "Test: [ 60/79] Time: 0.527 (0.894) Rate: 143.120 img/sec Prec@1: 64.0625 (69.3904) Prec@5: 85.9375 (88.8960)\n", + " * Prec@1 67.800 (32.200) Prec@5 88.200 (11.800) Rate 144.201\n", + "Model efficientnet_b2-260 done.\n", + "\n", + "Running validation on ported Tensorflow EfficientNet models\n", + "Downloading: \"https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2-e393ef04.pth\" to /root/.cache/torch/checkpoints/tf_efficientnet_b2-e393ef04.pth\n", + "100%|██████████| 36797929/36797929 [00:03<00:00, 11014399.83it/s]\n", + "Data processing configuration for current model + dataset:\n", + "\tinput_size: (3, 260, 260)\n", + "\tinterpolation: bicubic\n", + "\tmean: (0.485, 0.456, 0.406)\n", + "\tstd: (0.229, 0.224, 0.225)\n", + "\tcrop_pct: 0.89\n", + "Model tf_efficientnet_b2-260 created, param count: 9109994. Running...\n", + "GPU memory free: 14258, memory used: 822\n", + "GPU memory free: 9568, memory used: 5512\n", + "Test: [ 20/79] Time: 1.217 (0.960) Rate: 133.306 img/sec Prec@1: 66.4062 (72.7679) Prec@5: 87.5000 (90.4018)\n", + "Test: [ 40/79] Time: 0.522 (0.917) Rate: 139.645 img/sec Prec@1: 58.5938 (71.3986) Prec@5: 79.6875 (89.7675)\n", + "Test: [ 60/79] Time: 0.939 (0.908) Rate: 141.046 img/sec Prec@1: 64.8438 (68.9037) Prec@5: 85.1562 (88.2172)\n", + " * Prec@1 67.400 (32.600) Prec@5 87.580 (12.420) Rate 142.727\n", + "Model tf_efficientnet_b2-260 done.\n", + "\n", + "Downloading: \"https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3-e3bd6955.pth\" to /root/.cache/torch/checkpoints/tf_efficientnet_b3-e3bd6955.pth\n", + "100%|██████████| 49381362/49381362 [00:03<00:00, 12584590.15it/s]\n", + "Data processing configuration for current model + dataset:\n", + "\tinput_size: (3, 300, 300)\n", + "\tinterpolation: bicubic\n", + "\tmean: (0.485, 0.456, 0.406)\n", + "\tstd: (0.229, 0.224, 0.225)\n", + "\tcrop_pct: 0.904\n", + "Model tf_efficientnet_b3-300 created, param count: 12233232. Running...\n", + "GPU memory free: 14242, memory used: 838\n", + "GPU memory free: 5604, memory used: 9476\n", + "Test: [ 20/79] Time: 1.267 (1.161) Rate: 110.269 img/sec Prec@1: 66.4062 (73.8467) Prec@5: 90.6250 (91.6667)\n", + "Test: [ 40/79] Time: 0.833 (1.097) Rate: 116.649 img/sec Prec@1: 60.9375 (72.8087) Prec@5: 83.5938 (90.7393)\n", + "Test: [ 60/79] Time: 1.242 (1.082) Rate: 118.310 img/sec Prec@1: 67.1875 (70.1588) Prec@5: 84.3750 (89.1522)\n", + " * Prec@1 68.520 (31.480) Prec@5 88.700 (11.300) Rate 119.134\n", + "Model tf_efficientnet_b3-300 done.\n", + "\n", + "Downloading: \"https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4-74ee3bed.pth\" to /root/.cache/torch/checkpoints/tf_efficientnet_b4-74ee3bed.pth\n", + "100%|██████████| 77989689/77989689 [00:06<00:00, 12751872.12it/s]\n", + "Data processing configuration for current model + dataset:\n", + "\tinput_size: (3, 380, 380)\n", + "\tinterpolation: bicubic\n", + "\tmean: (0.485, 0.456, 0.406)\n", + "\tstd: (0.229, 0.224, 0.225)\n", + "\tcrop_pct: 0.922\n", + "Model tf_efficientnet_b4-380 created, param count: 19341616. Running...\n", + "GPU memory free: 14214, memory used: 866\n", + "GPU memory free: 2460, memory used: 12620\n", + "Test: [ 20/79] Time: 1.761 (2.057) Rate: 62.222 img/sec Prec@1: 69.5312 (76.4509) Prec@5: 91.4062 (92.6339)\n", + "Test: [ 40/79] Time: 1.740 (1.914) Rate: 66.889 img/sec Prec@1: 64.8438 (75.4954) Prec@5: 83.5938 (92.2637)\n", + "Test: [ 60/79] Time: 1.782 (1.866) Rate: 68.600 img/sec Prec@1: 71.0938 (72.8740) Prec@5: 85.1562 (90.6634)\n", + " * Prec@1 71.340 (28.660) Prec@5 90.110 (9.890) Rate 69.103\n", + "Model tf_efficientnet_b4-380 done.\n", + "\n", + "Running validation on ResNe(X)t models\n", + "Downloading: \"https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn68b_extra-84854c156.pth\" to /root/.cache/torch/checkpoints/dpn68b_extra-84854c156.pth\n", + "100%|██████████| 50765517/50765517 [00:04<00:00, 12271223.44it/s]\n", + "Data processing configuration for current model + dataset:\n", + "\tinput_size: (3, 224, 224)\n", + "\tinterpolation: bicubic\n", + "\tmean: (0.48627450980392156, 0.4588235294117647, 0.40784313725490196)\n", + "\tstd: (0.23482446870963955, 0.23482446870963955, 0.23482446870963955)\n", + "\tcrop_pct: 0.875\n", + "Model dpn68b-224 created, param count: 12611602. Running...\n", + "GPU memory free: 14240, memory used: 840\n", + "GPU memory free: 11342, memory used: 3738\n", + "Test: [ 20/79] Time: 0.442 (0.876) Rate: 146.176 img/sec Prec@1: 54.6875 (70.2381) Prec@5: 85.9375 (88.9509)\n", + "Test: [ 40/79] Time: 1.007 (0.847) Rate: 151.177 img/sec Prec@1: 57.8125 (69.5122) Prec@5: 78.9062 (88.4337)\n", + "Test: [ 60/79] Time: 1.015 (0.834) Rate: 153.556 img/sec Prec@1: 60.1562 (66.8033) Prec@5: 78.9062 (86.5907)\n", + " * Prec@1 65.600 (34.400) Prec@5 85.940 (14.060) Rate 155.150\n", + "Model dpn68b-224 done.\n", + "\n", + "Downloading: \"https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/rw_resnet50-86acaeed.pth\" to /root/.cache/torch/checkpoints/rw_resnet50-86acaeed.pth\n", + "100%|██████████| 102488165/102488165 [00:07<00:00, 13755311.81it/s]\n", + "Data processing configuration for current model + dataset:\n", + "\tinput_size: (3, 224, 224)\n", + "\tinterpolation: bicubic\n", + "\tmean: (0.485, 0.456, 0.406)\n", + "\tstd: (0.229, 0.224, 0.225)\n", + "\tcrop_pct: 0.875\n", + "Model resnet50-224 created, param count: 25557032. Running...\n", + "GPU memory free: 14182, memory used: 898\n", + "GPU memory free: 12652, memory used: 2428\n", + "Test: [ 20/79] Time: 0.406 (0.859) Rate: 149.042 img/sec Prec@1: 66.4062 (72.6562) Prec@5: 90.6250 (90.4762)\n", + "Test: [ 40/79] Time: 0.662 (0.820) Rate: 156.156 img/sec Prec@1: 58.5938 (71.1128) Prec@5: 85.9375 (89.5960)\n", + "Test: [ 60/79] Time: 0.601 (0.807) Rate: 158.594 img/sec Prec@1: 61.7188 (68.3017) Prec@5: 82.0312 (87.7946)\n", + " * Prec@1 66.810 (33.190) Prec@5 87.000 (13.000) Rate 159.510\n", + "Model resnet50-224 done.\n", + "\n", + "Downloading: \"https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext50_32x4d-90cf2d6e.pth\" to /root/.cache/torch/checkpoints/gluon_seresnext50_32x4d-90cf2d6e.pth\n", + "100%|██████████| 110578827/110578827 [00:08<00:00, 12788555.61it/s]\n", + "Data processing configuration for current model + dataset:\n", + "\tinput_size: (3, 224, 224)\n", + "\tinterpolation: bicubic\n", + "\tmean: (0.485, 0.456, 0.406)\n", + "\tstd: (0.229, 0.224, 0.225)\n", + "\tcrop_pct: 0.875\n", + "Model gluon_seresnext50_32x4d-224 created, param count: 27559896. Running...\n", + "GPU memory free: 14180, memory used: 900\n", + "GPU memory free: 12510, memory used: 2570\n", + "Test: [ 20/79] Time: 1.013 (0.875) Rate: 146.238 img/sec Prec@1: 70.3125 (74.2188) Prec@5: 88.2812 (91.0714)\n", + "Test: [ 40/79] Time: 1.197 (0.859) Rate: 149.059 img/sec Prec@1: 60.9375 (72.8849) Prec@5: 82.8125 (90.4345)\n", + "Test: [ 60/79] Time: 1.185 (0.859) Rate: 148.930 img/sec Prec@1: 64.8438 (70.0307) Prec@5: 84.3750 (88.8064)\n", + " * Prec@1 68.670 (31.330) Prec@5 88.320 (11.680) Rate 150.435\n", + "Model gluon_seresnext50_32x4d-224 done.\n", + "\n", + "Downloading: \"https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_32x4d-cf52900d.pth\" to /root/.cache/torch/checkpoints/gluon_seresnext101_32x4d-cf52900d.pth\n", + "100%|██████████| 196505510/196505510 [00:12<00:00, 16164511.02it/s]\n", + "Data processing configuration for current model + dataset:\n", + "\tinput_size: (3, 224, 224)\n", + "\tinterpolation: bicubic\n", + "\tmean: (0.485, 0.456, 0.406)\n", + "\tstd: (0.229, 0.224, 0.225)\n", + "\tcrop_pct: 0.875\n", + "Model gluon_seresnext101_32x4d-224 created, param count: 48955416. Running...\n", + "GPU memory free: 14086, memory used: 994\n", + "GPU memory free: 12272, memory used: 2808\n", + "Test: [ 20/79] Time: 0.897 (1.016) Rate: 125.932 img/sec Prec@1: 72.6562 (75.5580) Prec@5: 88.2812 (91.6667)\n", + "Test: [ 40/79] Time: 0.899 (0.997) Rate: 128.324 img/sec Prec@1: 64.8438 (74.4284) Prec@5: 83.5938 (91.2538)\n", + "Test: [ 60/79] Time: 0.867 (0.986) Rate: 129.853 img/sec Prec@1: 67.1875 (71.7597) Prec@5: 89.0625 (89.6644)\n", + " * Prec@1 70.010 (29.990) Prec@5 88.910 (11.090) Rate 131.572\n", + "Model gluon_seresnext101_32x4d-224 done.\n", + "\n", + "Downloading: \"https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth\" to /root/.cache/torch/checkpoints/ig_resnext101_32x8-c38310e5.pth\n", + "100%|██████████| 356056638/356056638 [00:11<00:00, 31320647.42it/s]\n", + "Data processing configuration for current model + dataset:\n", + "\tinput_size: (3, 224, 224)\n", + "\tinterpolation: bilinear\n", + "\tmean: (0.485, 0.456, 0.406)\n", + "\tstd: (0.229, 0.224, 0.225)\n", + "\tcrop_pct: 0.875\n", + "Model ig_resnext101_32x8d-224 created, param count: 88791336. Running...\n", + "GPU memory free: 13946, memory used: 1134\n", + "GPU memory free: 10564, memory used: 4516\n", + "Test: [ 20/79] Time: 1.560 (1.664) Rate: 76.934 img/sec Prec@1: 76.5625 (78.9807) Prec@5: 93.7500 (94.2708)\n", + "Test: [ 40/79] Time: 1.450 (1.582) Rate: 80.907 img/sec Prec@1: 66.4062 (77.9535) Prec@5: 88.2812 (93.7881)\n", + "Test: [ 60/79] Time: 1.470 (1.540) Rate: 83.129 img/sec Prec@1: 74.2188 (75.0256) Prec@5: 91.4062 (92.6358)\n", + " * Prec@1 73.830 (26.170) Prec@5 92.280 (7.720) Rate 83.352\n", + "Model ig_resnext101_32x8d-224 done.\n", + "\n", + "Running validation on ResNe(X)t models w/ Test Time Pooling enabled\n", + "Data processing configuration for current model + dataset:\n", + "\tinput_size: (3, 240, 240)\n", + "\tinterpolation: bicubic\n", + "\tmean: (0.485, 0.456, 0.406)\n", + "\tstd: (0.229, 0.224, 0.225)\n", + "\tcrop_pct: 0.875\n", + "Applying test time pooling to model\n", + "Model resnet50-240-ttp created, param count: 25557032. Running...\n", + "GPU memory free: 14182, memory used: 898\n", + "GPU memory free: 12098, memory used: 2982\n", + "Test: [ 20/79] Time: 0.429 (0.892) Rate: 143.505 img/sec Prec@1: 67.1875 (72.7679) Prec@5: 89.0625 (90.3274)\n", + "Test: [ 40/79] Time: 0.757 (0.845) Rate: 151.416 img/sec Prec@1: 55.4688 (71.1128) Prec@5: 84.3750 (89.5198)\n", + "Test: [ 60/79] Time: 1.154 (0.831) Rate: 154.108 img/sec Prec@1: 61.7188 (68.4170) Prec@5: 82.8125 (87.6537)\n", + " * Prec@1 67.020 (32.980) Prec@5 87.040 (12.960) Rate 154.346\n", + "Model resnet50-240-ttp done.\n", + "\n", + "Data processing configuration for current model + dataset:\n", + "\tinput_size: (3, 260, 260)\n", + "\tinterpolation: bicubic\n", + "\tmean: (0.485, 0.456, 0.406)\n", + "\tstd: (0.229, 0.224, 0.225)\n", + "\tcrop_pct: 0.875\n", + "Applying test time pooling to model\n", + "Model resnet50-260-ttp created, param count: 25557032. Running...\n", + "GPU memory free: 14182, memory used: 898\n", + "GPU memory free: 11650, memory used: 3430\n", + "Test: [ 20/79] Time: 1.172 (1.097) Rate: 116.650 img/sec Prec@1: 68.7500 (72.9911) Prec@5: 87.5000 (90.5134)\n", + "Test: [ 40/79] Time: 0.902 (0.976) Rate: 131.211 img/sec Prec@1: 57.8125 (72.0084) Prec@5: 82.8125 (89.9581)\n", + "Test: [ 60/79] Time: 0.832 (0.940) Rate: 136.223 img/sec Prec@1: 60.1562 (69.2751) Prec@5: 85.9375 (88.2684)\n", + " * Prec@1 67.630 (32.370) Prec@5 87.630 (12.370) Rate 135.915\n", + "Model resnet50-260-ttp done.\n", + "\n", + "Data processing configuration for current model + dataset:\n", + "\tinput_size: (3, 260, 260)\n", + "\tinterpolation: bicubic\n", + "\tmean: (0.485, 0.456, 0.406)\n", + "\tstd: (0.229, 0.224, 0.225)\n", + "\tcrop_pct: 0.875\n", + "Applying test time pooling to model\n", + "Model gluon_seresnext50_32x4d-260-ttp created, param count: 27559896. Running...\n", + "GPU memory free: 14180, memory used: 900\n", + "GPU memory free: 11594, memory used: 3486\n", + "Test: [ 20/79] Time: 1.229 (1.147) Rate: 111.577 img/sec Prec@1: 71.8750 (74.4420) Prec@5: 86.7188 (91.2946)\n", + "Test: [ 40/79] Time: 1.056 (1.053) Rate: 121.593 img/sec Prec@1: 62.5000 (73.8567) Prec@5: 85.1562 (90.6822)\n", + "Test: [ 60/79] Time: 1.133 (1.015) Rate: 126.067 img/sec Prec@1: 68.7500 (71.1194) Prec@5: 86.7188 (89.0625)\n", + " * Prec@1 69.670 (30.330) Prec@5 88.620 (11.380) Rate 126.519\n", + "Model gluon_seresnext50_32x4d-260-ttp done.\n", + "\n", + "Data processing configuration for current model + dataset:\n", + "\tinput_size: (3, 300, 300)\n", + "\tinterpolation: bicubic\n", + "\tmean: (0.485, 0.456, 0.406)\n", + "\tstd: (0.229, 0.224, 0.225)\n", + "\tcrop_pct: 0.875\n", + "Applying test time pooling to model\n", + "Model gluon_seresnext50_32x4d-300-ttp created, param count: 27559896. Running...\n", + "GPU memory free: 14180, memory used: 900\n", + "GPU memory free: 10880, memory used: 4200\n", + "Test: [ 20/79] Time: 1.041 (1.484) Rate: 86.250 img/sec Prec@1: 71.8750 (76.3021) Prec@5: 89.0625 (91.9271)\n", + "Test: [ 40/79] Time: 1.037 (1.287) Rate: 99.457 img/sec Prec@1: 64.0625 (75.0572) Prec@5: 86.7188 (91.3300)\n", + "Test: [ 60/79] Time: 1.064 (1.216) Rate: 105.295 img/sec Prec@1: 71.0938 (72.1952) Prec@5: 88.2812 (89.7285)\n", + " * Prec@1 70.470 (29.530) Prec@5 89.180 (10.820) Rate 104.694\n", + "Model gluon_seresnext50_32x4d-300-ttp done.\n", + "\n", + "Data processing configuration for current model + dataset:\n", + "\tinput_size: (3, 260, 260)\n", + "\tinterpolation: bicubic\n", + "\tmean: (0.485, 0.456, 0.406)\n", + "\tstd: (0.229, 0.224, 0.225)\n", + "\tcrop_pct: 0.875\n", + "Applying test time pooling to model\n", + "Model gluon_seresnext101_32x4d-260-ttp created, param count: 48955416. Running...\n", + "GPU memory free: 14086, memory used: 994\n", + "GPU memory free: 11634, memory used: 3446\n", + "Test: [ 20/79] Time: 1.307 (1.413) Rate: 90.559 img/sec Prec@1: 71.8750 (76.3393) Prec@5: 89.0625 (92.0387)\n", + "Test: [ 40/79] Time: 1.307 (1.362) Rate: 93.981 img/sec Prec@1: 61.7188 (75.6479) Prec@5: 82.0312 (91.8826)\n", + "Test: [ 60/79] Time: 1.303 (1.343) Rate: 95.329 img/sec Prec@1: 74.2188 (72.8868) Prec@5: 87.5000 (90.1895)\n", + " * Prec@1 71.140 (28.860) Prec@5 89.470 (10.530) Rate 95.842\n", + "Model gluon_seresnext101_32x4d-260-ttp done.\n", + "\n", + "Data processing configuration for current model + dataset:\n", + "\tinput_size: (3, 300, 300)\n", + "\tinterpolation: bicubic\n", + "\tmean: (0.485, 0.456, 0.406)\n", + "\tstd: (0.229, 0.224, 0.225)\n", + "\tcrop_pct: 0.875\n", + "Applying test time pooling to model\n", + "Model gluon_seresnext101_32x4d-300-ttp created, param count: 48955416. Running...\n", + "GPU memory free: 14086, memory used: 994\n", + "GPU memory free: 10834, memory used: 4246\n", + "Test: [ 20/79] Time: 1.691 (1.786) Rate: 71.683 img/sec Prec@1: 71.8750 (77.5298) Prec@5: 91.4062 (93.1176)\n", + "Test: [ 40/79] Time: 1.669 (1.732) Rate: 73.888 img/sec Prec@1: 63.2812 (76.2767) Prec@5: 85.1562 (92.5877)\n", + "Test: [ 60/79] Time: 1.693 (1.715) Rate: 74.635 img/sec Prec@1: 75.0000 (73.7193) Prec@5: 92.1875 (90.9964)\n", + " * Prec@1 71.990 (28.010) Prec@5 90.100 (9.900) Rate 74.874\n", + "Model gluon_seresnext101_32x4d-300-ttp done.\n", + "\n", + "Data processing configuration for current model + dataset:\n", + "\tinput_size: (3, 300, 300)\n", + "\tinterpolation: bilinear\n", + "\tmean: (0.485, 0.456, 0.406)\n", + "\tstd: (0.229, 0.224, 0.225)\n", + "\tcrop_pct: 0.875\n", + "Applying test time pooling to model\n", + "Model ig_resnext101_32x8d-300-ttp created, param count: 88791336. Running...\n", + "GPU memory free: 13946, memory used: 1134\n", + "GPU memory free: 9288, memory used: 5792\n", + "Test: [ 20/79] Time: 2.850 (3.122) Rate: 41.006 img/sec Prec@1: 75.0000 (79.3155) Prec@5: 93.7500 (94.8661)\n", + "Test: [ 40/79] Time: 2.855 (2.989) Rate: 42.826 img/sec Prec@1: 64.8438 (78.6966) Prec@5: 87.5000 (94.3979)\n", + "Test: [ 60/79] Time: 2.856 (2.945) Rate: 43.463 img/sec Prec@1: 74.2188 (76.2295) Prec@5: 89.0625 (93.0456)\n", + " * Prec@1 75.170 (24.830) Prec@5 92.660 (7.340) Rate 43.622\n", + "Model ig_resnext101_32x8d-300-ttp done.\n", + "\n" + ], + "name": "stderr" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "URXdMbNaOYtq", + "colab_type": "text" + }, + "source": [ + "# Results\n", + "\n", + "We're going walk through the results and look at several things:\n", + "\n", + "1. A look at the Top-1 accuracy % across all the models\n", + "2. Parameter efficiency\n", + "3. Model throughput (images/sec)\n", + "4. (Practical) GPU memory usage in PyTorch\n", + "5. A comparison of model-model pairings\n", + "6. ImageNet-V2 generalization" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "MvVqWbobe9Jo", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Setup common charting variables\n", + "import numpy as np\n", + "import matplotlib\n", + "import matplotlib.pyplot as plt\n", + "matplotlib.rcParams['figure.figsize'] = [16, 10]\n", + "\n", + "def annotate(ax, xv, yv, names, xo=0., yo=0., align='left'):\n", + " for i, (x, y) in enumerate(zip(xv, yv)):\n", + " ax1.text(x + xo, y + yo, names[i], fontsize=9, ha=align)\n", + "\n", + "names_all = list(results.keys())\n", + "names_effnet = list(results_effnet.keys())\n", + "names_effnet_tf = list(results_effnet_tf.keys())\n", + "names_resnet = list(results_resnet.keys())\n", + "names_resnet_ttp = list(results_resnet_ttp.keys())\n", + "\n", + "acc_all = np.array([results[m]['top1'] for m in names_all])\n", + "acc_effnet = np.array([results[m]['top1'] for m in names_effnet])\n", + "acc_effnet_tf = np.array([results[m]['top1'] for m in names_effnet_tf])\n", + "acc_resnet = np.array([results[m]['top1'] for m in names_resnet])\n", + "acc_resnet_ttp = np.array([results[m]['top1'] for m in names_resnet_ttp])" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "P9vtQbVa48kW", + "colab_type": "text" + }, + "source": [ + "# Top-1 accuracy\n", + "\n", + "We'll start by ranking the models by Top-1 accuracy on the ImageNet-V2 validation set. \n", + "\n", + "You'll notice that a well trained\n", + "* ResNet-50 is holding it's own against an EfficientNet-B1, much closer to that than the B0 it's paired with in the paper\n", + "* SE-ResNeXt50-32x4d can best the B2 and B3\n", + "* SE-ResNeXt101-32x4d is very close to the B4.\n", + "\n", + "The ResNeXt101-32x8d pretrained on Facebook's Instagram is in a class of it's own. Somewhat unfairly since pretrained on a larger dataset. However, since it generalizes better than any model I've seen to this dataset (see bottom) and runs faster with less memory overehead than the EfficientNet-B4 (despite it's 88M parameters), I've included it." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "MjM-eMtSalDS", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 340 + }, + "outputId": "3bdd1164-4395-47f8-d090-f4c5868027c6" + }, + "source": [ + "print('Results by top-1 accuracy:')\n", + "results_by_top1 = list(sorted(results.keys(), key=lambda x: results[x]['top1'], reverse=True))\n", + "for m in results_by_top1:\n", + " print(' Model: {:34}, Top-1 {:4.2f}, Top-5 {:4.2f}, Rate: {:4.2f}'.format(m, results[m]['top1'], results[m]['top5'], results[m]['rate']))" + ], + "execution_count": 10, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Results by top-1 accuracy:\n", + " Model: ig_resnext101_32x8d-300-ttp , Top-1 75.17, Top-5 92.66, Rate: 43.62\n", + " Model: ig_resnext101_32x8d-224 , Top-1 73.83, Top-5 92.28, Rate: 83.35\n", + " Model: gluon_seresnext101_32x4d-300-ttp , Top-1 71.99, Top-5 90.10, Rate: 74.87\n", + " Model: tf_efficientnet_b4-380 , Top-1 71.34, Top-5 90.11, Rate: 69.10\n", + " Model: gluon_seresnext101_32x4d-260-ttp , Top-1 71.14, Top-5 89.47, Rate: 95.84\n", + " Model: gluon_seresnext50_32x4d-300-ttp , Top-1 70.47, Top-5 89.18, Rate: 104.69\n", + " Model: gluon_seresnext101_32x4d-224 , Top-1 70.01, Top-5 88.91, Rate: 131.57\n", + " Model: gluon_seresnext50_32x4d-260-ttp , Top-1 69.67, Top-5 88.62, Rate: 126.52\n", + " Model: gluon_seresnext50_32x4d-224 , Top-1 68.67, Top-5 88.32, Rate: 150.43\n", + " Model: tf_efficientnet_b3-300 , Top-1 68.52, Top-5 88.70, Rate: 119.13\n", + " Model: efficientnet_b2-260 , Top-1 67.80, Top-5 88.20, Rate: 144.20\n", + " Model: resnet50-260-ttp , Top-1 67.63, Top-5 87.63, Rate: 135.92\n", + " Model: efficientnet_b1-240 , Top-1 67.55, Top-5 87.29, Rate: 151.63\n", + " Model: tf_efficientnet_b2-260 , Top-1 67.40, Top-5 87.58, Rate: 142.73\n", + " Model: resnet50-240-ttp , Top-1 67.02, Top-5 87.04, Rate: 154.35\n", + " Model: resnet50-224 , Top-1 66.81, Top-5 87.00, Rate: 159.51\n", + " Model: dpn68b-224 , Top-1 65.60, Top-5 85.94, Rate: 155.15\n", + " Model: efficientnet_b0-224 , Top-1 64.58, Top-5 85.89, Rate: 165.73\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ENtozBUwwdO-", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 745 + }, + "outputId": "c0834583-d1f3-4976-9c7b-c54ef4520e79" + }, + "source": [ + "sort_ix = np.argsort(acc_all)\n", + "acc_sorted = acc_all[sort_ix]\n", + "acc_min, acc_max = acc_sorted[0], acc_sorted[-1]\n", + "names_sorted = np.array(names_all)[sort_ix]\n", + "fig = plt.figure()\n", + "ax1 = fig.add_subplot(111)\n", + "ix = np.arange(len(acc_sorted))\n", + "ix_effnet = ix[np.in1d(names_sorted[ix], names_effnet)]\n", + "ix_effnet_tf = ix[np.in1d(names_sorted[ix], names_effnet_tf)]\n", + "ix_resnet = ix[np.in1d(names_sorted[ix], names_resnet)]\n", + "ix_resnet_ttp = ix[np.in1d(names_sorted[ix], names_resnet_ttp)]\n", + "ax1.bar(ix_effnet, acc_sorted[ix_effnet], color='r', label='EfficientNet')\n", + "ax1.bar(ix_effnet_tf, acc_sorted[ix_effnet_tf], color='#8C001A', label='TF-EfficientNet')\n", + "ax1.bar(ix_resnet, acc_sorted[ix_resnet], color='b', label='ResNet')\n", + "ax1.bar(ix_resnet_ttp, acc_sorted[ix_resnet_ttp], color='#43C6DB', label='ResNet + TTP')\n", + "plt.ylim([math.ceil(acc_min - .3*(acc_max - acc_min)),\n", + " math.ceil(acc_max + .3*(acc_max - acc_min))])\n", + "ax1.set_title('Top-1 Comparison')\n", + "ax1.set_ylabel('Top-1 Accuracy (%)')\n", + "ax1.set_xlabel('Network Architecture')\n", + "ax1.set_xticks(ix)\n", + "ax1.set_xticklabels(names_sorted, rotation='45', ha='right')\n", + "ax1.legend()\n", + "plt.show()" + ], + "execution_count": 11, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA7AAAALYCAYAAABFbR5BAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzs3XuUXmV9N/zvzwSECMUKoRaRF3yt\nFghhCIEaKZYzgoIPLXZVgRcECvi09S3FVGhB0YIiB1EoIlSwHpBiUbBV7BuxUvHAIYTIo4AQNViC\nkghFIYQQ8Hr/mMk4OU2GkHsmO34+a83K7Gvvfe3ffc9aLr9ch12ttQAAAMC67gVjXQAAAACMhAAL\nAABAJwiwAAAAdIIACwAAQCcIsAAAAHSCAAsAAEAnCLAAwKCq2q+qvjvWdQDAygiwAHRSVT0x5OdX\nVbVoyPERa/lZL6qqL1TVA1XVquo1I7jnDVX1zYF65lfVf1bVQWuzrl5ord3YWtt5rOsAgJURYAHo\npNbaJkt/kvwkySFD2q5a249LclOStyT5n9VdPBCgP5vkn5JsleR3k5yd5E1rua61qqrGj3UNADAc\nARaA9VJVbVxVl1TVT6vqwao6r6o2GDj3+qqaU1XvrapHq+rHVfXmVfXVWnuytXZRa+3bSX61mueO\nT3JBktNba59srf2ytfZsa+1rrbWTBq4ZN/Dsn1TVw1V1ZVVtOnDu96vqmao6rqrmVdUjVXVsVb22\nqr5XVY9V1YeGPO+kgdHdy6rql1V1d1W9bsj5E6vq3qp6fOAzHzvk3NLv4YyqejjJpUvbhlxzxsB3\n+Muquqeq9nwO3+/fVdWCgc+xVkfFAfjNJMACsL56b5LJSXZKsmuSvZL87ZDz2ybZMMlLk/x5kk9W\n1XZr4bmTkvxOkmuHuebEJH+aZM8kv5dkyyQfGnJ+3EDtr0jytiQXJzll4DNMTvK2qvqDIde/Lsl3\nk2ye5Jwk11fVbw2c+2mSg5L8VpKTklxSVTsOuXfbJBskeXmSdwwtsqp2Hnh+X5LNkrwhyYMDp1f3\n/f5fSSr9I9B/meRjVbXJMN8JAKyWAAvA+uqIJO9prf28tfZwkrOSHDXk/DNJ3ttae7q1dmOSG5Mc\nvhaeu3n6pxw/vJrazmutPdBa+2WSv09yRFXVkGve11pb3Fr7t4HjTw18lp8k+XaSXYZc+9+ttY+2\n1pa01j6V/pB5YJK01v6ttfbj1u/GJP+V5A+H3Ls4yT8MfA+LlqvzmSQbJ9khybjW2o9aaz8e8hmG\n+36fTPKBgZquG/hOXjnMdwIAqyXAArDeGQiCL03ywJDmB5K8bMjxgtbaU8ud36qqXjVkM6ifr8Hj\nH0n/yOPvDHPNViupbeMkLxk4fra19siQ84uybCBelGToaOaDWdYDA89IVR1aVbcNTJV+LMk+SbYY\ncu3PWmtLVlZka+37SU5N//rd+VV1VVX9znP4fodOt35yuZoB4DkTYAFY77TWWpKfpX8a61LbJJk3\n5HiLqtpoufMPtdbuG7IZ1NCgN1LfS3/Y/JNhrnloJbUtSvLoGjwvSbZe7nibJA9V1YuS/GuSf0iy\nZWvtxUn+M/0Be6k2XMcD63hfm/7pzBslOWuE3y8ArHUCLADrq6uTvKeqNq+qLdM/TfczQ85vkOSM\nqtqwqvZJsn+Sz6+qs6p64ZDAu+Fy4XdQa+2ZJO9MclZVHVVVm1bVC6rqj6rqo0Nqe2dVbTOwedNZ\nST47EAzXxMsHNnMaX1VHpn8964z0j+pukGR+kl9V1aHpX6s6IlW1w0DdL0x/wF6UX29itbrvFwDW\nOtvlA7C+enf6dwP+fvpD178kOXfI+bnpX+P5syS/TPK21tqPhunvgfx6WvB/JUlV/W5r7WfLX9ha\n+8zAdN3Tklya/umz30vywYFLLk3/FNxvp38jqRuS/M1z/oS/9o30r4l9NP2joH/cWvvFQI3vTPLv\n6Q+y1w08a6Q2Tv93+OokSwaes7TO1X2/ALDW1Zr/x14A6Kaqen2Sf2ytdX5Toao6KcnhrbX9xroW\nAOg1U4gBAADoBAEWAACATjCFGAAAgE4wAgsAAEAnCLAAAAB0Qideo7PFFlu0bbfddqzLAAAAoAfu\nuOOOn7fWJq7uuk4E2G233TYzZ84c6zIAAADogap6YCTXmUIMAABAJwiwAAAAdIIACwAAQCd0Yg0s\nAADA8pYsWZIHH3wwTz311FiXwghttNFG2XrrrbPBBhus0f0CLAAA0EkPPvhgNt1002y77bapqrEu\nh9VoreWRRx7Jgw8+mO22226N+jCFGAAA6KSnnnoqm2++ufDaEVWVzTff/HmNmAuwAABAZwmv3fJ8\n/14CLAAAwBoaN25c+vr6Bn/OOeecJMnNN9+cHXfcMX19fVm0aFGmT5+eHXfcMdOnT8/HPvaxfOpT\nn1plnw899FAOP/zwNa7pwx/+cJ588snB42233TZ/8id/Mnh87bXX5phjjhm2j9mzZ+eGG25Y4xp6\nxRpYAABg/bC2R2NbW+0lG2+8cWbPnr1C+1VXXZXTTjstRx55ZJLk8ssvz6OPPppx48atts+tttoq\n11577XOvd8CHP/zhHHnkkZkwYcJg2x133JG77747O+yww4j6mD17dmbOnJmDDz54jevoBSOwAAAA\na9HHP/7xfO5zn8sZZ5yRI444IoceemieeOKJ7Lrrrrnmmmty5pln5vzzz0+SzJkzJ/vtt1923nnn\nTJkyJT/84Q8zd+7cTJo0KUny7LPPZvr06dltt90yefLkXHbZZUmSm266KXvttVcOP/zw/P7v/36O\nOOKItNZy0UUX5aGHHsree++dvffee7CmU045JWefffYKtS5cuDDHHntsdt999+yyyy754he/mKef\nfjrvfve7c80116Svry/XXHPNKHxrI2MEFgAAYA0tWrQofX19g8ennXZajj/++Hzzm9/MG9/4xsGp\nwJtsssngSO2ZZ545eP0RRxyRU089NYcddlieeuqp/OpXv8r8+fMHz19xxRXZbLPNcvvtt2fx4sXZ\nY489csABByRJ7rzzznz/+9/PVlttlT322CPf+ta38o53vCMf+tCH8vWvfz1bbLHFYD9/+qd/mo9+\n9KOZM2fOMvWfffbZ2WeffXLllVfmsccey+6775799tsv73vf+zJz5sz84z/+41r/zp4PARYAAGAN\nrWoK8Ug8/vjjmTdvXg477LAk/e9IXd6MGTNy1113DU4p/sUvfpH7778/G264YXbfffdsvfXWSZK+\nvr7MnTs3f/iHf7jSZ40bNy7Tp0/PBz7wgRx00EHL9P9v//ZvgyPCTz31VH7yk5+s0ecZDQIsAADA\nOqq1losvvjgHHnjgMu033XRTXvjCFw4ejxs3Ls8888ywfR111FH5wAc+MDg9eWn/n//85/PqV796\nmWtvvfXWtVD92mcNLAAAwBjYdNNNs/XWW+f6669PkixevHiZ3YOT5MADD8yll16aJUuWJEnuu+++\nLFy4cLX9Pv744yu0b7DBBjn55JNz4YUXLtP/xRdfnDawYdWdd945bB9jTYAFAABYQ0vXwC79OfXU\nU5/T/Z/+9Kdz0UUXZfLkyXnta1+bn/3sZ8ucP/7447PDDjtkypQpmTRpUk488cTVjrSecMIJef3r\nX7/MJk5LHXfcccvcf8YZZ2TJkiWZPHlydtxxx5xxxhlJkr333jt33333OreJU7URbA091qZOndpm\nzpw51mUAAADrkHvuuSfbb7/9WJfBc7Syv1tV3dFam7q6e43AAgAA0AkCLAAAAJ0gwAIAANAJAiwA\nAACdIMACAADQCQIsAAAAnSDAAgAArIFHHnlk8P2vL33pS/Oyl71s8Liqlnk/7Ny5c1e4/5hjjsl2\n2203eM1rX/vaJMnixYuz3377Db6D9eabb86OO+6Yvr6+zJs3L4cffviwdR1//PG5++671+gz3XTT\nTfn2t789eHzmmWdmwoQJmT9//mDbJptsstp+3v/+96/R81dnfE96BQAAGGUX1MvWan+ntHnDnt98\n880ze/bsJP1Bb5NNNsk73/nOJP0hb+m54Zx33nkrBNI777wzSQbvP+mkk3LaaaflyCOPTJJce+21\nw/b58Y9/fLXPXZWbbropm2yyyWCYTpItttgiF1xwQT74wQ+OuJ/3v//9+bu/+7s1rmNVjMACAACs\nI+bPn58jjzwyt99+e/r6+nLZZZflc5/7XM4444wcccQRmTt3biZNmpQkefbZZ/POd74zkyZNyuTJ\nk3PxxRcnSfbaa6/MnDkzSTJjxoxMmzYtU6ZMyZvf/OY88cQTSZJtt90273nPezJlypTstNNOuffe\nezN37tx87GMfy4UXXpi+vr7cfPPNSZJjjz0211xzTR599NEV6v3MZz6T3XffPX19fTnxxBPz7LPP\n5tRTT82iRYvS19eXI444Yq1+PwIsAADAWrY0wPX19eWwww5b5XXTp08fvO6II47IlltumY9//OPZ\nc889M3v27Jx44ok59NBDc9555+Wqq65a5t7LL788c+fOzezZs3PXXXetEBZ//vOf56yzzsqNN96Y\nWbNmZerUqfnQhz40eH6LLbbIrFmz8va3vz3nn39+tt1225x00kk5+eSTM3v27Oy5555J+keTjz32\n2HzkIx9Zpv977rkn11xzTb71rW9l9uzZGTduXK666qqcc8452XjjjTN79uwVan6+ejaFuKpeneSa\nIU2vSPLuJDcl+ViSjZI8k+R/t9Zu61UdAAAAo21pgFudlU0hHqkbb7wxJ510UsaP7491L3nJS5Y5\nf8stt+Tuu+/OHnvskSR5+umnM23atMHzf/zHf5wk2XXXXfOFL3xh2Ge94x3vSF9f3+AU6ST52te+\nljvuuCO77bZbkv7QvuWWW67RZxmpngXY1toPkvQlSVWNSzIvyXVJ/inJe1trX6mqg5Ocm2SvXtUB\nAACwLnjb296WO++8M1tttVVuuOGGnj+vtZb9998/V1999UrPv/CFL0ySjBs3Ls8888ywfb34xS/O\nW9/61lxyySXL9H/00UfnAx/4wNorejVGawrxvkl+2Fp7IElL8lsD7ZsleWiUagAAABgzn/jEJzJ7\n9uy1Fl7333//XHbZZYPhc/k1qq95zWvyrW99K3PmzEmSLFy4MPfdd9+wfW666aZ5/PHHV3rub/7m\nb5Z53r777ptrr712cIfiRx99NA888ECSZIMNNsiSJUvW/MOtwmgF2D9LsjT2/3WS86rqv5Ocn+S0\nUaoBAABgnTJ0DWxfX1+efvrpEd97/PHHZ5tttsnkyZOz884757Of/ewy5ydOnJh//ud/zlve8pZM\nnjw506ZNy7333jtsn4ccckiuu+66ZTZxWmqLLbbIYYcdlsWLFydJdthhh5x11lk54IADMnny5Oy/\n//756U9/miQ54YQTMnny5LW+iVO11tZqhys8oGrD9I+y7thae7iqLkryX621z1fVnyY5obW230ru\nOyHJCUmyzTbb7Lo0yQMAACT9mwhtv/32Y10Gz9HK/m5VdUdrberq7h2NEdiDksxqrT08cHx0kqUr\nhP81ye4ru6m1dnlrbWprberEiRNHoUwAAADWZaMRYN+SX08fTvpHY/9o4Pd9ktw/CjUAAADQcT3b\nhThJqupFSfZPcuKQ5j9P8pGqGp/kqQxMEwYAAIDh9DTAttYWJtl8ubZvJtm1l88FAABg/TNauxAD\nAADA8yLAAgAA0AkCLAAAwBoaN25c+vr6MmnSpBxyyCF57LHH1qifvfbaK1On/votMjNnzsxee+01\n7D1z585d4d2v6zsBFgAAWC9Urd2fkdh4440ze/bsfO9738tLXvKSXHLJJWtc//z58/OVr3xlxNcL\nsAAAAKyRadOmZd68eYPH5513XnbbbbdMnjw573nPe5IkCxcuzBve8IbsvPPOmTRpUq655prB66dP\nn56zzz57hX6fffbZTJ8+fbCvyy67LEly6qmn5uabb05fX18uvPDCHn+6dUNPdyEGAAD4TfDss8/m\na1/7Wo477rgkyYwZM3L//ffntttuS2sthx56aL7xjW9kwYIF2WqrrfLlL385SfKLX/xisI9p06bl\nuuuuy9e//vVsuummg+1XXHFFNttss9x+++1ZvHhx9thjjxxwwAE555xzcv755+dLX/rS6H7YMWQE\nFgAAYA0tWrQofX19eelLX5qHH344+++/f5L+ADtjxozssssumTJlSu69997cf//92WmnnfLVr341\n73rXu3LzzTdns802W6a/008/PWedddYybTNmzMinPvWp9PX15Q/+4A/yyCOP5P777x+1z7guEWAB\nAADW0NI1sA888EBaa4NrYFtrOe200zJ79uzMnj07c+bMyXHHHZdXvepVmTVrVnbaaaecfvrped/7\n3rdMf/vss08WLVqUW265ZbCttZaLL754sK8f//jHOeCAA0b1c64rBFgAAIDnacKECbnoootywQUX\n5JlnnsmBBx6YK6+8Mk888USSZN68eZk/f34eeuihTJgwIUceeWSmT5+eWbNmrdDX6aefnnPPPXfw\n+MADD8yll16aJUuWJEnuu+++LFy4MJtuumkef/zx0fmA6whrYAEAANaCXXbZJZMnT87VV1+do446\nKvfcc0+mTZuWJNlkk03ymc98JnPmzMn06dPzghe8IBtssEEuvfTSFfo5+OCDM3HixMHj448/PnPn\nzs2UKVPSWsvEiRNz/fXXZ/LkyRk3blx23nnnHHPMMTn55JNH7bOOlWqtjXUNqzV16tQ2c+bMsS4D\nAABYh9xzzz3Zfvvtx7oMnqOV/d2q6o7W2tRV3DLIFGIAAAA6QYAFAACgEwRYAAAAOkGABQAAoBME\nWAAAADpBgAUAAKATBFgAAIA1NG7cuPT19WXSpEk55JBD8thjj61RP3vttVemTv31W2RmzpyZvfba\na9h75s6dm89+9rNr9Lyh/uIv/iJ9fX3ZYYcdsvHGG6evry99fX3ZcccdV9p+7bXX5phjjsl2222X\nvr6+TJkyJd/5zneedx0jMX5UngIAANBj+31nzlrt78Zpr1ztNRtvvHFmz56dJDn66KNzySWX5O//\n/u/X6Hnz58/PV77ylRx00EEjun5pgH3rW9867HX//M//nLlz5+bMM89c6flLLrlksL83vvGNg59n\n6HOWb//Sl76U8847L4cffnhmzJiRE088MXfdddeI6n4+jMACAACsBdOmTcu8efMGj88777zstttu\nmTx5ct7znvckSRYuXJg3vOEN2XnnnTNp0qRcc801g9dPnz49Z5999gr9Pvvss5k+ffpgX5dddlmS\n5NRTT83NN9+cvr6+XHjhhT3+dKv2ute9LnPmrN3/eLAqRmABAACep2effTZf+9rXctxxxyVJZsyY\nkfvvvz+33XZbWms59NBD841vfCMLFizIVlttlS9/+ctJkl/84heDfUybNi3XXXddvv71r2fTTTcd\nbL/iiiuy2Wab5fbbb8/ixYuzxx575IADDsg555yT888/P1/60pdG98Mu59///d+z0047jcqzBFgA\nAIA1tGjRovT19WXevHnZfvvts//++yfpD7AzZszILrvskiR54okncv/992fPPffMKaeckne96115\n4xvfmD333HOZ/k4//fScddZZ+eAHPzjYNmPGjNx111259tprk/SH3vvvvz8bbrjhKut65JFHsu++\n+yZJHn300Tz99NO5/vrrkySf/vSn10rgnD59es4666xMnDgxV1xxxfPubyQEWAAAgDW0dA3sk08+\nmQMPPDCXXHJJ3vGOd6S1ltNOOy0nnnjiCvfMmjUrN9xwQ04//fTsu+++efe73z14bp999snpp5+e\nW265ZbCttZaLL744Bx544DL93HTTTausa/PNNx9cs7q6NbBrauka2NFkDSwAAMDzNGHChFx00UW5\n4IIL8swzz+TAAw/MlVdemSeeeCJJMm/evMyfPz8PPfRQJkyYkCOPPDLTp0/PrFmzVujr9NNPz7nn\nnjt4fOCBB+bSSy/NkiVLkiT33XdfFi5cmE033TSPP/746HzAdYQRWAAAgLVgl112yeTJk3P11Vfn\nqKOOyj333JNp06YlSTbZZJN85jOfyZw5czJ9+vS84AUvyAYbbJBLL710hX4OPvjgTJw4cfD4+OOP\nz9y5czNlypS01jJx4sRcf/31mTx5csaNG5edd945xxxzTE4++eRR+6xjpVprY13Dak2dOrXNnDlz\nrMsAAADWIffcc0+23377sS6D52hlf7equqO1NnUVtwwyhRgAAIBOEGABAADoBAEWAACAThBgAQCA\nzurCnj782vP9ewmwAABAJ2200UZ55JFHhNiOaK3lkUceyUYbbbTGfXiNDgAA0Elbb711HnzwwSxY\nsGCsS2GENtpoo2y99dZrfL8ACwAAdNIGG2yQ7bbbbqzLYBSZQgwAAEAnCLAAAAB0ggALAABAJwiw\nAAAAdIIACwAAQCcIsAAAAHSCAAsAAEAnCLAAAAB0ggALAABAJwiwAAAAdIIACwAAQCcIsAAAAHSC\nAAsAAEAnCLAAAAB0ggALAABAJwiwAAAAdIIACwAAQCcIsAAAAHSCAAsAAEAnCLAAAAB0ggALAABA\nJwiwAAAAdELPAmxVvbqqZg/5+WVV/fXAub+qqnur6vtVdW6vagAAAGD9Mb5XHbfWfpCkL0mqalyS\neUmuq6q9k7wpyc6ttcVVtWWvagAAAGD9MVpTiPdN8sPW2gNJ3p7knNba4iRprc0fpRoAAADosNEK\nsH+W5OqB31+VZM+qurWq/quqdhulGgAAAOiwngfYqtowyaFJ/nWgaXySlyR5TZLpST5XVbWS+06o\nqplVNXPBggW9LhMAAIB13GiMwB6UZFZr7eGB4weTfKH1uy3Jr5JssfxNrbXLW2tTW2tTJ06cOApl\nAgAAsC4bjQD7lvx6+nCSXJ9k7ySpqlcl2TDJz0ehDgAAADqspwG2ql6UZP8kXxjSfGWSV1TV95L8\nS5KjW2utl3UAAADQfT17jU6StNYWJtl8ubankxzZy+cCAACw/hmtXYgBAADgeRFgAQAA6AQBFgAA\ngE4QYAEAAOgEARYAAIBOEGABAADoBAEWAACAThBgAQAA6AQBFgAAgE4QYAEAAOgEARYAAIBOEGAB\nAADoBAEWAACAThBgAQAA6AQBFgAAgE4QYAEAAOgEARYAAIBOEGABAADoBAEWAACAThBgAQAA6AQB\nFgAAgE4YP9YFAAAArE/2+86csS5hGTdOe+VYl7DWGIEFAACgEwRYAAAAOkGABQAAoBMEWAAAADpB\ngAUAAKATBFgAAAA6QYAFAACgEwRYAAAAOkGABQAAoBMEWAAAADpBgAUAAKATBFgAAAA6QYAFAACg\nEwRYAAAAOkGABQAAoBMEWAAAADpBgAUAAKATBFgAAAA6QYAFAACgEwRYAAAAOkGABQAAoBMEWAAA\nADpBgAUAAKATBFgAAAA6QYAFAACgEwRYAAAAOkGABQAAoBMEWAAAADpBgAUAAKATBFgAAAA6QYAF\nAACgEwRYAAAAOkGABQAAoBN6FmCr6tVVNXvIzy+r6q+HnD+lqlpVbdGrGgAAAFh/jO9Vx621HyTp\nS5KqGpdkXpLrBo5fnuSAJD/p1fMBAABYv4zWFOJ9k/ywtfbAwPGFSf42SRul5wMAANBxoxVg/yzJ\n1UlSVW9KMq+19t1RejYAAADrgZ5NIV6qqjZMcmiS06pqQpK/S//04dXdd0KSE5Jkm2226WmNAAAA\nrPtGYwT2oCSzWmsPJ/m/k2yX5LtVNTfJ1klmVdVLl7+ptXZ5a21qa23qxIkTR6FMAAAA1mU9H4FN\n8pYMTB9urf2fJFsuPTEQYqe21n4+CnUAAADQYT0dga2qFyXZP8kXevkcAAAA1n89HYFtrS1Msvkw\n57ft5fMBAABYf4zGFGIAAIDnrGqsK1hR8yLQMTVar9EBAACA50WABQAAoBMEWAAAADpBgAUAAKAT\nBFgAAAA6QYAFAACgEwRYAAAAOkGABQAAoBMEWAAAADpBgAUAAKATBFgAAAA6QYAFAACgEwRYAAAA\nOkGABQAAoBMEWAAAADpBgAUAAKATBFgAAAA6QYAFAACgEwRYAAAAOkGABQAAoBMEWAAAADpBgAUA\nAKATBFgAAAA6QYAFAACgEwRYAAAAOkGABQAAoBMEWAAAADpBgAUAAKATBFgAAAA6QYAFAACgEwRY\nAAAAOkGABQAAoBMEWAAAADpBgAUAAKATBFgAAAA6QYAFAACgEwRYAAAAOkGABQAAoBMEWAAAADpB\ngAUAAKATBFgAAAA6QYAFAACgEwRYAAAAOkGABQAAoBMEWAAAADpBgAUAAKATBFgAAAA6QYAFAACg\nEwRYAAAAOkGABQAAoBMEWAAAADpBgAUAAKATBFgAAAA6QYAFAACgE8b3quOqenWSa4Y0vSLJu5O8\nLMkhSZ5O8sMkb2utPdarOgAAAFg/9GwEtrX2g9ZaX2utL8muSZ5Mcl2SryaZ1FqbnOS+JKf1qgYA\nAADWH6M1hXjfJD9srT3QWpvRWntmoP2WJFuPUg0AAAB02GgF2D9LcvVK2o9N8pVRqgEAAIAO69ka\n2KWqasMkh2a5qcJV9fdJnkly1SruOyHJCUmyzTbb9LhKAABYf+33nTljXcIKbpz2yrEugQ4aNsAO\nhM+Dk+yZZKski5J8L8mXW2s/GOEzDkoyq7X28JB+j0nyxiT7ttbaym5qrV2e5PIkmTp16kqvAQAA\n4DfHKgNsVZ2R5I+TfCPJHenffGmjJK9K8uGqqiTvbK19bzXPeEuGTB+uqtcn+dskf9Rae/L5lQ8A\nAMBviuFGYO9qrf3DKs6dW1W/m+Tlw3VeVS9Ksn+SE4c0/2OSFyb5an8Gzi2ttZNGXjIAAAC/iVYZ\nYFtrX1y+bWBK8fjW2pOttZ8m+elwnbfWFibZfLk2k90BAAB4zka8C3FVvS3Jl5N8qarO6l1JAAAA\nsKJVBtiqOni5pgNba/u31vZJckhvywIAAIBlDTcCu1tVXVdVkwaOv19Vl1XVpUnuHYXaAAAAYNBw\na2DfW1VbJfmHqlqS5N1JXpJkQmtt1mgVCAAAAMlq3gOb5H+S/O8kOya5Msm3k1zQ66IAAABgecOt\ngX1vki8lmZFkj9baG9M/dfiDri4lAAAgAElEQVSGqnrrKNUHAAAASYZfA/um1tq+SfZK8rYkaa19\nIcnrk/xu70sDAACAXxtuCvE9VfXRJBOSfHNpY2ttSUwjBgAAYJQNt4nTW6pqlyRLWmvfG8WaAAAA\nYAWrDLBV9ZrW2i3DnN8kyTattbt7UhkAAAAMMdwU4rdW1XlJvpLkjiQLkmyU5JVJ9h749509rxAA\nAAAy/BTid1TVFknenOSo9G/ctCjJPUk+2Vq7aVQqBAAAgKzmPbCttZ8nuXTgBwAAAMbMcK/RAQAA\ngHWGAAsAAEAnCLAAAAB0wrBrYJOkqm5NcmWSq1trv+x9SQAAsO66oF421iUs45Q2b6xLgFEzkhHY\no5O8IsnsqvpMVe3b45oAAABgBasNsK21e1tr70rye0k+n+RTVfXjqjqjql7c8woBAAAgI1wDW1U7\nJDknyQeSfDHJkUmeTvKfvSsNAAAAfm0ka2BvS/Jk+tfBvru1tmjg1Leqao9eFgcAAABLrTbAJjmy\ntXbfyk601g5dy/UAAADASo0kwB5VVRe01h5Lkqr67SR/3Vp7T29LAwBgfbbfd+aMdQkruHHaK8e6\nBGAYI1kD+8al4TVJWmv/k+SQ3pUEAAAAKxpJgB1XVRsuPaiqjZJsOMz1AAAAsNaNZArxvyT5alVd\nOXB8bJKrelcSAAAArGi1Aba19v6q+j9J9h1oOre19uXelgUAAADLGskIbFpr/57k33tcCwAAAKzS\natfAVtVuVXVLVf2iqp6qqsVV9cvRKA4AAACWGskmTh9NcnSSHyXZNMlfJrmol0UBAADA8kYSYF/Q\nWvtBkvGttSWttX9K8oYe1wUAAADLGMka2IUDr9H5blW9P8lPk4zrbVkAAACwrJGMwB4zcN1fJnk2\nye8lObyHNQEAAMAKhh2BrapxSc5srf0/SZ5KcsaoVAUAAADLGXYEtrX2bJJXVNUGo1QPAAAArNRI\n1sD+MMnNVfXFJAuXNrbW7EQMAADAqBlJgP3JwM+EgR8AAAAYdasNsK01614BAAAYc6sNsFX11SRt\n+fbW2gE9qQgAAABWYiRTiE8f8vtGSf4kyeLelAMAAAArN5IpxLcu1/RfVbV8GwAAAPTUSKYQ/9aQ\nwxck2TXJb/esIgAAAFiJkUwh/n7618BWkmeS/DjJn/eyKAAAAFjeSKYQv3w0CgEAAIDhjGQK8UlJ\n/qW19tjA8W8neXNr7fJeFwcAwMjs9505Y13CMm6c9sqxLgFYD71gBNectDS8Jklr7X+SvL13JQEA\nAMCKRhJgxw09qKoXJNmgN+UAAADAyo1kE6evVtXVST42cHxSkht7VxIAAACsaCQBdnr6pwyfPHD8\n1SSX9awiAAAAWImRBNgNkny0tfaPyeAU4g3T/0odAAAAGBUjWQP79SQvGnL8oiT/2ZtyAAAAYOVG\nEmA3bq09vvRg4PcJvSsJAAAAVjSSAPtkVe289KCq+pI81buSAAAAYEUjWQN7cpLrquqBJJXk5Une\n2tOqAAAAYDmrDbCttVuravsk2w803Z3k2Z5WBQAAAMsZyRTitNYWt9ZmJ9ksycVJ5q3unqp6dVXN\nHvLzy6r666p6SVV9taruH/j3t5/nZwAAAOA3wGoDbFVNraoPDUwhviHJbUkmre6+1toPWmt9rbW+\nJLsmeTLJdUlOTfK11trvJfnawDEAAAAMa5UBtqreV1U/SHJBkvuSTE0yv7V2RWvt58/xOfsm+WFr\n7YEkb0ryyYH2Tyb5X8+9bAAAAH7TDLcG9i+SfD/JhUluaK09XVVtDZ/zZ0muHvj9d1prPx34/WdJ\nfmcN+wQAWOuqxrqCFbU1/X9gAOuZ4aYQvzTJuUnenORHVfWJJBtX1YjWzS5VVRsmOTTJvy5/rrXW\nkqz0f5Kr6oSqmllVMxcsWPBcHgkAAMB6aJVhtLW2pLX2pdbaEUl+L8l/JLk1ybyq+tRzeMZBSWa1\n1h4eOH64qn43SQb+nb+K51/eWpvaWps6ceLE5/A4AAAA1kcj3YV4UWvtmtba/0r/63Rueg7PeEt+\nPX04Sf4tydEDvx+d5IvPoS8AAAB+Qz2n6cBJ0lp7rLV25UiuraoXJdk/yReGNJ+TZP+quj/JfgPH\nAAAAMKzhNnF63lprC5NsvlzbI+nflRgAAABG7DmPwAIAAMBYWKMAW1V7r+1CAAAAYDhrOgL7ybVa\nBQAAAKzGKtfAVtUXVnUqy61rBQAAgF4bbhOnvdP/mpuFy7VXktf2rCIAAABYieEC7K1JHm+tfX35\nE1X1w96VBACsL/b7zpyxLmEZN0575ViXAMDzMFyAPai11lZ2orVmBBYAAIBRtcpNnFYWXqvq9b0t\nBwAAAFbuue5C/P6eVAEAAACr8VwDbPWkCgAAAFiN5xpg/3dPqgAAAIDVGG4TpyRJVb0wyYlJ/jBJ\nq6qpSS5vrS3udXEAAACw1GoDbJJPJlmc5J8Gjt860PZnvSoKAAAAljeSADu5tbbDkOOvVtXdvSoI\nAAAAVmYka2C/W1W7LT2oql2T3Nm7kgAAAGBFIxmB3SnJrVX1o4Hj7ZLcU1V3pv91sVN6Vh0AAAAM\nGEmAfVPPqwAAAIDVWG2Aba39sKp2TLLnQNPNrbXv97YsAAAAWNZq18BW1V8m+dck2wz8fK6qvA8W\nAACAUTWSKcQnJNm9tfZEklTV+5N8O8lHe1kYAAAADDWSXYgrydNDjpcMtAEAAMCoWeUIbFWNb609\nk+TT6d+F+PMDpw5L8snRKA4AAACWGm4K8W1JprTWzq2qm5L84UD7Sa2123teGQAAAAwxXIAdnCbc\nWrst/YEWAAAAxsRwAXZiVf3Nqk621j7Ug3oAAABgpYYLsOOSbBIbNgEAALAOGC7A/rS19r5RqwQA\nAACGMdxrdIy8AgAAsM4YLsDuO2pVAAAAwGqsMsC21h4dzUIAAABgOMONwAIAAMA6Q4AFAACgEwRY\nAAAAOkGABQAAoBMEWAAAADpBgAUAAKATBFgAAAA6YfxYFwAArF7VWFewrNbGugIAfhMZgQUAAKAT\nBFgAAAA6QYAFAACgEwRYAAAAOsEmTgD8RrmgXjbWJazglDZvrEsAgE4wAgsAAEAnCLAAAAB0ggAL\nAABAJwiwAAAAdIIACwAAQCcIsAAAAHSCAAsAAEAnCLAAAAB0ggALAABAJwiwAAAAdIIACwAAQCcI\nsAAAAHRCTwNsVb24qq6tqnur6p6qmlZVfVV1S1XNrqqZVbV7L2sAAABg/TC+x/1/JMl/tNYOr6oN\nk0xI8rkk722tfaWqDk5ybpK9elwHAAAAHdezAFtVmyV5XZJjkqS19nSSp6uqJfmtgcs2S/JQr2oA\nAABg/dHLEdjtkixI8omq2jnJHUn+3yR/neT/q6rz0z+F+bU9rAEAAID1RC/XwI5PMiXJpa21XZIs\nTHJqkrcnObm19vIkJye5YmU3V9UJA2tkZy5YsKCHZQIAANAFvQywDyZ5sLV268DxtekPtEcn+cJA\n278mWekmTq21y1trU1trUydOnNjDMgEAAOiCngXY1trPkvx3Vb16oGnfJHenf83rHw207ZPk/l7V\nAAAAwPqj17sQ/1WSqwZ2IP5Rkrcl+WKSj1TV+CRPJTmhxzUAAACwHuhpgG2tzU4ydbnmbybZtZfP\nBQAAYP3TyzWwAAAAsNb0egoxAOuzqrGuYFmtjXUFAEAPGYEFAACgEwRYAAAAOkGABQAAoBMEWAAA\nADpBgAUAAKAT7EIMsA7Y7ztzxrqEFdw47ZVjXQIAwDKMwAIAANAJRmCB9Y93kwIArJeMwAIAANAJ\nAiwAAACdIMACAADQCQIsAAAAnWATJ2CVLqiXjXUJyzilzRvrEgAAGENGYAEAAOgEARYAAIBOEGAB\nAADoBAEWAACAThBgAQAA6AQBFgAAgE4QYAEAAOgEARYAAIBOEGABAADoBAEWAACAThBgAQAA6ITx\nY10A/CbY7ztzxrqEFdw47ZVjXQIAADwnRmABAADoBAEWAACAThBgAQAA6AQBFgAAgE4QYAEAAOgE\nARYAAIBO8BodOqdqrCtYVmtjXQEAAPxmMAILAABAJwiwAAAAdIIACwAAQCcIsAAAAHSCAAsAAEAn\nCLAAAAB0ggALAABAJwiwAAAAdIIACwAAQCcIsAAAAHSCAAsAAEAnCLAAAAB0ggALAABAJwiwAAAA\ndIIACwAAQCcIsAAAAHSCAAsAAEAnCLAAAAB0ggALAABAJwiwAAAAdIIACwAAQCf0NMBW1Yur6tqq\nureq7qmqaQPtfzXQ9v2qOreXNQAAALB+GN/j/j+S5D9aa4dX1YZJJlTV3knelGTn1triqtqyxzUA\nAACwHuhZgK2qzZK8LskxSdJaezrJ01X19iTntNYWD7TP71UNAAAArD96OYV4uyQLknyiqu6sqo9X\n1YuSvCrJnlV1a1X9V1Xt1sMaAAAAWE/0MsCOTzIlyaWttV2SLExy6kD7S5K8Jsn0JJ+rqlr+5qo6\noapmVtXMBQsW9LBMAAAAuqCXAfbBJA+21m4dOL42/YH2wSRfaP1uS/KrJFssf3Nr7fLW2tTW2tSJ\nEyf2sEwAAAC6oGcBtrX2syT/XVWvHmjaN8ndSa5PsneSVNWrkmyY5Oe9qgMAAID1Q693If6rJFcN\n7ED8oyRvS/9U4iur6ntJnk5ydGut9bgOAAAAOq6nAba1NjvJ1JWcOrKXzwUAAGD908s1sAAAALDW\nCLAAAAB0ggALAABAJwiwAAAAdIIACwAAQCcIsAAAAHSCAAsAAEAn9PQ9sKzbqsa6ghW1NtYVAAAA\n6yojsAAAAHSCAAsAAEAnCLAAAAB0ggALAABAJwiwAAAAdIIACwAAQCcIsAAAAHSCAAsAAEAnCLAA\nAAB0ggALAABAJwiwAAAAdIIACwAAQCcIsAAAAHSCAAsAAEAnCLAAAAB0ggALAABAJwiwAAAAdIIA\nCwAAQCcIsAAAAHSCAAsAAEAnCLAAAAB0ggALAABAJwiwAAAAdIIACwAAQCcIsAAAAHSCAAsAAEAn\nCLAAAAB0ggALAABAJwiwAAAAdIIACwAAQCcIsAAAAHSCAAsAAEAnjB/rAtYbVWNdwbJaG+sKAAAA\n1iojsAAAAHSCAAsAAEAnCLAAAAB0ggALAABAJwiwAAAAdIIACwAAQCcIsAAAAHSCAAsAAEAnCLAA\nAAB0ggALAABAJwiwAAAAdIIACwAAQCcIsAAAAHSCAAsAAEAn9DTAVtWLq+raqrq3qu6pqmlDzp1S\nVa2qtuhlDQAAAKwfxve4/48k+Y/W2uFVtWGSCUlSVS9PckCSn/T4+QAAAKwnejYCW1WbJXldkiuS\npLX2dGvtsYHTFyb52yStV88HAABg/dLLKcTbJVmQ5BNVdWdVfbyqXlRVb0oyr7X23R4+GwAAgPVM\nLwPs+CRTklzaWtslycIkZyb5uyTvXt3NVXVCVc2sqpkLFizoYZkAAAB0QS8D7INJHmyt3TpwfG36\nA+12Sb5bVXOTbJ1kVlW9dPmbW2uXt9amttamTpw4sYdlAgAA0AU9C7CttZ8l+e+qevVA075JZrXW\ntmytbdta2zb9IXfKwLUAAACwSr3ehfivklw1sAPxj5K8rcfPAwAAYD3V0wDbWpudZOow57ft5fMB\nAABYf/RyDSwAAACsNQIsAAAAnSDAAgAA0AkC7P/P3nmH21UVffidFJIAKRB6DUjvvROqoUPoSi/S\nBET4IIA0ERAUlF5EOtJ7L0FAQIoICCi9iIAoTYr0Mt8fv9m5O4ebgCVZe+O8z3Oee/Y++yZz1l17\n9ppZU5IkSZIkSZIkSZJWkAZskiRJkiRJkiRJ0grSgE2SJEmSJEmSJElaQRqwSZIkSZIkSZIkSStI\nAzZJkiRJkiRJkiRpBWnAJkmSJEmSJEmSJK0gDdgkSZIkSZIkSZKkFaQBmyRJkiRJkiRJkrSCNGCT\nJEmSJEmSJEmSVpAGbJIkSZIkSZIkSdIK0oBNkiRJkiRJkiRJWkEasEmSJEmSJEmSJEkrSAM2SZIk\nSZIkSZIkaQVpwCZJkiRJkiRJkiStIA3YJEmSJEmSJEmSpBWkAZskSZIkSZIkSZK0gjRgkyRJkiRJ\nkiRJklaQBmySJEmSJEmSJEnSCtKATZIkSZIkSZIkSVpBGrBJkiRJkiRJkiRJK0gDNkmSJEmSJEmS\nJGkFacAmSZIkSZIkSZIkrSAN2CRJkiRJkiRJkqQVpAGbJEmSJEmSJEmStII0YJMkSZIkSZIkSZJW\nkAZskiRJkiRJkiRJ0grSgE2SJEmSJEmSJElaQRqwSZIkSZIkSZIkSStIAzZJkiRJkiRJkiRpBWnA\nJkmSJEmSJEmSJK0gDdgkSZIkSZIkSZKkFaQBmyRJkiRJkiRJkrSCNGCTJEmSJEmSJEmSVpAGbJIk\nSZIkSZIkSdIK0oBNkiRJkiRJkiRJWkEasEmSJEmSJEmSJEkrSAM2SZIkSZIkSZIkaQVpwCZJkiRJ\nkiRJkiStIA3YJEmSJEmSJEmSpBWkAZskSZIkSZIkSZK0gjRgkyRJkiRJkiRJklaQBmySJEmSJEmS\nJEnSCtKATZIkSZIkSZIkSVpBGrBJkiRJkiRJkiRJK0gDNkmSJEmSJEmSJGkFacAmSZIkSZIkSZIk\nrSAN2CRJkiRJkiRJkqQVpAGbJEmSJEmSJEmStII0YJMkSZIkSZIkSZJWkAZskiRJkiRJkiRJ0grS\ngE2SJEmSJEmSJElaQRqwSZIkSZIkSZIkSSsYpwasmQ0ys0vN7Akze9zMljSzI+P4ETO7wswGjUsZ\nkiRJkiRJkiRJkq8H43oH9ljgRnefA5gfeBwYCczj7vMBTwH7jmMZkiRJkiRJkiRJkq8B48yANbOB\nwFDgdAB3/9jd33L3m93907jsXmC6cSVDkiRJkiRJkiRJ8vVhXO7AzgS8BpxpZg+Z2WlmNlHHNdsA\nN4xDGZIkSZIkSZIkSZKvCebu4+YfNlsE7bAu7e73mdmxwDvufkB8vh+wCLCedyOEmW0PbB+HswNP\njhNBm8dkwOulhfgXaaPMkHKPT9ooM7RT7jbKDCn3+KSNMkM75W6jzJByj0/aKDO0U+42ygztlfvf\nYUZ3n/zLLhqXBuxUwL3uPiSOlwX2cfc1zGwrYAdgJXd/f5wI0FLM7PfuvkhpOf4V2igzpNzjkzbK\nDO2Uu40yQ8o9PmmjzNBOudsoM6Tc45M2ygztlLuNMkN75R6XjLMQYnf/G/Cimc0ep1YCHjOzVYER\nwNppvCZJkiRJkiRJkiRflV7j+N/fFTjPzCYAngO2Bu4H+gAjzQy0S7vjOJYjSZIkSZIkSZIkaTnj\n1IB19z+gPNc6s4zL//NrwKmlBfg3aKPMkHKPT9ooM7RT7jbKDCn3+KSNMkM75W6jzJByj0/aKDO0\nU+42ygztlXucMc5yYJMkSZIkSZIkSZLkv8m4bKOTJEmSJEmSJEmSJP810oBNkiRJkiRJkiRJWkEa\nsOMZMxvXhbPGCWbWs7QM/wpm1r+0DEmSJCVom75OkiRJkn+FNGDHA2b2DTO71sx6u/unbTFizWw2\nM9sXwN0/M7NWzBczGwacbWbLlJblP8WiVHdTMbOZzGxdM1u6tCxflbgf9zCz5c1sttLy/Cc0fX60\nFTObysymNLNp47gV42xmM5vZTKGv04gdx5hZn7bMjWT801Y9UtFCeecws83NrE9pWZJxTysMkq8B\nw4DVgVvNbIIwYht9g8XiZ2fgMDM7FMDdPzez3mUl+0rMBcwAfNvMvllamK9K9bAIo3D6cHh4Ux8i\nZjYrcB+wLHCqmR1gZsuXlWrshMw3oGro6wMnmNlqZaX6atTmx+xmtqCZTegNr8IXsh5mZt8ys8Xi\nXCPnc4WZzQncCRwOXGFm6zZ9nAHMbCLgAuBhM5ul7UZsC+bJN4BrgKFtGueaHpm+TQ68mtwTdHe+\nabRVj0DXmLZF3hq7AD8DVjGzfqWFScYtacCOH64Fdgf+BDwM4O4fFZXoS3D3z4DLgR8D65nZqXH+\nE2juQyN4HHgW+D2wYSz2BxSW6UsJY3V14HbgaOA+M5s2zjfxXl0OOMHd9wA2ABxY3cxWLCvWWFkM\nuMndvwscCPwCOCrGvdHEPFgNGAkcBNxjZks3dG5gZjMhHdILOQzONbONGu6U6QMcChzj7tsAPwSO\nMbPN4vNGyg3g7u8hg+p+4F4zmz/0eKOpGSYLmNkwMxtiZr2aPE+CSYD5kR5cpiXO3UqPrIkceReY\n2WlN1iMVIfdawOVmdoqZbV+dLyzaF2izHoFRY/1NMzvRzLY3s2VLy/QVeQR4DtgN2BCaP9YVZraw\nma1lZrOaWd/S8rSBVoSythlTuHBfYCVgHeB4M3sa+ARYAXiracZs7YavHmgLAw+Y2XnABMC3AUPf\noYncgXa8f4e+wyHAADPb0N3/XlSysWBmcyFD8Fvufo+Z/RS40sy+6e5vFRavOz5CBusJ7v64mZ0D\nbA4sa2a/bdq8Dt4HpgBw938Al5mZAyPM7EV3f7SodGPBzGZH47u+u99vZrsD+wDfRw6bpjE38KC7\n7w1gZvcAl5oZ7n5xWdG6x90/MrM/Au+bWU93v97MtgJON7MP3P2ywiJ2S8j6GfAKMAJYABhpZpsC\nU7j7eUUFHANmZrFYXgk4EXgB+DvwlJkd5e4flpVwrLyJnNLTABsDb5rZXwDc/e2SgnVHbaznAHZF\nC/ynkTNsXeAZNPaNJKJndgbORHr8FDMb5O4/LSvZFwk98idapkcqzGwR4GDkEJsJWMvMJnf3y8tK\n1j01/Xcn8C7wMnCAmU0CTGtmh7j7u0WFHAvh9D8J3YNvAc+F/nunrGTNptEet7ZiZv3MbCCAu3/q\n7k8Df0TG327AIGCwu/+9SYt8M+sViqDiNmBKd/8A2AgYDswc36kxxquZTWtm/WteqwmAWdHD+TVg\nKfTAm7GQiN1SjXe8nxQpsHmADwDcfQSaNwcXE7KD8A7uHIcXowfGpmbW393/AlwKrAUsX0jELxCh\ncksBuPsVwExmdkrtkptRKPQ3Ssg3Jsysd7WrY2ZTAvui+TE1gLsfjQzXHxcTshusK5zyb3HcG8Dd\nf40Wykc0zaPfsXv2PNIZfQFCD+4IHGwKG20ctZ3W54Dt3f104AzgJmBxaF5hp5pBtQCwH7Ceu68C\nnA0MRHOlcTsotZ3K59Eu5kHoWbMX8GsUbdAYavefm9kMwHboWdjL3T8FfgLMC2xfTsovUv+7h/Pu\nBuAP7n6Ru18DDAW2MrMlS8nYiZlNVLvPngaWpCV6pJrX4Si4EDjJ3Q8Hfop0yVqmnN7G3I8W4eQ1\n/fcpsJ2734HSKQ4HZmqi8VqLPJkfRYNt4O5rAr9Ea9jhBcVrBWnA/peJXbQrUc7DD2sLow+BE9BC\n+SDgejN7IoyY4goh5D4ZuNjMlotzEwEfmdl26KY6HJiyY/FfFDNbFbgaOBa4MDyybyND6og4vw/6\nm2wS36k4EWK0LDCjma2Dduj3Rt63oWY2WVx6CzK+i2Nmc6Nx/RxGhcHfjQy/Lc1sUnd/EriMBjgL\nTEyMQvhPMrM14qNhwOxmdjKAu/8T+CewSBlJv0jojaVQaOL6aGf+WOBeYAEzmy8uvQ74hzUk/C/0\nyP+Z2UB3/z0wKVr8AODutyM9OF/3/8L4J3akzjKzvcxsPXc/C+iH8qP7mXLRbwZ+AzTGCAxn0k5m\ntlvt7/8X4F1T0ZgVgauA75jZnE0JJ64cjWFQTQfMASyNduwBfgu8ThjeTQkRNbPBsaMzMYySaw60\nQ3Ut0iufAJ826H7sjZ4nS5oK7a2Enin3AGub2Wyh/84G+jVI7gmRExozWxzN69+g3MbBAO7+LHBr\nMSE7CCP7OuBYM9sL+BUwZRw3WY/0N7Mpo8bJ/Git+leUT1pFKt2PdHnPBt2PcwAnmtnPzWwNM5s4\nNotGmtkSyFnwK2BqM9ukKQ68Dv03PXJ4LU7X+uMe5PxtjGOmqTRCWX1dCM/V+cC56OZZFYW8EOcX\nBy5y95PcfWtg49jNLKoQQvGeh4yR24Gfm9mCkVN1O3AkcJW7HwrMibxzxTGzocBxwJ7Ig/Vn5F0G\neA8tinZ291OBG4FD4js1gU+A2YHTUL7r2+5+H/oew4GfmdluwA/Q36UosbDZHjjR3U8Ox0sv5BgY\nCUyHwkO3Qn+PJ4sJG0QUwT/RouImYDsz2yDCsdcH5jKzS81sb2BrNNebwmfIiNoHGa7PuftDwPHA\nEOBQMzscOAq4wd0/LyVoRei/y4H3aiGUayJnwQW1Sz+ky1gpShh616J8+Q+ADczsWHffDHnBjwPW\nMVU2Xxv9TYoTi7crgAHAZsApMGpRPzkKxb3U3ddFDtPpCok6GqFHVjeznWPnbD/k1D0A2MHMlouw\n4UeAWWJx3RQH72XAOcCP4tkDmu+rod2eY1E0x3YoP7YJTIBqExyG5H/A3W8ALgImA46O58wBwF1N\n0CPB1Cit46donk/v7tuikO0LzGwVUxTHeuj7FcXMZkTjeylwPTBbrOvWRw6PRuqRYGbgajP7PnIu\n9kEyvm5ml5iKIQ1CzppGtCeMNetlKE3sA2ANYKr4eABaM13i7tujjZfnmuDAC122ipntaeqScSDw\nIEr72MzMVnX3j4FHUaTYwCbov8bi7vn6L7xQTuhWwIjauWWBc+P9hMAs8b5H9TsNkLsXCkHcq3bu\nB8BP4/2UwCLxvmf9+xYe697IObBx7fw2wCm14xlKj283svcBJo33UyOj6WoUwjVhnF8YeZbPBZYq\nLXNN9tOBTeL9FWgRdD0qZNIf5VX9AFiltKwhY3WfHYjCsNdHC85tgA3js+2B7zZI5r7A1PF+HrS4\nvwyFUw6M87PG3+IMYO3SMtdkPxA5iUDO0bmAfrX5ciFy1jwFrF5a3pBrJuDMeN8bGByyHh/ndkZO\ngluBtUrLGzJNjMIpd47jPmght14cLwGs2c3vFX/ehBwToQXaO8B8cW7KuA9fjjlyW1PmNlrgPxL6\nY05gf2C3+Gw+4EXi+aOwaQ0AACAASURBVBlzaI4GyNwP6BvvV0A7mNcAq9WuWTaeMefU5k7ROYJ2\nJiu9/T1knBzUcc2ZwEso9Hnp0mMdMi1P15ppMuAhFFK+S5zbHlXHbYweqf+9kZH3OQq/Ba2xBgN3\nxVifCyxfWt6QrReKCNy3du584NDacSPmxRjk742M1neBhWpzZtvQf6egVIRG6L8mv7KI038Jd3cz\nuxI9nKt8o4+BOcxsInd/z8yeqy6vfqeMtF24WvqcCbwVnh5DCmvu+PzvdBV28NrvFZM9/u9PTEWl\nJjZTLhXaRVm/dukr0JVrVUDU0Yg5sTgwnSnndXaUW7wdsBMyVH6NHn4HxmthM3vEtZNYmmvQbtqP\ngCfcfV8zOwot8Fd19+PNrIeHF7/0uHvXbsKFwDrufll4yo9FoUWXuHbngfLyxu7UPMCKpsJSC6Pd\ntQXRInRSZLi+iPKPN0Dz43FX6FRpXkeh2CCnzOfAx2b2oLuva2YrIKfNVe5+e+nxDnoA85rZYu7+\nO+ANM9sGhZxv5u4nAkRI9NsNkflTtBi+y1Sz4CMzuxsZsrj7vdWF1lXcpKjONoXyuysCZi6U2/8e\n0tePuPvfzews5MBZCzjf3a+u65OCLApc51F4x8zuB/Y3szPd/REzW8Dd3whZPwGeKCls6JFFgeXN\n7AmkR76FitetZ2aTufu5yIkwEv095jKz37vqGJSSu2/I/awpL3oyVDNkPTP7NnC9u7/t7lvHc2cx\nFKFSXHeje3IHM3sKyfwgypHe3MzmcPddQs5B7v5WaXlNqVTzuYpFroj04A+Bw2O9cR/ShcPROmQG\nV/pH8bGONethIUulH65Gz8mKu+PzUfqvJKH/ermivxZAnTLeAzZBhQ5ft64iqesBl4f+Kz2vG00a\nsP8hpmT8pVGe4jUxQXH14HsGeCOM16WBxUwVW4sXQDIVc5gK+NyVq1bhZvYIKpCAKZdgEGo9Unoh\nQYzjUOAxdOO/WP+YyL2MUNb5zWyPpiiAmBNvol3K+YAfuPurESZ1IDA8wkq2Qsr4YJQX+6sS8pry\nMxZH4cB/jp9bIA9ilT+6p5ldBywE3F+fIyXGPcJYV0Ge/F+6+/so+mEeM1sU2BI97IaY2drufnVJ\neeu4cpBeQmO5MnCguz9tZn9GYVHzm/LPh6HdkxfRrtU/ConcycuoJdH8aAFxBJrHe0Zo6G31i0uN\nd8zrhYDHYnxPAc435b8+ghYWNwD1HpnvQHEjcFJU+dvc/ZYO4+6vRNhqhLu+4+4vNWHxFiyAitec\ngRwvB6L5e5mZ/dzVimtqZPy9gfJ2/+ju9xSTOHD3i8zsXhjVVeBh4G0UCg8yXqABoawwSo88goy7\n3YHN3P1uM5sGzZHlQhfOg/Th39EztXR6TeW02xdFmXzH3X9jZq8io/C9CGddNYzYa9D82aDE2qSm\nRx5397vMbFsUifS0u28Z1zyEquFOGM+it6H8swatlUaEo3R6YFd3v9fMXgduMLOFkUNsI9QO6AIz\nO9Xdty+ot6dBu8IfuPszHcbdO8AMcd2iwPRmdmWD9N/cwJFmdjYKz94fFWC8zMxOcfcdUTXzl1Bk\n1c5m9ljnMzMZncyB/Q+IhcKFKNl6OHC2jV7N8j3gNTNbG/WbfLYhxuucKJTyu8BPTTmAdXoDn5ny\nfH4FfNYAhYupV+cZyCDZkq4+X9U8fgX4g5ltiL7baU2Qu467/xHtsN4N9Dezud39E3c/AC2MeqLw\ntLfc/bfARq4iCuOVmNvXI2/gT4BN3f1xlCPTF1jU1LdsbhRiV7zNT+1+nBp5528zs77u/gdkXF0L\nnOPum6Kc71eKCTsG3P1vKJLgSuAbZrZ0zI/TkFH1OFpsvOzujwF7uPvrJWQ1FRHaxcy+b4oyuQo4\nBoU7v+Lun7v7A6jdyKASMnZiyh29Du38XRLjexpq4XKJmS3kykF6C83xiZrgBa/JfULIOUsYKlVh\nkv5Az1h4XoPyYBuDu9+FKsKfA5ztalf1NgqtXMrMrkZRBa+jZ9MZaDFXBOsqkLU7gLu/ED8/RYvl\n/igKaFngEDObpPQc6eAd5NQYCawRu65/RWN7IZHf7e4vuvtNwI/d/Y1y4o4qCngjKgr4EPCSqfDR\nlShqZiNUWOiWuH4tFKJbwnit65GLQ49U+a/9ravI3swod3TCkLkRc8QV0XUc6mH8eBivPdz9JNR2\n8NfofvyDu7+KnE7FuiHEeN+MWsY9bGYruLvX1n6OdoznRTrmtSZsuFTEjvYzKPT5gtB/HyD9N6eZ\n3YQi8F5FhfdOjuuTseENiGNu4wuF890MbB7H0wBnAbPGcS8UAvM62r36ZpwvnWMyFaoo9+04XgIZ\nqZMih0YPlNP4CvAH5O1swnjPhMJylovjZdEu7JDaNf3QwvMRYK7SMnfIb/W/PTKwfo6KmAxGRtfi\nKMyE+DuM9jvjUdYpYo5sGsfrI4N7UBwvjYoOnINyetZtwPj2R7mL29bOnQUsU/sO69U+m6C0zF8y\nP6ZDOygno92IaZGTrMoP61FSl6CwwwdQcZjzUA5mH5RCcQQqIrRozOmHaEAud4zpE8CWcXw4ytuu\ncgW3CJ1yHNod/EIeaSG5h4RcW4Se2B+FEPes6YvN4l68qyly1+TvGT+3Robpc8C0tc8HoJ3CFWvn\nehSUdw4U4rx36MFTOz4fiBb3myOn42ol5OxG7iqfsdIRPdEa5OfIaUDMn+U6/zZNeYUO+Qaqy/Gz\nSm+EfpwSmDyOexeU8cv0yNbIOXMADcr37+Z7zIgq3T8IHNbx2ZR0rWVLr1mnjvtsqzjeHK1NB9fm\n/JzI4Lu/wfpvU+SAfKka2zjfDxV9Xb52rpj+a9OruABtfcWDYZv6QwN55L7dcd3ZTbqhUNjQrrXj\naWLRM3vt3MRoF6gxcodca6AFcvWAPp/awjg+u7T+XQrL25cozFQ7N0Ht/VLxkD4bOToaUXggFOoO\nwES1c5fX5Yv53g+YLo5LP+T6o93iXrHYMeSY2a3julFFQkq/4j6bvONcn9r4LoQcBdehsvrLl5a5\nNtadzoKzO+bH5jH+VwDDS8scMvWt6zS0CL0OhZTvEefmRg68qnBd8cJHwMbA7rXjYcDFHdesgcKL\nG+FwrI9d5/2GIjpejDk+NwoVpbtrC8g8pgJZw6vvFDrkXuSYHtaAcZ6QMJ5q5yo90g+lq/wcuAM5\nQhrxnOmQt1rk946fg2KeHIkceY/QkKKMY9Aj16LIhz1QBNvaKBVoaDVvSstdk3eUgyN+ToeckQeg\n9Js7CWd1E17IWbptNY4x/lcCA2rXzIhSaRqzZu3Uf7XjA1DY/oDQf7t2/k6+vtorc2D/TVxJ1xe7\nu1eJ4pHv8BGMKvP9LPBdVw5s8TA0UAhr5DlUCfB/NbPnkWLAzKaJc2u6+9+aIHdNhhtcYXNVWfHe\nKH+jytV9AjkQmhCmPR8Ke3rPzH6PQkf+6u4fm9nMyBO3GWqjMyNwuitkuCgx1h+YCpR8bGZVs/ve\nyCtb5f584gp3fQnKh0a5+7tmdqOrwIPFfXkfCtOp/h5PusLUihN5oiehMMT7kBF1j6sgz8zx2XfR\n/LgVLeyK5wPWOBe1XqjuxZ4of+23AO5+rpldgnLsPy6tR0LXfYgWmlX/6NvdfUcz+yawk5nd6+6j\ntawqPa+Da9AOfMWDwGQRHv+hqaf0b4CFQ7+XHuvJiQJewLuhs/vE3J7Y3feOafMYaie2b/W7Xj7s\n7xNkNP3WRi+QNap3I0qveQi4zdXXsxih1w4BJjCzi4A/ufv9NT2yD1DVWFgHeKkhz5m5UKqBoyrg\nrwC4+ych93IoZHU7lIt+oBcsMFXxVfQIcJ/XaitA8dz5b6A0txdR3ZB3a7pjOmTAro6q3y4GHO1R\ny6UhPMnoNQg+NLP+aO33JzMb7O4vmHoav9YA/Tclcs59ALzVof8GuvshZvY5chp8gKLwgMY8b1pD\n5sD+B3hXZdj6Q7eHmS2CdgLn8eg72qSJGYZHfbEwCdDXVEDoCjOboXZNcbkrGWryThA/3wT+Hrmx\nR6GWHU0wXgcg4/V0VHxiLhRitGh8djjwW3d/090fd/cb3f2OchJ3URvrj+NUPb/4lch5PZOG9IOr\n4yqSUZ+zfYDepqIO1yADqzimapuHo7zL9dEibl1gfVN1yP2AO939uZgjv2+S8eru7wI3hmOjGu/7\nUCE7zGz+eGB/WM2j0nqk0zCKe27HeD8SecS/UUK2L8Pd3/fRq0z3RCG4H5rZcsCpwMeu/PqihH64\nHum+I0zF9IjF20zAhZEvuDeKYNrY3a8tJnAH4eC6390/8q4CMPUCWfPEgnknd7+45sAZ78Qi/hwU\nznwcCjXfxtRrFOQ0/bO7P+Pu/3T381xFkYr2lQwH6BWo1/UUwLUxjzGzwShyY9rQ58ehKJorS8sN\nX1mPzFxCtu4wFTW8GlWx3wS42cymCN0xBOVDz+PqNvFtYAt3v7wJYw2jHOqfuftLcdwr8l4HAB+Y\n2VJo/kzu7q8VFZZR+u86VPPmcDP7LozSfzMj/be8ux+GKoNv5O7XNGW820buwP4bhBfu82rntbY4\newcVGZgI2MdVPKZx2BdbEzyP4vOHAj9qgqezk1BaHmNdVX18GnmXPwZ2qry4DaAvEWbm7s+GEhuB\njJRLgSM9Kj+X9haOiWqO1AzZ51Ffu+lQwY8mtG0ZDftiyfy3kUd8azQ/7u7+N8c7fZFx/Yyr/cbh\n6P5bHFU2/am7PwnNnR9jcBZ8Gs6CS9FiqCnjPYpudB9mthjSfWeWkWp0as+XKvqBaoET4/028Lsw\nVA4DDq7dp8WcBbETfBzKc70MOYwOjMXlkWhRd5vHDmCD7keg614LB019rgxEi+WFkbG4EaqUXNox\nMwHaUT0PwMyeROHl68V82dndn4jPRs37BuiThYFH3f0IADPbCdgrnvGfoR7MF8AoWeuOsmLUny9t\n0CPBcODX7v49ADM7ArgvZF0ItWs5DcC1s/xhvC+5gzkhcsh9WslRW49U+vButJ5aDz0vRxmvBfXf\nBGgj5UzkhFkY+KGZTenuB6ECh3d6tCNyFTgk3pe+J9uJNyCOuekv1K9zK2o5X7XPpkc9JkF5X28C\nK5eWeSzfpSr6MTWRU4JuujeAVUrLF/KsiIo4rAcsGOeqfI0FkTECCp36FJiltMwhT4/a+/2BPekq\nOlF5lg+sXVM83wGFJg5D3vuJOubIFMD68f5IFGmwfGnZYyyn7m7s437cMt6vhHJHi+ephTxTETnR\nwM4ojHzGOB6IQuZ+0qT50c136Fn/WTu/HQpt/R0NKFqCWipsgPL/+nfIPhhYBhkAK6BUj0bkTqHq\nwQ8CU8Rxr5rcA2vXPYJ2Bldv0lwJPbFA7XhBtDu/HpGXGeebkoc+WV3eumw1PbgpzS2QdQFwYu14\nCCo+tWPtXFPmRpWXO0PIvVDtsx2Rw2vq2rnicgOzIAdRddyzDXqkJu86yGFeP3dEjPWEtXPFxzrk\nGIDqbaxLVz50fbyrwlIXo3S9VRom/xHAkrXjOUJvbF7pkzjfCP3X9leGEH8JEfZ0IwoLGW5m15ry\nW6tcn6tRlVDQwmNdV4++0mE6vWrvp4lwEVw5gpOhgg5VqNFDwLfc/aYGyL0iCj97D1Ux/bmZre/K\nMV4WKbcn4/LDgW+4e/Fy4xGqc66pvy6oGt43gBVMLQzeQLleq0SYFB6arBSRh3QdyrWs2p9Uc2SG\n+KxPXH4ssJJ3eQ9LeTnnAe4Bdo0xJ+T5POb1NWhRClpQrOXuNzdgXs+FdqVWjlN3AP8ENjazGd39\nbVRoZaXavVrcK2tms5jZJrVQ0M/MbIL4Ob2ZbRmXPocKwu3v7teXkhdGtVy4Gi3efozy6SrZZ0TV\n4ydx7Vq+icLmGhHG6tpJuAu408wmde1CfGZm0wL3m9lQU6u2P6CqnNfH75XcMZnKzCaOw4+JHtEA\n7v4Q0ntroUr31U5n6VxXTC2I9gB2MKX9AKN0yTSo6jOoOMxSwKGl54mZzWZmm5nZd+LUISiPfm8A\nd/8zevZsZkpXaYoemR3YJ8b1H8hhvpIpXxB3PwXp9Xo+dOnno6GoowPM7LiQ6bO4H2egoXrEzAbV\n7scHgNUq/Q3g7vsAjwKrxvWNifJx93fQGm9bYMUqhDj03z2o6BHoOf9NVwuo0vqvHsn6D+DU0NG4\nIiBGoOf+VHF9I/Tf14E0YL+clYFb3P1AV9+xJ4GDYkG6PHCMux8F4O5/cvffxPvSRTS2NbPepsIC\n1wNXmtkxZrYkqjB7uHeFjpzn7iMbosiGAGe4cgQORbvDB5vZWughsZu73xohJe979OdrAPOiaqFb\nmNlUoVjvRbvJG8fCeRK0o1JcecVC4ldoHgxHD4RNrauv5HLAJe5+PoC7v+TRVLuUMRghinui4i8f\nAhua2Sy1SxZCIfA/Ay3m3P3+eF/yfhyMPManeRT3cPWBq4ztXc1sARSe3QMZAcUx9Yu+GO2ibWrq\n1YmrKNPkNNBZYCpKcj7wM3ffHDkVZzGzPhGatiyq4HsNgLs/7A0oagOjLYQOQWHCd4RTBmSIH+/u\nd7jy/L9Teqxh1By5F4VO4u77AS9UcyV4EO2sVIu60s+Yem7dD1AO+nqmHpKY2SSoEmtVa+ERVCDr\nxsJzewgq1jUz8J0wqqZCPTunNrOj49LXUTGqvgXE/AKho28GXnb3v7pCtE9E7di2NuUNggrANaLI\nHoyapxejqrGDzOzc2sfL0EA9EnP4ctSb9kCknzcAjqobsWiOTAXNuB9hlEMJdL8NQJXt1zHVjFgP\nONbVExjgPI+6IYXvyTmBn5jZ4WY2t7v/BG143RjrFdAGUX9GLwSX/BewHMuxY8p7GYF2Fp6Ocz9F\nfUk3rjwp1k1ORClMlfG+haryrogKCb2GKhK+iW7+F+LazrzBopjZZihsdd3auTVQn8Cd3P3phhja\noxEPjiOQ8fEaCuH63Mw2QK2L1kALpZ+5+0XlJBVhgGyBHgpVXslNwF7u/kjHtY2Y2/Ggmh31GF0M\n7bC9Dlzq7k91XNsImWFUFMfPqzltZt9D7S0uRSGjS6C/xT+Bk9z9wlKyVpjZILR4u8Ddzwzj6iK0\nC/WQKf9yYne/vKigHZgK26zo7lfF8UNI572ICpGd5O4vxmeNmSMV4ag7AFUE3QjN9wXQs/qtuKYn\nqu5ceodqMIrSON3df2ld1fgnQAbKfKhwzAzA8ahgyWPlJP4iZjYUpXvMjgzxI1Ee5kzdze2Szx5T\ntMOi7r5LjPFByCkwEunBvVHaxzQoVeXiEnJ2YmYjkK440JTnOiNy0IAcMxMjg3s51M7qyu7/pfGP\nKSJsR+D7KEqpYjtX1Exj9Eg46G5DFewfRhFsa6NWfX9DUSkXo7H/DsqRvqWMtN0T9+NJwPaoMvIK\nyKF3n7v/I65pxJrVzKZG7SYPQWu8z1FkwSnA/6Ed7k1Q94bjUJTjo2Wk/XqSO7BfzqsonHUJMxsI\n4O4jkDdl7+qihiiwwbHwvAlVphyC8gbed/fX0cN5dbTwBxQSU0DU0TCzFcPoxt1/BQw2s3ohhLtQ\nY/mZ45rGGK9m1iMMq7+iEJetkCI718xGooqWP0TVZtd394sasGtirlDFk13hwtVOQ2+6qm1OHcZA\nI+Y2jPq7P+vuH0Skw1Vofm9kZhOawl37xLWNkDl4GcDMljSzS1Fe1bRokY+7H4Meduu6+4Wl50fQ\nC7W3ODPkqfpfzgLg7je7qlX2iEVpcWJh827NeB2OigathBYVU6KG901adPaKn73j1DA07me4+6po\nl/tOouBiyP1ZQ3Tgh6gtxy/DqD7IzPZFeYDboV23vVE/zwMaaLzOhu7BXVCI8Jsoiuazynit7QoB\nxZ89TwHzm9lcrrDVg5HDdA13f8jdv4UqO6/khSskd/AyUaEczYnD0E7yVq4KvtXxpt6QasOVDO5+\nK7FzjIztVYEhNeO1SeGgn6EIwWtdBUQvQevAPZGTYGWU6jEIRbIVN17juT2wdmoB4C53v9vd90cF\nkU5F4cQTQDPWrMGkqEDWKe6+C0oRmhC1zhyBHNQ7Az9CG2BpvP6XacTCo6mYWe/w1p+ICjmsFbsp\noDLwHxYTrgNTjsktyNN9NSqmcilSWBuZ2fSuUunnAAOa8JAAiJ2co4F3a6fXAKYys3MAqocFsAgN\nwcwGmFk/ojKyK8d1NvSgOBY96AYhBwju/pdq56fkIshU2bTqXVwtKqrWQ6+gtkTzo4qhg4sIOQZi\nkT9q7MKIvQ7psZPRw7sprXIGmFm/GO+PUdjz+sCr7v49V1XIW1F4fC93fzXmUOlFMiHD6ygUClRw\n4hMUCvUJjMozxVUZsvgCLsbws7rB4e5Xuvse8f5eFJ44dRw3QeYhKK91Flf/yx6okv3A+NxQGsXE\nwN1h7BafGzBKtgmBJc1sbVSQbCDKmV/BzHZ19wPQAm6Yu19R+pljZkNMLZ6qdU9PZLS+7u4vox3N\npYFDTFVaiy+WzWxyM5vMVEfhHlQQa2lTv/aP0eJ44djlrNImXo73jZgr6Nk+1BRddbu7bwKsCexm\nZmu5WsmdFd+v+PMx3vaI4x6oFdueqCf30cDfzOxsaMYY14ztj5COOyuO30K1Fi5CzvO/hGNsPy8c\nDg+jwm9vBM6zrvD3J4BeZjZtOAdOA/6E9EjTWve9ge7F9WG09cgAM1vc3X+E2ieu2gT993UkDdga\nZjalKVeReChfbmZXoRvnYGSUHGRqe3Ew8ogWx8ymQkrqBBTu8jdgQHjYjkE7lz83FX74P7Qr2ATF\nuyrKc9jI3X8bC/4BrhyZDYBpzOxKMzseGbWXlpS3wpT/fCHyuO0Zihi6qs2dgcJ2nkG5JxMWEbRG\n7KgOjh3XVYGTzWxXMxtamwsvoR2T01Ee8p9LyQvd3o+XApea2UbVNeEh/xiFSg33BrTm6Jgfe5jZ\nFGhOzIQWm0vFpbcip0HPbv+hgoSzoHIqVfOjPzChmS0O3GRmCxURrsZY5vUKHdctiPLWnuz2HyrD\nwsD8qDfjnGFU34BaimwW9+XMwHmosvanpfW2mU1q6lU8yBXFcTjSeZO6+26uaJM7UEgd7v5JLKRL\nGyY9US2Ig4FlwlD5C3IsrWTqjfkq0n39iTY5JQmn9A0oRPFqUz70r1G7rdXNbPYwYk+jIXnzoIKG\nZraTmX0fwJX3/1fkXK8cui8guRuxmzYGPbIbqqtwJVpXXexqh7It+i7FiTmyr0URL9Q27o1YM1XO\n/98Di1hXPj3xWcn7cUa09jsNGacLhF75HXLYbQEsZ2ZLoLm9X+XgLYmZTWdmS5vZPO7+N5R+MLz2\nvLkTORG2BBXEjPVsI5wdXzeyD+zo7AdMZma/QGEXh6Nk8jNRmNH2KF9tcWAzj6bgDZiYfYB73P30\neFAvh7zI0wKboYXokSifYDuP5PcGMAEK63s/PJ3nAX3N7BmUm7lyGC79gBM8emOWxFSQ4kK0qHgd\n5anNCzyOig+MRH1SD48d2jlqO51FiMXa9sB8ZvZLtIg7F4VTzmVmM7v7WWjHdUuUQ3h7A+Z2d/fj\nxKhoQh93PzecA6uinONRDcFLyd3N/NgYjeeFscgYgYpPfQv4JvCD8JwXxVQNtK+7vxD33LaAm9n5\n3pVL9yqaHwNRPvqDhcQFvtK8ni7myDCUV/X92IltCrcgD31P4LdmNoe732XKA7vIzJZDc3v7aneq\nJKZd9zORrpvG1N/6GuSY2cfM1nGFb78KDDGzSYF/NOD5WFWg/h3ScavHuTvM7B5U2GtZM/sjmk97\nuPuz5aQdtTt/GapMfg6K6pnS3W8ws7eRXlnfzO5FBsCWY/inxisxRy5F9+EGZjafu2/j7t8J1byN\nmd2P8nQ3RTmbRfkSPTIrcjJu6KqoXVUKf20M/9x4Ixzn5yNDcHMzmzXG+VBk1F6BxngipLMnRs+k\nJjAN8JC7nwNgCiPeG0VEjECdEbZD439IQ/Tf7Oie/A2wk6kg6o2oUNZWZjbQFQI/EhXznLD02u9r\njzegl09TXmhH+hcoT+OE2vmhSIktVEq2Mcg7AOXiTo0qPR6FckV/SVfhjEdQbuOayJgqLnfHdxiO\nPGzPo+rIs6GE95NLyzYGeXcADqsdb4o8+/3ieNr42XN8y/Ylcs+Kws3uIXoEIufBRihEvjfyNq9W\nWtaazGO6H5eJ+3HROK76fBbvBTeG+XEDMg5BD7s50a7V4g2S+zi0GFoO7aCtBmyIUhC2iGs2QQuM\nRvSLDpnGNq+PQ3lKcwBDS8vaIXdPVHDn1tDjO6AWDK+hBecsocPnLC1ryDsE7VZuEc+bA4Cj4rOp\n0GLzIRTy/DzKyywud8d32ADtUP0IRcisjlqHrYKK9JyKwv2aIOu6KDKp0oPPouf6hah1HMgBtiew\nXBwX1SPIQLoBFQcCOdZ/B2xQu2ZEzJ3raEC/6JpcY9MjR6OINmhI/87QEbchRyKhK05Fvc8nQulL\nv0QViR+t/w2a8EKbQNehHczfoY4Ii6G2P4fFNUb0BW7A3J4GhTJvG8d7hy6cMF7fQtE9v0AOvHVK\nj/H/wqu4AE150dUsuUcsfH6DKhNWzbePAZYpLWdN3nlCxoXieG6Uv3NGpWzj/AXANKXlrckzObGY\nRGHBEyEjtt4sfDBwOzBVaXm7kb8HXUZqL1Rp85pKwdJlyBY3SrqRfRbkWX4MmCzOTRbzaM7adVZa\n/q9wPx4NLFt6TP+N+TFBaRnHIvfYnAULokiDJas5Ulrmmoxjm9eNc9qFfL3j5whghnj/MsqBnby0\nfN3IuzGwe+14GKr+3fl3WA61nCkuc02uHvFzPdT2DpRu8wmjO5t6xc/ic7suC4pEOQEtondDKULT\nlpaxG5n7oK4HfWr6+xhUfbXz2omaMtY1mcamR2YvLV838s5czRW0cXE1Kmx4fKX3UFHGyZs21iHP\nCshBcBMwYZwbjKqBT11avg5ZZ0B9Z0HOx+fRbuwDqBtJdc1QYMEmjvfX8ZU5sIFHARBXHtL3Uf7i\nCNQfrrrRPhnbE/Qm4AAAIABJREFUvzG+MPU6PBO1w3kwQj3/hLyHPVA+IJFrNx8KwW0KBuxuatmy\nBzL4rnTlllSsFNc1LvzCVbSmKpLxKVp09nZ3N7NlgO+b2UQeGqxJuPszqFH8jcCJZjY92v2ZjNGL\nI3lp+b/C/bgxDcr7qvgK8+P/LKo7N4XaOO+EimjMa2azR5j2XSi/fmJ3f9QbUGilky+Z143DzOZB\nIXKgCvcnmdnDKHTxAOApM+tfhcM3hGuAa2vHDwKTRpoEES73jLv/xt0fKCLhmJkVwFVZ+P1IrVkR\n7WD1M7NlI4Xl87iu+NwO3VHJcr677+LqoXos2rlqRI/XOq5UiPvd/SPvKn71V7oq288daxeIZ3sT\nxrriS/RIk+7Fiufj5wAUDbE22gwYgEKicfd/uEKeGzPWZjanmV2J8kWvRg6Dal4MiZ+NWGtXuPtf\nUP45KDrpYndfH82Xw81sGVeRrDu8K9S8EeP9dSYN2Bodi+btkJI9GCmFLd39vqICdjEpal9wqqlt\nyHaRtzYHynU9xcxORUbuCC+cz1PHVSjjbhQu8pCr2mlV7Q8z2xYt4nZ293eKCVrDOtqEdCws+wLv\nmFp2/AL9Xd4bn/L9K7j7S8hD+yHy2p6Aer8+UVSwbmjL/fhvzI96xe3ifEXnXeOcBXXaNK9RzmVV\nAOtCNEfOcvdTw0BZ1NUOqDELIHd/36MPetAT7QJ+EDm7p5iK8DVmoW9mPcPAvtBUfbg/eu48Cpzo\n7sPQvP6HF66mXXv+9eo85+7P1z5fEhX/alTht1rtgXfjuNKJA1FV2YWRsTJ5XNeYuV2nDXqks86D\nu7/p7mfValZcDnxiHS2gGsT7KMLgU3f/EDnxzjOzo1A+74+rdWGTqOmHX7v73nHuZjTekxQT7H+Z\nf3frts0vlO9XhXr27ebzevjiqTQvJGpR4Jp4fw7KXTsO5Z8sjBqFL0VDQhk6/38UCroC8oAfVDs/\nGyo6NXfpMQ55JkXhUH1r56rQv/4o/Hki5EF8ggbkBhKhZ/G+z1iumxH131usATK38n5s4/z4ku9T\nH+fjUZX1Y4nQqcKytW5e12SqwsdnQgv6bwCnVWOOivNU73vQgDw7usJu6+Nute/SF4VbDgPuR71f\ni491x3eo0g2uI1IkkAG7RXfzqqCck6Md7SkqmWr3YpV72RMV9PoTDckvRjuTC4xt3qAaALeiCv3F\n50hb9cgYxrqaI1Y7txyqe9KIXO66fCgFZQNg+pgTs9auqWouLNv5nQrJPGGnbuhOL6N19mNEPY58\njd9XNbH+ZwgP50rAP1G4wlKo6uBHHdf1dO1MjKrE2oCqrKMwswtQTsx97j4ivMuboMXQj8pK1z2x\nuzof8oDfioo+nIJ66v4JFdP4gTdgB9PM5kZFEF5E7WVudveb4rMZ0U73ke5+v5ldAlzo7pcVE5hR\nrU82QsVUegHrAD9xtVro7vqJ3P29To/u+KSt92Mb5weMmiO9XDtnfV0e8Prn1Tj3QPfmL7xwSGgb\n53XI0Q/43N0/Cv18I9p96Aksj8L83kUG4GvekJ15M5sc5aWt6u6vxj3qMS8GevTlNrNH0ML6O+5+\nfcOejwugfNFr0DjfiFqDvVO7pqcX7vNaYWbHoeffku7+ZpybFtWC2AoZuLujEN2RhcQcRezuHYJ2\nnk5399/XPpsGzZ0zzGx1tEM13N1v7P5fGz+0WI986Vij3e0hqNr6Ie5+TQFRRyP0SE/gU3d/3cx+\nCCyCCgFuhtoTPY7+Hg/76BEexTCzAaiX7rnAta4e3dVzcTD6O7yCnGG/QLUBrism8P8ypS3oEi9g\nZZQv+jxjqRZGlxdxAmBgYZlHeb7j52TAJcDTtWu2QTdecQ9+N/Jvi7ywS6H+e9+P83MgY/ZemrPz\nOlHI852QbzO0gKiS9X+PQrOr67/gCS0o+9KoyMcLwCxjkosuT3nxudK2+7Gt8wMt2laJObIpqj79\nhV2I7uRtgOytmteoqN4lqEJ5VcW5qpa9OFq47YMWcefSvAr3x6GqmpPWzk2LduWHoqiJc4BhpWUN\n2YagvPiqCuvc8cw5IO7N91GBslPQAr+4zCF3pdMmR9VY/0hXAaGzgV1r1zaisn2HXjgJ+DEwbxxP\ngiomV5V8pyOe66V1SMjQNj3yVcZ6hzieDJipCWMNzIWq2V8Xz5nFap8ZKu71feAgGhLl0yH/Zijn\nfxW61t6V/lszjlcmihrmq9DfqbQA4/XLjq4MzkSG02p0U/WRrkXcIBSiW7zqX8h6LHBgHE8bC6Dr\n0e7r401ZUNTHHIVj/BSYGS2cR6LF9KjKrMAkpWXtkPk4YLqOsX8Y9QGuL+qKP5Q7ZJ8cVcZ7pnpo\n1Bc+8d2quT0Jqlo4calxrr1vzf3Y8vnRKmdBTZ42zevZgD8A3wbWR7uAwzquORkZXD2oVY0v/eJf\nM6gmqMa+sMxzoV3sc5ED92eMHta/IAoHnQs5UFcqPc4d8q8VY70N2il+HoWaD6pd0wjjtUPuocgp\n8AIq9LZIjPV6Y7i+uC5skx75T8a6sKwzoTDm9VAo9t7AvvU5APwA+HlpWbuRvfrbz4cM8L+hmht9\nUc/unetzpbS8/+uv4gKMty/adePMgWLwp4iH2QXApvHZTKjHXeV9G4SazS/fAPkXQwvkTUM5nFBb\nbOwAbN25SCooa3e5Anugnamba+d2J3rdNeFF7JDE+zOAyzo+/xYKGZmUBrSaqclVze0pQ66pUZXN\nR4h8KdQioG5YDYoH4gqFZW7N/djW+VEf73jfCmdBW+c10euydm5H4Lz4rMqR3h/Ys/S8GMN3+EoG\nVRPmNzJIbidataD6EOdQa8GGai480nGumOx0PberuXA80c8zjo8LeSvHQfEomW6+w2woFWg25Eg/\nGaVNzFefI6XlrP+t26RH2jrWIcs6wBG146WQ03QSup7ly6Ed5eI6pBv5hyLH3VKor/VvUZj2JLVr\nGjPe/8uv4gKM1y+rBdujwIEo8Xpi5CU/B+0Qvk6XV25gPBiL95oMxXUm0YcPhUjdFIqsUQ83Rl8o\nL0VXk/XFkfFRLTQ2RgZ5I/o0IkPqFqJJfJy7DTi1djwj8n4W35HqRv51Qv6zkMdzqhjjZ5G3805G\nDz0aWXput+l+bPP8oIXOgprsbZzXA1F9AkORJisBV3VcsxwNKepFiw0qZJTswujFeW4iogvoigC6\nlCiSVFjeISjnrwpf7YF2h/epyTsF8BwKVxwVqdQAueev6Yc5UX/USeN4CtRd4CoaUvioQ/7W6JG2\njjW1Hud0FabrjXq7Xk9X799+8R2Kr61DngmpPbOB7wGn1I6/g9Le1qehfdz/V1/FBRhvX1QetntR\nBchNkYdlYHy2FMqXWSmOewFHA8uUljvkGQpcjBonVw2qJ0Q5pb9syoKC0Y3XnePhcB1wZ5z7Ntq5\nuhWFZ8xbWuaQq3pAbBfH1Q7U4HiQXYYWpcsgo3uW0jJ3yD8v2jEZiJwaI4mwJ+CbaPdnlTjujQyX\nFQvL3Jr7se3zI2RtjbOgJnNr5jVyMu4AfLebz6YHLo33S6MFUbU4LR1+O4R2GlSD6VrQVw6aygA/\nia48tZniO8xYWuaQZ33Ub/Y5uiojLwO8AWwWx0ugoj2NyK9Du+2PoXSloTEHJop7ckO6qidvi57t\n3ygpbzfyt0mPtHKskXP0ZOAIajvD8VmvkLUPaiH2S2rRTIXlnhOtRa8Fjo5zw4DT0G53pVtuiO8w\nuLTM+ar9/UoLMM6/YNcEnAolja8fyqx6YA8jPEMd1xe7wWoyzI3Cnwai/NFjkPdwtvh8QhrkgavJ\nvxTwq9oC40rg9trnQ2hIzmuM4RvAyXHcG+2OrFq75vxQun8E1i0tczffYUXUUHttFKozc5yvnB11\nz2jPkmPftvvxazI/WuMs6JC7FfMaLYIeAvYC/ox6jHaO/3koDK1R7ZRop0E1J3KwXAL8iAhxp8sp\ncGTokQXRTtVcpWWuyT4QOXe/h6qxVgbJ3HFfVpXNVysta4fcZ6Fc7iOAoXFuC7Q7fxyq9Pz76rMm\nvdqiR9o61nE/PoAceGcAF3V8PjHqd70jcvCuXlrmkGtGtPbYIt7fhpwFg0LefVG1+CXQbncj9F++\nul5f2zY6VUl/M5vA3T82s0lQmMjUKO/r82gKfhjaWXm2/nuFZO7h0SzZzFaha7dyaZRI3hPYHHgH\nuMDdnywhZycdrU0GIgU8Bao0fH+cvwyYx91nLyboGDCzTVHhj63i9QHKpRoJHOrub5jZhMiI+nvp\ndhG1uV39HIIWPlMBa7n7n81sLfTA2NyjLUNJ2ng/VrRtflTUxnwqlJ/7InKAbeLuz5jZMOC3Hm2r\natf39wItXVo6rweiFhbnu/svzGwuYEvgCne/N9oRTYuiZd4F/s+j3VITCPk3Q8+WH6JF/avRJuoi\ntOBfFdje3W8oJmhgZrMj5+jRaDH8U5Ra81TtmgPRTs+kqM/4tSVk7STaoQxGi+PhKALiCOBT5NSd\nGvgYOfAeLyRmt5jZBijfeGq0yP812lF7HxkwcwGXe+FWOdBOPVKnZWPdGzmSfu3ux5vZxOj+vBI5\nTV9x97fN7DfIgbq1N6ANFECsN7Zy9x3i+EG0E/smav30XRQ9MysNaU2UdFDagh6XLxQ2dy2qHjYE\nTcTngP2AnZDXfIyVOMezrJOjhSXoRj+FrqbOOyDP/SzAQqgScVNCR+phw6vF95gXVazcjdGbVZ9L\nlHkv/aKjfQiwBtqJOCmOJ0Ne0L1KyzoG+YchY6QKaz0A5a5tG589ih7WxWXtmB9tuR9bOz/o2rWu\nqsROgnZ33qBrl2pJOkLQ6vdyQdlbNa/RDn2VO9cjxvl6VADmZKLCcBwPLy1vh+w9kaPxVmBAPGf+\nAbyGFs6zADMQO7NNeKHw63p+7gMoDH4XusJAd0OL/RXjuPi8DjmqEOcRwAzx/mXkkP5CMbUmvGr6\nYj3gmHh/AvAJcFjtuiqPuilj3So90vKxHlTJFc/wS2Osj6GrxsKRNKhgZ8i0OEpx2x/txP4KFUt9\noBpvouhXk8Y7X12vr/MO7IKoZ9aN6EH8KXrQvYUedu+i3YeRpXdMolH2Jigf4x5gA7To3A+4xdVA\n+QAUZrmlmQ129zdKydsdZrYnql65vbs/aWaLoIXEQ8D17v5EUQFrxK7IBkjhPgts6e7fNbOFgMfc\n/cO4bhtUJOSIkvOjEzNbHCnb41EY7sXuvo+ZbQysDnwIXO3u15We2xUtux9bPT8AzGw1FKp4EzK0\ne8f705GHeXvgh+5+VTEhO2jjvIauyJnYHVzetRPbAxmt97n7fmY2wN3faZjcvd39EzMbAVzo7n8x\ns5eB/six8VphEcdIPDNPj8MzULjw/Kja/XSoxdXNTRlvM5sHhekfa2Y7I2fe9KhHZj+0+z0D8M8m\nyFthZrN7RHqZ2Y+RvCOBl5Cz5gpUpRWP6LHStFiPtG6sOzGz4e5+ZbzfE4Xvb1P7vDHjDWBmK6BN\nl21R+s/7ZjYYGbbruvsrRQVMxk5pC/q/+aJr52EGtPDcNo7nQN64o2lgzmhN/j1RCOWeaNG5P12e\n2nWAM0rLOAa5l0DGB2gXYkEU5jIlCiXZmfA+N+VFV2GSV+naQanvJg8F/gSsXFrWDrnnQQ/mLeJ4\nQHyPn9Su6dv5fQrJ2tr7sa3zI2RbEBWd2C3mytFxbiYUCv1DonF86TlSk7k183os38E6jr9NFENq\n2ivGe7d4vzOKjHgYOTZ2Qzux/Zs61iF3vQ/zkPgO03Vc0wj5UYTJ2fF+MKqIu3vt80YVfkO78/2Q\nA3r+mAu3IOfXznHNESgtqLi8Nblbp0faOtYd3+ELY4lyj3+FcmAbMdY12eZEa9NeqMfr0XTVllkY\nhT9PVlrOfI391YuvAWbWH/V4e9rMFkWhcvcAe5nZhe7+hJl9hnY5NzWz59z99ZIydxI5r2shZfYK\nCmlYEZjXzJ5AIYyHlpOwizF40fqY2W5okTwLsDLK3T0QeN3dPxnPYn4Zb6NqfzOgcX7U3T12TmZD\nlSz3cfdbCsqImfVBhT5eNLMp0JjOC3xqZiPd/ZXY3XzKzCZx9+2BjwC6+RuNL5lbfz/SkvlRUcv3\nmgGFcP3Y3U83szmQ82sLlDf/f/XfKzhHWjevv4y6XGa2NCoCsmc5icbKsigdBZSTuS5wlrufCmBm\n13mBPOh/BXd/qXbYHxmGE3RcU2p+V/fjTMgQuRktjEFRJ5u68uV7Ao6MrCbRy90/MLO/Ah+7+7tm\n9gOUI31OXLO/u39aUMavix5pxViPjc6xNLOVkbP0B+7+zzJSjZX30bh/iubKe8B5kau7KpK7aWuS\npIOvRQixmc2Ikq5vQRUUt0c5o8ejPLv1QinMBnzq7o16WITivRyF3z5mZrugHLt3gRXQYvpkd7+r\noJjAFwo2LQa8gEJyhgNrAse7+x1mthfwsrufX07a0aktKmZGOwy9UGjlxcCN7n6omc2Keji+4Cr2\nULKol6GcjAXRfFgBOTLWQEbJdcBtrqIrA4EF3f32ErLWaev92Lb5ETJ35yz4EbAIsLC7vxcyb4KK\n2hxS+sHc1nldx8x6uvtn3Zzvgwp8HYtCtBtR+KMbg2oyYF93/04YUZN1GFR4Q8MUOzGz4Siq4EB3\nv7qwLP2Az939o7g3b0SL5Z6ooun26Ll+P/BaE50EZrYA2oW/Bsl7I4r+eqd2Tbfzf3zyNdEjrRjr\nOmN75plZL9Qx4xeoLc3VpZ+RIVel/+ZFnT3uQ3VadnD3p+Oa1dBu8d/c/c4myJ2Mna+FAQtgZruj\nioSHuvvBsVPSJ84tAgxr4sMCwFSR9QZgRBh/vVH+wxDUo2oxZAD8tPTis8LMvodaMNyNdly3rjxt\nZrYFUba+Ug5NIRY7I4Dn0ULiGDRPzgEeQeGt3/WonlyayMc4EXkFD3H3n8X5LdAD+zbgZnf/W5xv\nhNJt6/3YwvnRVmdBq+Z17GSvjULKR7r7y2OSKYzEPhFpUFruVhpU1pVXPNbxi++3N/CAu19T2OE4\nNzKkJ0I5xedYVPQ25WWehRbNS6AxP9rdHywhax1Tld7FUS/oe9G6YwnkqFsX6by70C7xa+5+QAk5\nu6OFemQILRxrM5sGmMbdfx/HX2bEThqOA4OikRCTI133qbu/bmY/ROuPN1H19SuBx1H49sNNW68m\nY6fVBmzHbuCiaGIegbwqF8b5gciYutLd7y0m7JdgZnsg78/l7v5HU0jxLsDWyCN+IrCrFyqs0c3O\n609QaOUvUfjWt1BFzjni3Bbu/scSso4JM5sFNaheHY3tuqjf3luhoHdBRbNuLSgm8IXx3hA9oP8C\n3OPuN8f57ZHRsrc3oNhA2+/HNs2POm1yFrR0Xs+EdnOuQYuhjVC17Ie6W8iVXiTX5GirQTUrsCFw\npisE9MuM2FHt50oRDqKL0XPxY9Ru6/hqTsc1J9PVv3bi+i5bKUxtn85GTq/PUATHft5VqG5BVNDu\nPNQfs5+7/7qQuKNoqR5p61jPiVreXQ+c45E2MxYHXiN2jGO8T0G67c8oB/138ZmhvNc/o97MkwLX\nekNa/CRfjVYbsABmtixapI0Mw++bKAdsQ6TQdkK5ah8UFPNLMbPpUF+yxZA3fAOUwF8piypev4Rs\n9YfFd1G4jiEFvCZqD/GhqaLbfWj34R8lZB0b1tWj8SlUdW5zd3/WzBat76g1aAG6BFow/x0p2v2R\nk+MCFLY9DfCUu79QSsZO2nw/tml+tNlZ0LZ5bWbfBtZ0903jeFfUzmUbd3+gvmCr3sfYD/VCIcQt\nNqiGoOffg8AfUDuRMRqx1XMxdn0o8YyMxfBe6Ll3SJzbEeUZb4Zy7T4xs/2BD939qPEtY3fE7tQl\nwCnufmHokV1RJFi1Yzl7XDOsKbuYFW3SI20da1NKwV4oyu5RVOH7pjEZsTX9NwnqsXp0IblnAq5C\nDrwH0AZLD3c/vJLZlGc8mbvvUULG5D+nR2kB/hPMbCiKtZ8buNDMNgoPyoaoyMp5wK1NXCx34ipI\n8RPgKGQYbu/ut1hQyngN2aqF8nAUdnYN2qXa0d1XDeN1B5TL0bMpxmssLKqQFlBY6IzA7mjx+Wzs\ndJ9oZtNXv1f4gVHJvCxSwNugXnArofnxDvA9FLpNEx7OFW27H9s4P+oymNmysfv6gbufjJxevzCz\nYaaQ1x8BBzfBeG3zvEYhZu+b2dShi49HUSZXmtkM3Rivg1DLiyKtzmKshwOXufsF7n4Z2kHeMh4n\nvePSl4Hp3f3zJhivwWQo/P1A1Hd5DzObOua71S+M8f40FssXop2U8U7og18Ap8f49gKeRk4B964C\nhnciI6Ap9EDOxUsBwkk3JQpxrebRi8ihN2qHO5+P/xatG+v4/z9D7ap2RbK/A6wSjun62tA6nHeX\no6rmpZgPtW+8PObAncDaoSsqPfJboG+nXklahDegFPK/80KVQK9FXm5QkZJriGbJwFTAjNX9VVre\ntr+AaZGC/VUcb44WDYcj5fYg0W6kSS+0Q3wCCo8D5e2egXqSbokqza5ZWs4OmZcEDgKWiuP10E7J\nsDieHRWkKC5rTeZW3o9tnB8h59CQ7TTUI7Aa52HAM8jrvE5pOTtkbs28jvlcyTkpypUa0XHNEdU5\n5LgDhf6NBJYtLP9AtANlqBjZSsBVHdcsB6xSeqy7kX3i+LlUPF+OJNrjABN0M943AysUmiM7oJz4\nzs+mBy6N90ujHfsecVy6vdlglKM4ShaizR1yNK4Z72eK+TNj6TnRIX+b9Egrxzrm7/x0tHeK8weg\njZZ5kAG+QO3zSv8tU0juCWrvp6zGO/4O1wMTxbl+qFVeUT2dr//s1co2OqYcr/lROMMGwB3ufr6Z\nfQ7sZMqJubC63mPGJv8+rqIluwGnmJpVn2tmD6MH+DvAZu7+WFkpR8dU4e9QtAjazMxud/flzewv\nqFLhLKgX4sjSoTohb5XLtSNqdF+1aLke5UEfamoFcFHtdxohNy28H9s2PyoiPHQEioC4w8w2ATY3\nM9z9YjNbBoUzvtAEuds2ryOU7wo0L+529zfNbARwQcz1U939TRS2OANop8LMJkJFkvZy9zsLyD0b\nKlzT091PQtXrQW0ingI+ieuWRn0Qz/CvUChpXPP/7Z132FxV8cc/kwokAUKooQUIHSkC0otIpIYO\nAZHmDwQlSBFpinSVpoD0IgjSiXSUptJBiNJ7VQEREASJEgjf3x9zbnKz2YQEkj33vM7nee6T3bv3\nfd/Zk7Nn75yZ+Y61EcgCkHRvCo5sikePXwSWNrPDJI1O0ZTLcfGejo63eV3gJenY08yWlLRn7ZLe\nwIdmtj4uBLd3+gxkXf+S3WcAb5rZU3jk+FW8JhPgA2C0eT3macCuTfleL3AdKXKs0+dxBL4x2sPM\n/iJpXwB5u6IL8Y3fI/HP7ebAw2Y2Hf5/sp8ydMxIdu9tZv8CLpH0aLL5o3RuOnwtXANvK7dfjnU6\nmIrk9qAn92Dc7tWcwID0eCi+QAyvXbc9sEJue7vqgS9cjwJb57blU+z8Ap7md2Dt3LW4rH71vHtu\nO5Md1dyerXbuJDyyMH16Ph0eHVwpt70tNhf5eSxpfrTY3Q1PyX4YOKV2fltccXPb3Da2mSMlzesF\n8NS3Xav3ULN1Xjyr4EzgFDzdb+Pazy5NpiwU3CH9M16v9jJwWsvrg/EU/vVxEZlGRF7TeD/JuPKZ\nv9ASPcOVWq8A/gMMq/2/XEmeyOtMwB14vTnAEsn+ldPzbmmuvII7AU0Z60Xx+uKv4WUeNwKLtFzz\nQzyCNpKGZJ4Uuo6UOtbdcHG3g9PzuYGHcPGm+nVfx8WRNmo5v2AmuxdP47g7nkF1ecvrffGMwT3S\n+r5h7rGO4/MfRYk4mdkmuCjJm8DrePRkZbwu80UlCfVg2mLeL+tsYB95fVUjqHZbzXsxLoCn+L2P\nq7M+k665GW98vpw1Q72ysnl9YH/gPuDfko41s0txhedh8n6e2aNpdUr7PJY4P5JNld1zAh9JetvM\nhuJ16E9IOjVdtz3wjFKrg5yUOq9TNPsQYD18Xp+K2/oaXgP2CC6YtSTwpKQ7au81y/tINWfX4VGH\ns2ycGNnVku5PUeO58fYc7wPflXRzp+1sh32KQFY6txpwO7ClpBtr4z2DpFEZbJ4BWEjSY2lsH8Ud\n7x7AC/im2HtmdgtwuqRrOm1jO8xsVzx99Yz0fCTwBPBH4DlJN6csqx/j/ye/y/3ZLHgdKW6sK9Jn\ncJSk89Lz7rg45/2Shqdzx+Otqy4zT5OwXN+V5jX9VwK3S/q5mfUFfoWXfdwPvC7pX2Z2B7AQ3vIx\n1Ia7AI12YG18dcdl8F3vLfAIxF7AsviO0WZ47deRak7RfpfGvIj/BTWnr2T1RfdVvP/scDNbHheW\n+jMukV41rF6+ujlqAuaKhL/Ao2h7AYMkrZ9eG4Hv+H81tzNV8uex5PkB5W0WQDnzuhUz2xOfwzPg\nQh/X4XN7WVxBuxG9uCtKdahg7DoyHI9G/T19RofjvV1XlacsLgvMJ+m6dLMMZE/FrfrULgqsnTYO\nuuFRwQckfd/MZkzj3gjHpCKN4Xnp6S+A5fASkP3wMpB5JN3SFLtLXUegvLEGMLNt8UyODST9I50b\ngEdmj5D0UH3TDvKXBZnZzPJ2dz3wyPcL+PfkGOBBSRcnp/tBSVfktDWYimgqhnOn5gHMhgupVAXv\nS+PpAf+H76osmM4vmf6dI7fNcWSfM1/F0/rWqp1bBF94D6ElhSejnfNREzkA1sVTjVbFd2gHpfPz\np3+XaYDNxX8eS5kfya7utcfL4CmLA/AUqCdwYYrewDD8Bmn+Bthc3LxOdvQFFkuPV8LTEb8FHFu7\nZi681m7e3PZO5D1UAkGLMi61tRtek3ZMej5j+rdJImqTFMiq24qnDTfG9nZjCWyHb3Jkt+1T7J6n\n9ngQLsA3T8s1Wca61HWkxLFOf3sBYJWWc8fiTmA9bftsMokzTeH72az2eH+81r8RYx3H1D2a3Ean\nB66wOZeHReDeAAAgAElEQVSZDcQFKbbD04u2kPSima0LnG9mAyW9kdHWICPmTIeLCewnT+vbyswu\nBxYGjsBT/j6a1O/pIF8APjCzfun5P4Hj8B3ar0p6OUUKDzGzPpJyytFXFPt5LG1+mPcMPMrGtTsR\nLhazGd7Pc6i8NcdguWDJIWpGpLvEeQ3QHzjFzH4KnA8sKk/9O7x2zaz4BkKvzpv36Whc9OlZSWfV\nzp2Hpw2j1CpHUs7I5VJmdmr1XC6GtT8wzMwOMLNZ0ksvAzPXbVWiowZ/CnV7Uqrzwbgif6ORt+2r\n6EebuZ1xrEtdR9rS5LE2F367D7g6ZflU9hyIp+XeZmbbmNkwXCAuWzvHT6MWDa5nmPwJ6GVmfZsS\nLQ6mIrk96Ekd+A7ycfgObR9gF/zDthV+4/w4DSl+jyPL/Gjd/f468BYutPJjYB+8714PYKbc9rbY\n2g+4E3eqpsMjgJcDX8LTQh/BU12z21qzuajPY6nzA4/2LYhHIgbivWn/kMZ6YLpmXTwaMTC3vS22\nFzWvqzmS5vJ/cUVbGD8CvimeltsYuyfzva2W7P5qbluSPfPgPXX/Ddzc8tpg4Dd4a5EJBLKacDAR\nUTc8E2J1XERmaG47p/A9bYYLwjVqbpe2jpQ41nhGwyF4W5yvpO/CTVqu2SW9fk2Bc3vdNE82ym1L\nHNPmaFwNbC23fjngGTx18Zv4l9838ZuJpfHUo6vVsBYXQWeozZN18C+054Df4Sk6b0h63szmAy7C\nxT+y163VbF5QHrHcGY9i/hRXrFwd/8L4G95v9/rcc7vUz2OJ86NOqqf7CZ7ivD2wDT7eJ+K99vbB\nUxVvyGZkosR5XSfVYa4AjMZrMY+Q9Kv0Wi88ffFdSdc0zO6xNekt53sDKwInA4dLur7jxrXBzFbG\ne0aeaWa3A59IGlJ7fXZcTXQp4HHVBLIy2TtBe5+J2WNmC+Ctq57OPUdq9bmTtMPMpsdrjUc24TNZ\n4jpS6ljX7JoB6J/m9pZ41smhaqmTN7Ne8tZVTbF7onakOtgF8bZFP1OqnW+C3cFUJrcHXT8Ytxu+\nPvASsGJ6Pj9+M3c+KSefBra4iKPj82UjPEXk63it1wm11zbHI4Kb57azxeZNcOn8pdLzndPz9dPz\nXoyrM83d8L7oz2Np86M23svhAkLzA8cAv8SjPNvg4k2nA0OaMEdqthczr2s2d0vjfFptPIcAL+LR\nkrWBS4FeTbAbWAyvC90ZmHtSNuF1bYtN6ppM76F/7fHtwG215/1y29cyfm3b+7Qbz6aMMV4ScQgw\n1+TYRaqdbspR0jpS6ljj2VPVd80CLa9tiUdiV06fgW3xDKXstuPZSCvUnk90vJPNs1fX5Z4rcUyj\nOZHbAMkXpdrj+fE0izVarhmE78RdiaeUNO6GOY7OHXifvR/ikcAhuPJcdVM3IC28G6bnjVi88MjC\nn2npi4o7WA/QnNSi4j+Ppc0PCt4sKGVet7G7e/r3O8APa+c3wKP1dwNb5bYz2VSqQ1UXyFqNlpR3\n3Im9Bk8NPZeaaExmu7cDLq493wtPR1y+Pnda5tFMZEyzTGvym8DNaZ5M0rECelT/Vo8zj3kx60jJ\nY423X7sUbxV2Ky0CgMAawBt43+Uhucc62bQ4Xv5zIbBu7fzExrtR35FxTJsju4iTmc0FfM3M+qRT\nHwPPS7orvd47nX8VXyh+IOm/apM6FXRtqiJ8M1sdV/JbEI9OHYangb5q3qN2deAKSTdlTkEbaGaH\n107NhfeAeyi93hNAnq74UzxVLSslfx5Lmx/J1l7gwhJmVjmrO0p6MJ1/BTgTeAc4PYlRZaXEed2K\nmX0RuNnMbsXTh5czs3mSKMxv8E2PLSVdVc2rzKwM/FnSgZL2B44HLjBv+STzXo3A2NRimdlM5v2C\nc1IXyDoTnytVmh+SvoJHlu8HbpT0Zi5DW3gKGGVmc6U14ufAOcA1ZjafxrUT6y5pjJnNDFwNvJ3R\n5lmBn+Mbd58A+5nZXGkujDeHk90fm1l/4DLc+e4oha8jRY11HUk34WN9A6nVXfU9lPgIV7nfSg3o\nl5rWtk3xja6RwHrmgpFMYrzHmFl/M9u38xYHnSKrA5tu2DbDxUj6mNmS+CI1v5ntByDpQ/OeoycA\nb0p6JpvBQVbSYrUSXqexB3AQ/mUwQtJfkuNyCl6v9kn1MzlsTfWVAFea2bzp8UuAzGyhtMh+ZGZr\nmtluki6XdH8OWytK/zyWND+gzM2CEud1Rf1GR9KfgD3xDYPn8BukA4AHzOwKYFYlJe2cc6RGcQ5V\nsvOveLTn28A1kkZWN/TpmsF4evymkq7OuVlgZouY2arp6V/wev8dqv9/SacCF+PZG61jfSVwmKR7\nM5hOsu8h4KeSHsCF6j7GHat50trYq43dlwOnSeroPCl5HYGyxrqV9L1zM55l8hPzOt7RtUs2BnaW\ndKMlshiaSGvbeXgWxFXAe7gTOyS97qFYpxrvmYBf41kTQVfl84ZwP+uB57M/gdcRzIBP0BPwlLkv\n4c3jz8AVTh/Gv+Cyh6zjyHvgvTzHAPun59sC9wIj8NqN7MqVeLrfO3gtY3d80f1leu1nwEl4/9T1\ngeeBdRpgc5f4PJYwP5Jd8+O9RpcAZsfb+PTEU533q103BBfjaUINUnHzumZ7laK9Du647gT0qb2v\ne/Fo4ZLAfLntTXYtAqyaHk+yX2p6XKWyzoynBq7RKVsn8R6WSXNiB3yj4Ou117rjAnBrVv9H1f9T\nBjsXxVO0d2gZ/5H4Rtgs6dwewI9q1/TBo8dZxpo2ddG111bFN76+j6dE/5hx9dz9gVty2F3qOlLi\nWLd5Dwvi/Vwr264EHkyPl6SWAp/z85j+/rxp/Rjc5vyh+P3JUnjv7mVrr1frX+N71sbxOedIlj/q\nO66D8BvKzYF902J2Bp6SsTR+g3cWcCSwQfq5RtT1xJH3wKMlz5KcqLRgLQUslJ7nXHR74f3STsBF\nYL4HzA3cBByXrtkDF+K5lFSHmXk8u9TnscnzI/394jYLSpzXbd7DxvgGwXa4w3o04xzbi4E5c9tY\ns7VIh6pmx+QKZPVswFgvgEdqdk3PDZg+PZ4Xj7CdSZv2Pmlt/EJGu9vWRdeuGQRcgdczDqu9vyuB\nL2ewuch1pMSxrtlltce90livWjt3I/AQ/p20Qe6xTjYtluy5HN98/lnL6/PjG5G/xqPfQ9P56dJ7\nWTP3e4ijA/Ok43/QbyRvT48vTR/2XdLzhfHdocOZsLC8kTfLceQ5cIXZkcBOuW2p2bQ43sh+JbyH\n3esklVtgTjxt5/ja9X3Tvzkd7i75eWzi/Eh2FbdZUOK8bvMeuuNOyJzAFrgDOzDdYHbH2ymtnNvO\nZGuRDlXreKd/SxDI+hquSD43foN/dloLTwRWwR3xNfGMibWq/5P6v5nsnqTQVDq3Gt7feKMWu2fI\nYG+x60hpY93G/iHAD/AI8tm0RLXxzYRlc9jWxtZuwAXAwen53LhTemHLdV8H3qelzyuwYO73EEdn\njo7WwKai/H3wuoc58GbVFwG7mdkSkp4DjgMWSuf6VT+rNDODAEDSjfgN/oFVXVhOe1LNxcW4WuIf\n8S+3J4DeZjavpL/jaYtfMrPz04+Ngqx1ul3289i0+QFgZksBN0l6Gb9hvgSvx/0zLlIyD+5cIWl3\nST+U9JtUS5hrjhQ3rydCd9w5OQq/+dxR0mvAUNw52VnNqbFbBbf3N6mW7izgF2Z2Ij5HhuFz5ylg\nN0k31Ob3Y5Iey2F0hRUmkCXpEnwD6QLg98C7eOT4JTyVdQZJd0o6Q9Id6WdU/zcTTzLxuuiqrvQD\nYBulekYYW5s8qpOGdoF1pJixnggf4vbNhq93l5vZ6Wb2SzPbDbhD0sNZLUzI9SlGkkS7JL2Kb3os\nYWan1i5dBl//qlrdbun6Fzttc5AH6+TakG6YDwUG4x+kvSU9aWY/wNOKvibpWTNbGJgu9xdx0HzM\nbDY1RLnSzE7AIySD8R3N5YHd8GjDtZJeSwIK8yipzObkf+Hz2JT5kcb6DHwn+Wo8bfg1PAK+axr3\nwbhi8ivAsZLez2VvndLmNYxrdG9mC+GbBG+bK1CfD+wj6TJzUa9f4FH6+7Ia3IKZ7Yl/BmcA7sHT\nypdNx0GS3spo3gS0brKY2aK4s70qcARwKl5//CSwl5JAVi7MrC8+X582F357BNgFGCTpwHTNXPjn\ndHe5GFWjMLNZ8Pl7r6Tjaud/AvwTj2YqnTPILlpX3DpSUeBYV+vf0rjzOqqaw2b2Hby+eC98jbm9\nKc5rhZlti6eXbyDpH+ncAHyT6QhJD9XeY/bxDvLQo1N/KCmdfWRmt+OL1i3A0wCSjjazj4FrzWxz\nSU93yq6gbBrinFQ3b0/jKXPXJufjD2Y2PR4x6WlmI9Ju4usZzQX+dz6PTZgfNV7Dd7+3ZPzNggvN\nrNosOBLfLMjuvJY4r2G8m7f18cj2O2Z2IR5d2xc4xszWwP8v9m2C89rGoToPb89Rd6hewHs4Tp/P\n0gmpjfc6eJrov4GrJD1jZi/iacOH4ZHk93M7r4mqvc/j+M38dpLOSPO6Yla8Z3Svdr+g06QMjj0k\nDQeQ9E8z2x+PpgGcK+mfwMu4ENnYG/omOFMUtI6UOtZ1G8xbaB2Kp2Yvb2bHSfoDcBuwtKQX8DT5\n7JjZArgGwX0AaYNxOeA+M1tZ0ptpE/J1vM61KVkQQUY6kkKcFrBPzGxFvEnycbjy5iFmNhuApJ/g\nKSYDOmFTEEwNWiIPD+JpUG+a2UkA8pS5EXgfx+7tf0tnic9jZ6k2C4DbcQGT16htFuAqnNea2WKS\nnmtCpLvEeV2Rbt5WxNM/N8MjgMvhjsotuEN1Dp5hMDbdLzP1fqnnA4tKOgOvP69olENVkcZ7Y1zg\n5p/A7sDBaQ69hKfi9pb0hKS/5LQV2rb3GSHpEfMWHP9J12yKr3/HpBv9rJjZPLgY0M5mdnN1XtLz\nuAP4ZeBoMzsF2A+v8c5OietIqWNdx8wG4vWu6+GttPrhugvgEdnVzGwOS/2Yc2JmiwD3AVeb2SbV\n+bRxdyVwm5ltY2bV2H+cx9KgaXQshTh9IRyKK6DNh4tPrIYvaqdXaQJBUAq1yMNX8HS5bpLON7Nl\n8NrStyR9L13biFTWivg8dobaHFkRv5n4AG/1cw9wVjUnUiT295LuyWetU/K8BjCv1f4ZsJqkxdO5\nIXh98SvApZJeyWjieNTGexc8zfx4SYcmh6rq77opXr/7A0nX5bS3FTPrjteMHo6nDO+PK2i/zjhB\nltPUnBpj0lxeARiNi6cdIelX6bVeuLDTu5KuaXHCsmBmK+MiO2emrJlPJA2pvT47Hv1eCnhc0h25\n7S51HSlxrFsx77N7IO5cfxsvk3g+ZUk8DvSQawBkJW0eHoxvXtyLt1D6fn2NS+viPHjK+XmSrs9h\na9BA1AGlKLyNxaXAjHj63MP4F9t8wB34F3N2Of044pjSA3dKHgXWxW+G9kpzexngMuDkdF32Pp41\nm+Pz2Nnx3hSvfT0Sv5k/BG9dcDgwe277JmJzUfOacZuxg9O/X8RT5U6sXbM+npo7KLe9beyfVL/U\nXnjvyc3q77UpR7LvF3hU+/e1/4NN8IhJ99w21myd3PY+VZ/Mxow10L/2+Hbgttrzfrntm4jNRa0j\npY51bf3rWzt3GfBGtd6lz+L9wAK57W2xfQZSX910P/JYtda1XNe4z2QceY/O/BHvTXcWvrtyN+P6\nMS6DCzuskHsg4ohjSg68tUVv4Fd4A/B1cYXFeWvXLA0sk9vWNrbH57FzY13UZkHh83oDPF118TTG\nX8TTcY+rXTMgt50tNhfnUNVulheqxjON/d+BbdPz1fGsjlVy29tie0ntffoCi6XHqwEDW16/HbgG\n7x19LjBbbpvrc6SkdaTksa7ZuCGejv1zXJRxrbSuXA7siEdeN8ltZ7K1T20dWaDltcqJXRlvKbYt\nrtfTqE2OOPIfnUwhHo6nMuwt6VYzWwvfDd9E0pMdMSIIpjJmdgS+uK6Gq1U+Y2Y7AG9IuiWvdRMn\nPo+dwcz64EJC/8FTFneS9EJKoxsAvCfpoZw2tqO0eW3euuVSvK51pLli5WhgEVy46U1J+zYw1a+7\npDHmyqAzSzoynd8AV+HsBZwk6aqcdlbU0kLHCmQBlUDW8sDRwG9xgayD5O2sGkGaI8cBwtOb++ER\nwXckfZBSoWeV9EYT5ol5e5bzcMdjCN7qaaSZ9ZD0cbrmaXyObynp6nzWtqeUdaT0sTazL+GCTMfi\nooyvAL/EW9F8A69Nfzp91zdhbm+IZ5tcgJcc7KpaWYe5yN5V+MbvJpJuzWFn0Gw6WcB9Jd6s+gAz\nWw/YmKTE2UEbgmBq8z5wDL6T/Iy5ct4BeNP7JhOfxw6QbowfY9xmwQuFbBaUNq974ylzs5nZAXiT\n+0eBE9LROMXKyqEys7EOVRKQeUfe//cWGuRQQVuBrEF4bXFvXPRoJB5RPlPSY7ntrv99SX8yb09U\nb+/zV2AdMxuvvU/usa6EpszsUsbVRY9MGx6VQzUYH/dNJV2fe6wnQuPXkdLH2swG4WN6t7wn9C14\ndtUueO35EfXrm2C3pJvSOn0DsI6kV8ysl6TR6ZKPgJ54NkQ4r0FbOt0Htg8ehegPvCrpwSYtBEHQ\njiSI0F/SI7VzY+etuRrhivju7RdxQZBGCa20Iz6PncHM5sAjPVW/yY2B7+aOTpU8r2uRwBlxVcqe\neFp8PzwSez9wEPDbpkQvobx+qa1YQQJZtTnS2t7nA/O2HRcDGwED8fY+2RWS69ikhaa646m5M0u6\nM4nhZHFOSl5HKkoZ65q91dyeDk9/PgDvKPA9SXcnm8/BN+72lvTvXLa2w7zf7854iURfYA1Jn9Re\nPxr4o6TrmjDeQTPpqAMbBKWRvpyvA3YFHq52ZNNrdZXQNYBRwMfylgzhCAZjadpmQVeY1+Z9DvcB\n3sNrF0/B66Q+TI7hZcBwNUDZGcp1qGp2D5YrmVapuI9I+m66Zn1ga+AoSS9nNHc8zNv7HIanO++F\n17kemt7PxfhG0t9z2tiKmXXDe10eD1yT0j6H4Bs0+wHv4q2KdpS358pG6etISWNdUfs8bgTsgdeM\nzg1sg2dE/ErSPcmJXUzSE/msnRAzWxDfXBwuabSZXYkLTa1oZksCCyopDYfzGkyKjvSBDYKCWRkX\nhnkV+IaZzVC9IK9d65Ye3yVpZG0Xugm9JYOGIOkDSXdIukbSg+lczi/loue1ma0CfB/vK/kQXvfV\nMzmvXwUuwqMojXBeobx+qRXJ7g2AW81scVyI7ABgFjM7Ll3zW+CAhjmv3fFsh6F470uA02uvfYLf\n8DcNkzQKeAZYBSClUe6Ji08djfeubYJDVfQ6QlljDYz9PK6HbyKdLGl0Wj8uAp4HdjOz1SWNaYrz\nWjmiib/hWScrAEjaGviHmT0EXEGtz6sSnbQ1KIeIwAZBG8ysZ/WlZWavAbMAy0t6or6zXLu+EmOZ\n4LUgaAolz+ta5KEnrmA6EI+e7IeLN72UdvffxJUtH21KpAfK7JcKRQtk9QLOBMYAg4HdUgR5E7w2\n887cc7oVK0RoquR1pKKUsW4lbQocDPwRr/NfG98MOxlvwbUZcJ2kx3PZ2I4U2V4JXzsGA5dJ+l3t\n9bXx3ssP57EwKI1wYIOgBTPrAQzDBT7+CpyNy7n/VtLwdE09Par6cp4JVw49TtJ7eawPgvZ0hXmd\nIg+r4RGTg/BI5qaS3k03SN8CdpH0r4xmtqVEhwrGRrvXB+7DNw7qAlnggZJHJvLjHaO2wbEQfiP8\ndoocnw/sI+kyM1sd71m7k6T7shqcaHWOrOF10SWvI6WNdUUbu3fFI8T/AG7BNQDWxtP4P2hSxLjC\nzNbElcoH4lkRA3AxyT54Cci5TdkkCMqgkyrEQVAEkj42syeA3+DpLCukXdinzOwCSTtXu8np+urL\n+XrgkNw3+UHQjtLntZkthreEOFqucLsK7szOntKGD8PTWBvhvE7EobqScQ7V88mhOgF3qBrhvNbs\nrgSyngT2xlP+LsUFmw4CBqshAlk1m8e29zGzqr3PvsAx5nWYawL7Ns15tQnrop8xsxfx/rSH4TWZ\n7zfBoSp1HSlxrGE8uzfAhZrG4DW7D+DR4r+Z2dy4U9hf0rsZzR1Lze6l8RT+lyTdmV77K74pdiIe\nMX4wnNdgSoka2CBoz3PAC8B/cQVF8Bu4ZczsV+BfzOnLeWbg1/iX891ZrA2CyaO4eW1m3VLa6rl4\n66fRyc7hjHNQtiIpO7fUW2WhxaG6HrjOzHbHx75yqE7D23Y0xqGCsTV2Q4GrcWGpXfGejVvIlVm7\n45GU1/NZOT7J5np7nyOA5fCb5Ftw5+QcPA26EXMEyq2LpsB1pNSxrq0jR+JtZ9bGMzkeT87rMOAm\n4Nj0PhpBbR05F8/aOCulCQPcBrwm6QVJJ0bacPBZiAhsELRBXgMzBJf9P93MZpF0oZntBNxtZotL\nesrMeuM3oUeG8xo0nZLmdeUEytsrvG1me+M79qub2WuS3pe0X7p2Okn/Te8x+05+G4dqEA3ul1rH\nxglkbYWLZO0GnCFpVIp0H03DBLLM2/vsDiwl6VngWfP+ulvg9l+azgPNmCMwgdDUqul0O6GpRikl\nl7SOVJQ61oll8U2kRXEBrIPSGtMd39DbX66g3KR1ZCAu9rYevo70Ax5LL38IrGbeYu5t1dSrg2By\niRrYIPgUzOXqT8YjEovgN29/Sq/NAMymhvQ+DILJpcnzuhbBXA93pP6KRzK74ym35wFXK/U3bNKN\nG5TVLxXKFMiq2Vxce58KK7Quuk6T15E6JY51SnvvhjvdiwLTA9+U9KKZbQbMIemsnDZODPMWSwcC\n9wLfxssknk8p3I8DPSS9ltPGoGwihTgIPgVJN+K7n/2B02tfzt0ljWrCl3MQTClNntc15/VHeD/X\npYAfSHoIOAoXMNm6SgVtgvNa2ZIcqvfx6M6rZnYijG3PcS2wMM1pIwKMN96HAovh4z4c2CA5r0Pw\njYNukh6tfiabwZTZ3qc2RxYyswGSRuNCNkOBszR+XfR/m+ZQtaOp60jpY21miwDH4IJ1p+PR7t8n\n53UNPBX6+YwmjkdtvPsCpDTsAXhN+vZpvL+Mry19wnkNPi8RgQ2CycTMesgFLBoV7QmCz0NT57WZ\nfQevX5wPd1q3qW6GzRUtR6t5LWc2wG82N8RvPJfFW3O8KemAdM0ASW/ns3JCzAWyjmCcQNapuEDW\nMPw9VAJZN2Y0czyssPY+tYjxWKEpoBKaWh5Pzf4tLjR1UJPGenJo0jpS+lib2VJ4T9SrJP0wnVsJ\n7/V6H7AkcFgD7d4QL5t4Da/ZXQLYBpgVuBHfYDpE0nXZjAy6DOHABkEQBNmp3XT2l/SOmR2Cpw//\nGxgm6fV0g7SIpJPyWjshpTlUMLanZH88MjwGT098Jr32UzxlcQDwC0m/bZjtRbT3qZPqog/Aa4wH\n4SnlT+J10QPwuugxalhddImUPtZmdgmerfEVJeVmM5sNL6OYXg0SbAIwsy/hGgXH4nXnrwC/xFv9\nfAMXznpaDavVDcolHNggCIIgKzXndWPcKTkUEDACeFbSt8wVLM/C+zPeks/a9pTkULXeQJrZ8vjN\n50XAFSkFunptrEBWTmpzpGrv0xOfD/3wjYP78fY+v1VD2vvUKa0uumRKHmsbv4fu5UBfYGtJo/Ja\nNnHMbBC+zj0n6eBUb3wSLox1mqSnMpoXdFGiBjYIgiDISnJMvoLXR10o6R3gX8A+wEAzuwnf2d+v\nKc5rreZrxiRW8yQutLIXnkK3Bd5mZLCkh5vmvJrZemZ2jpn9EL/R3B+vZdysqmNLfJjF0BaSzUW1\n9ym5Lro0uspYa/weusPwdfDGtMY0htp4T4dnybwIrG1mq6d6473wKPf+LetJEEwVIgIbBEEQZMfM\nTgDewJVCNwfWAF6VdLiZzYqrVjaqxUVyqPYB3gPuBk7BhY4+NLNFcQGq4WpQyxkAGyeQdQDefqa7\npC3TJsJP8Jv/C5qU5pci3D9jXHufHYAvavz2Pj+SdE1GMyeg1LroEulKY90Sib0K+IlcxC47tU2w\njYA9gC2BufF610HAryTdkxzxxSQ9kc/aoKsSEdggCIKgCdwGDAFux2+CHgTmNrO5JL3VQOe16pe6\nE/AQXvfVMzmvX8XTcRvVL7XGosD2eNRyfrxdDpJuB74LPNUE57UW5emJ1xMfA6yC9/PcIDmvC+Jp\n27tKuqb6mSaQ6qJPArZKaZT9geeAU/HMgp+lS/+ZycQuQ6ljbWYLmNkKredbIrFbNcV5hfFUy48D\nTpY0OtXkXoQrI++WIrFjwnkNphURgQ2CIAiyk1LkZgeQ9HKqy7wAGKoGtUFJN2/F9EuFsgWy0o3y\nanhE7SDcAdlU0ruprvFbwC6S/pXRzLaUVBddOiWNde3zuBqu/v0ecGg7Z8/GqTv3BHor9b7OSRJ/\nOxj4Iz7Ga+OZHCfjmwabAddJejyXjUHXJyKwQRAEQXbkPSNfBv6WUlkvBQ5uivMKZfZLbRHIOsrM\n+uNplu8ATyTndW08PffJjKZOgHl7n28AV0q6GLgDmBGY3cy2wSNu5zXFeS21LrpESh3rlBqstFac\nhPemHQjskqLIrdd+nD6zFwF9Om/xWFvGZjZI+gQv9zgRr0efH8+cGY6P/fHhvAbTmh65DQiCIAiC\nGj2B3nhLlz80IYJZUXOoqn6pq+DRwdlT2nDVL7URDhVMIJC1a4rAGl67e3QSyBpAswSyqvY+5+Lt\nfUYDSBpu3t5nX9zm76pB7X1qQlP1uugdGL8uenng/IxmdglKG2szGwy8Ien9FE0dBpwt6RwzuxaP\nxO5vZsdIeqKqgTWzmfGesD+S9EYm26tNsA1wbYIxwPHAA8A7kv5mZnMDQ4H+kt7NYWfwv0WkEAdB\nEATTnNpN0Bz4TeZE1WLNrKekj9LjbmnHPxtWcL9UKEcgq3XcrID2PnVKFZoqkdLGOqULC7hf0idm\ndgfn/ZYAABGrSURBVAAwJ27jW2Y2D3An7nAfl5zw/sBVwOGS7spmPGBm6wNHAXvjivB/BbZPa/ow\n4BDczqszmhn8DxEObBAEQdARzGwTPEo5A/BTSee0uaaKPPTBHd33W6/pFKU7VBXp5nM/YGbgBuDv\nwIrADye1kdBJahsc6+FOyV+B63GhqROA84CrqxrApmwSlFoXXSKlj7V5f9rHcXXkuXFn8DfArXj9\n/xnAXHgK7oVm9gtcDfzOTCaPxcwOAq7B07S/h4/3X8yFpjYB/i3p1iaNd9C1CQc2CIIgmOaY2RK4\n87cfMBNwFnC6pLNq19TT5n4P7CjpsUz2FulQtcMKEMiCMtv7QNlCU6VR+lib2aZ4dHglYB1gU2BB\nYA5c/GhdYExKLe6rzKJNZrYGrpczFHdep8czUF40s82AOepreBB0iqiBDYIgCKYpZjYQjzZMD7yY\n0uP2AE41s16Sfl5zXmfC0+a+k8t5hfEEm+oO1TLJoToKd6h6mNkFSuSy9dOQNAp42cx6JGfwDLzm\n9eW8lk1A1d5nPlwYZhvw9j5m9l1gdNPGucS66FLpCmMt6Voz+xhvvbWipBvMbDk8arwEvk5uka7N\n7bwugreu2gbfwLsL33R8MTm2xwLfzmhi8D9MOLBBEATBVKcekZT0WhILmh3YycyukHSfme0NnGVm\n16Z0tJmBG4EDJd2d0fyK4hyqT6FRAlm1KHd/Se8AfYFL8PY+m6nZ7X2KFJoqka421pJuNLNPgBfM\nbAlJfzazOfE60m3kfWyzYmZL4eJRV1X18Wa2BXBRcmyXxDfBbs9oZvA/TKQQB0EQBFOVmmOyLvAF\noBeedrsJnjb3CDBCrog7UxUxSTWy70n6Q2a7i+qXWrO7GIGsms0b4/07D8VFbkYAz0r6lnl7n7OA\nvdQcheQuURddAl19rM1sI2CUpN+n530kfZDZrLGY2SXAwsBXJL2Xzs2Gl1FML+mlnPYF/9uEAxsE\nQRBMdcxsHeAU4Od4/VQPYGtgLVwJ90E8ovKJXJWzKdHAohyqCitMICvZ8xVcSXZXSX80MwOWwmsE\ne+JRtSMl3ZjRzLF0pbropvO/NNbVRlJT3kO1TqTHl+OZEVunUoQgaAThwAZBEARTjZqTdBJe73pK\nOn8+LvixoZntAIyU9GRWY1sozaGqsMIEsmo2FdHep06buugihKZKJMY6Hy1O7CW4OvJG4cQGTSFq\nYIMgCIKpySy4IMlr+M49AJJ2MbNfpzS5i7JZN2k2wNMTnzKzHRnnUG3aYIeqOIGsGrfhTvfWeHuf\nB4EVzWyuSaVBZ6ar1UU3mRjrTKT1orukMZK+ZmZX4SJTD+W2LQggHNggCIJgKmFmcwGXm9lhuENy\nmZk9AtwJLI63i5jZzEY19MazCIeqiwhkgc+Lp2G89j574UJTjaBkoanSiLHOg5ktAAyQNJ5z2uLE\nbpXJvCBoS6QQB0EQBJ+ZuhCQeb/R7YCv4cqgfYEjgNdx1crDJV2fy9ZPwwrol1qqQNakMLMeeG10\n1d7nhswmAeXXRZdEjHVnqY33avga/R5wqKQn2lzbQ9LHZtYT6K3M7X2CAMKBDYIgCD4DZtZb0ofp\n8XLAo2nHfgZgS2BXvEfgC3jtaD9JTzdFqGRSNNWhqihNIOvTMLPpgS/jiqzZ2/vUKbUuukRirDtD\nraRgCF5jfC6wC3A3cImkP7W5tj++Hu4t6Y0shgdBjXBggyAIginCzGYH9gBuTemqlTro0HSz0xe/\n6Vwb+J6kW/NZO+U01aEqTSCrFuUppr1PKyUKTZVKjPW0xcwGA29Iej9FU88AHpB0jnkf2iOAfsAx\nkp5oEX67EviRUsufIMhNt9wGBEEQBMXxCS6qsrGZLSxpKPAfvOa1e0ox+zMu+JG1VUudFNHBzOZI\n9bptkfQf3Dn/Q/WjHTBvcpgl/TuBQBbw30ogqwnOK0ByXjcBbgJ+Z2a7tbsuzZmPzKyPmfVrivOa\nuA0YAtwODMKj23Onuui3wqGaqsRYT1vmAL6QNog+Ap4FFjezWdPYHgWsDGyRMmyqyOsIPPIdzmvQ\nGMKBDYIgCCab5Gy8BZwKzAnsbGaLSNoST2W9IgkIHQScIen+jOaOR8kOVXK4R5jZl3GBqW3NbCMz\n62dmX2KcQFZTnO2qvc+3gK/jKYrDzWz3lmvqUZ67ccelSdwJfBPYRtKRwEj8Jr8xQlNdiBjraYik\ne4DHgJeSY3oTHnFd08z64eP8PK72PCz92InAEZLuymByEEyUSCEOgiAIJotaSuhywLbA28DCeNrf\nhZKeNbN9gFnx1LRGCTZZYf1SSxbIMm/vcxjeCmU9eXufVfCNjws0YXufETT4RrnpddFdiRjraYuZ\nbYqXeKyEC79tim+AzQFsBqwLjEmpxX1DtCloIuHABkEQBJONmQ0F9sd36/+CR0kWAf6Gq98+Wru2\nEbWjUJZDVapAVuvfTzfKOwO/Aa6Q9K6ZrY5vHGyg5rb3mYCm1kV3RWKspz1mthFwPLCipA/SOvMm\n3uv1ZGALSU/ltDEIJkU4sEEQBMFkkcR4RuAqoU+b2Z7AXHgk9ou4Q3u8pHczmjmWEh2qUgWyatH5\notr7dAWhqVKIsW4WZrYBcD6whKR/JiGny/A2RVmyToJgcoka2CAIgmByGY1/bwxIz8/G087WAu4F\nLmua82pm65rZvmZ2IF47eiHuYG1lZv2Tk7qqpL+kH10T+H7GaGCRAllprNcBTgJG4QqyN+LCPDcD\nqwDDUnro+zD2/+i6XM4rlF0XXRox1s1C0m+A/wOWSc//DmwUzmtQAuHABkEQBJOFpHfwCOw6ZrZU\nipBchacTfxF4Jqd9dUp0qEoVyDKz7unhJsDZks6StDFen3t5qmH8A3CnpI8rh6QJaaFdRGiqCGKs\nm4ekGyX93swqf2BUVoOCYDIJBzYIgiCYEi7H00OPN7NjgFPw2tI5gMVzGlZRokOVnOcxNYGsZ4DZ\ngR2TE7s5cBcwG3CApJG5bG1DUe19KlJd9N7A9Hhf3fvx9O1vmtle6Zp6XfRVwHciQjXlxFg3myas\ngUEwJYQDGwRBEEw2kv6Gi3+cDLwHbIX3SR0ENKVPY3EOVYoYD8UjxmsBK+AtLeYCtjezpSWdJOkH\nkq43a0a7HCusvU/dDkmv4ems7wA7mdnMku7DHa09zGy+WjTwJlzpuZEqyU0kxjoIgmlFiDgFQRAE\nn5nkuPwY2F3SIw2wZy48SnwY3t7nMuBgvMfk4njd7kbAa02KNpQkkGWFtvcpVWiqRGKsgyCYloQD\nGwRBEHxmksPYS9IrGW0o0qGqY2b98Rrd70m6x8x6AqfjKcM3A3fnTqe0Qtv71El10acAPweG4rXF\nW+NR782BB4FzgU8kfdIk20sjxjoIgmlFOLBBEARBkXQFh6qOmX0XmAG4WtLjZrYesA/eY3dPSaMz\n2lZke5+KWn3lSXgN5inp/PnAHJI2NLMdgJFNSi0vkRjrIAimNVEDGwRBEBRHcqgONLNV0qkjgevT\nzfMo4Gq83czFwBqSXpX0NDRaqKTJAllFtvepUVxddMHEWAdBME0JBzYIgiAokdIdqgloqkBWqe19\nKkoTmiqZGOsgCDpBpBAHQRAERVFLUVwOGI47d7+U9KyZXY07t3fiKa9fb1jLmcmmCQJZNTGeqr3P\n28DCuEDWhWnM9wFmBR5oSn1xV6iLLoUY6yAIOk04sEEQBEExlOpQfRaaIJCV7BgK7A/0xtWQRwKL\n4LW5IyQ9Wrs2a31xV6uLbjIx1kEQ5CIc2CAIgqAoSnKoSqew9j5FC02VRIx1EAQ5iRrYIAiCoBiS\nQ3Ugnla7MnAH0A94HK+vG2ZmM1fXh/P6uRmN3ysMSM/PxkWl1gLuBS5rgvOa6HJ10Q0mxjoIgmyE\nAxsEQRCUREkOVfFIegePwK5jZktJ+gi4Co9+fxF4Jqd9FaULTZVEjHUQBLkJBzYIgiAohlIcqi5G\nk9v7VGnilajXtvgcmB3YMTlWmwN3AbMBB5Qq6tUEYqyDIGgCUQMbBEEQFIWZzQPsDqwA/AlvN7MD\n8APg0FyKvV0ZM5sRWBVYBrgJmAE4Bxgi6Y2ctkHURXeSGOsgCHITDmwQBEFQHE13qLoyTWjv02JP\nMUJTpRNjHQRBE4gU4iAIgqA4JL0n6beSjsVb5pwM7BDOa0d4GhjWBOc1EXXRnSPGOgiC7IQDGwRB\nEJRO0xyqLo2k13P3pq0TddGdI8Y6CIImEA5sEARBUDRNc6iCLDRaaKqLEWMdBEFWogY2CIIgCILi\nibrozhFjHQRBTsKBDYIgCIKgS9E0oamuTIx1EASdJhzYIAiCIAi6FGY2F9ArUsunPTHWQRB0mnBg\ngyAIgiAIgiAIgiIIEacgCIIgCIIgCIKgCMKBDYIgCIIgCIIgCIogHNggCIIgCIIgCIKgCMKBDYIg\nCIrHzGRmJ9ae729mh3/Kz6xtZqtOA1t2NrNTJ/Paa8zs/s/4d/49kfN7mNmONVsGfsbfP8jMvvZZ\nfjYIgiAIphXhwAZBEARdgQ+BLcxs1in4mbXxXpZTDTPrMQXXzgwsD8xkZgt+3t9XIelMSRempzsD\nn8mBBQYBU+zAmln3z/j3giAIguBTCQc2CIIg6Ap8DJwN7Nv6gpnNZmYjzOzBdKxmZoOAPYB9zexh\nM1vLzF4yZ2YzG2Nma6afv9PMFjazWVLE9FEzu9/Mlk6vH25mF5nZPcBFLX97IzO7byKO9RbA9cBl\nwLa1n7nAzM40sweA48ysr5mdb2aPpb+9Ze3aY8zskWTPHDV79jezrYAVgIvTe5zezJY3szvMbKSZ\n3ZxaoGBmg83stvS7/mRmCwE/AdZIP7tva2TZzG4ws7XT43+b2Ylm9giwysT+ThAEQRB8XsKBDYIg\nCLoKpwHbm9lMLedPBn4maUVgS+BcSS8DZ6bzy0q6A3gGWAJYHfgT7rz1BuaV9BxwBPBnSUsDhwAX\n1v7GEsC6krarTpjZ5sBBwIaS3mpj73bApenYruW1eYBVJe0HHAr8S9IX0t/+XbqmD3C/pGWAO4Hd\n6r9A0lXAQ8D2kpbFnfyfA1tJWh74BXBMuvxi4LT0u1YFXk+235XG52dt7K/TB3gg/fwDk/g7QRAE\nQfC5mOLUpCAIgiBoIpLeM7MLge8A/6m9tC6whJlVz2c0s75tfsVdwJrAAsCPcYfwDuDB9PrquAOM\npN+Z2QAzmzG9dp2k+t9cB49+flXSe61/KEVLFwbuliQz+8jMlpL0eLrkSkljavaPjdBKeic9HA3c\nkB6PBIa0G5caiwJLAbemsegOvG5m/YC5JV2dfv9/k42f8uvGYwwwYlJ/Z0p+WRAEQRBMjHBggyAI\ngq7ESXj09PzauW7AypVjVtHGQbsT+BZeM/pD4Ht4nexdk/F3P2h5/gKwILAIHgVtZRugP/BSsmNG\nPAr7/Yn8vnZ8JEnp8Rg+/TvdgCckrTLeSXdgJ4ePGT9za7ra4//WHO62fycIgiAIpgaRQhwEQRB0\nGST9E7gC+L/a6VuAvaonZrZsevg+UHfe/oinz36SnN2Hgd1xxxbckd0+/Y61gbfaRVcTr+DR2gvN\nbMk2r28HrC9pkKRBuJjTtm2uA7gV2LNmf/+JXNeO+nt8BpjNzFZJv6enmS0p6X3gb2a2WTrf28xm\nYMLxeRlY1sy6mdm8wJcm8jfb/p0psDkIgiAIJko4sEEQBEFX40SgLpr0HWCFJID0JC7eBC6gtHkS\nKVpD0ofAX4Gqrc1duAP3WHp+OLC8mT2KCxztNCkjJD2NO7xXJlEkwNvTAPPX/g6SXgL+ZWYrtflV\nRwP9zezxJJL05Um//fG4ADjTzB7GU3m3Ao5Nv+dhxqkw7wB8J723e4E5gUeBMUnYaV/gHuAl4Eng\nFDzS3e59j57E3wmCIAiCz4WNyz4KgiAIgiAIgiAIguYSEdggCIIgCIIgCIKgCMKBDYIgCIIgCIIg\nCIogHNggCIIgCIIgCIKgCMKBDYIgCIIgCIIgCIogHNggCIIgCIIgCIKgCMKBDYIgCIIgCIIgCIog\nHNggCIIgCIIgCIKgCMKBDYIgCIIgCIIgCIrg/wHX0NbhBTP1/gAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jpI0KfoM5lCZ", + "colab_type": "text" + }, + "source": [ + "# Parameter Efficiency\n", + "\n", + "No surprises here, exactly as per the EfficientNet paper, they are in a class of their own in terms of parameter efficiency.\n", + "\n", + "The test time pooling effectively increases the parameter efficiency of the ResNet models, but at the cost of both throughput and memory efficency (see later graphs).\n", + "\n", + "I'm not going to repeat the FLOP differences as there are again no surprises, same as paper barring differences in the models being compare to. If you are looking at FLOP counts for the EfficientNet models, do keep in mind, their counts appear to be for inference optiized models with the BatcNorm layers fused. The counts will be higher if you're working with trainable models that still have their BN layers. You can see some counts I did on ONNX optimized models here (https://github.com/rwightman/gen-efficientnet-pytorch/blob/master/BENCHMARK.md)" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "iE69A1asS4_n", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 621 + }, + "outputId": "ee70eb92-8618-42a5-a5af-d584821f471e" + }, + "source": [ + "params_effnet = np.array([results[m]['param_count'] for m in names_effnet])\n", + "params_effnet_tf = np.array([results[m]['param_count'] for m in names_effnet_tf])\n", + "params_resnet = np.array([results[m]['param_count'] for m in names_resnet])\n", + "params_resnet_ttp = np.array([results[m]['param_count'] for m in names_resnet_ttp])\n", + "\n", + "fig = plt.figure()\n", + "ax1 = fig.add_subplot(111)\n", + "ax1.scatter(params_effnet, acc_effnet, s=10, c='r', marker=\"s\", label='EfficientNet')\n", + "ax1.plot(params_effnet, acc_effnet, c='r')\n", + "annotate(ax1, params_effnet, acc_effnet, names_effnet, xo=-.5, align='right')\n", + "\n", + "ax1.scatter(params_effnet_tf, acc_effnet_tf, s=10, c='#8C001A', marker=\"v\", label='TF-EfficientNet')\n", + "ax1.plot(params_effnet_tf, acc_effnet_tf, c='#8C001A')\n", + "annotate(ax1, params_effnet_tf, acc_effnet_tf, names_effnet_tf, xo=.5, align='left')\n", + "\n", + "ax1.scatter(params_resnet, acc_resnet, s=10, c='b', marker=\"o\", label='ResNet')\n", + "ax1.plot(params_resnet, acc_resnet, c='b')\n", + "annotate(ax1, params_resnet, acc_resnet, names_resnet, xo=0.5, align='left')\n", + "\n", + "ax1.scatter(params_resnet_ttp, acc_resnet_ttp, s=10, c='#43C6DB', marker=\"x\", label='ResNet TTP')\n", + "ax1.plot(params_resnet_ttp, acc_resnet_ttp, c='#43C6DB')\n", + "annotate(ax1, params_resnet_ttp, acc_resnet_ttp, names_resnet_ttp, xo=0.3, align='left')\n", + "\n", + "ax1.set_title('Top-1 vs Parameter Count')\n", + "ax1.set_ylabel('Top-1 Accuracy (%)')\n", + "ax1.set_xlabel('Parameters (Millions)')\n", + "ax1.legend()\n", + "plt.show()" + ], + "execution_count": 12, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABB4AAAJcCAYAAABe5mduAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzs3Xl8VNX5x/HPk8meTELIoiIiiBsg\nMSAoiAsiEFSKdf25oCJCtbXFBaNQcWmLO4iiiFrcihut1qVWQwRFAatsUjdUUIMC6iSBkMmeTM7v\nj5mkAcIWgUng+3698iL33HvPfe6dAJlnznmOOecQEREREREREdkdIsIdgIiIiIiIiIjsvZR4EBER\nEREREZHdRokHEREREREREdltlHgQERERERERkd1GiQcRERERERER2W2UeBARERERERGR3UaJBxER\nERERERHZbZR4EBGRFs/MSht91ZlZRaPti3fxtRLM7J9mttrMnJn12ZX9b+WaV5lZbeh+SsxsmZkN\n2d3X/aXM7Egzq90N/caa2UQz+yb0TPLN7K9mdtCuvtZm1x1iZqt25zVERET2RUo8iIhIi+ecS6z/\nAr4HftWo7bldfTlgHnAhsGEX970t80L3lwK8APzDzBJ3pgMzizCzVvN/u5lFNtFmwKvAIOA8IBno\nAXwO9N+T8YmIiMiu0Wp+OREREdkaM4szs2lm9qOZrTGz+8wsKrRviJmtMrM/mdl6M/vOzM7bWl/O\nuXLn3FTn3AdA3Xaue5mZLdisbbyZ/T30/Zlm9qWZ+c3sBzMbs717cc4FgCeBRKCjmaWb2VtmVhCK\n/zUzO6DR9T40sz+b2UdAOdDOzK5sdN1VZjay0fH1z2OCmRWa2VozOz0U6zdmVmRmYxsd7zGzW8zs\n29Dxz5lZm9Du9wFPo9EnPULnXGlmX4Xi/beZHRhqjw2NIvmtmX0DfNbEIzgDOBE40zm3zDkXcM5t\ncM494JybGeqng5m9Ger/azO7rFG8L5rZhM3vt9H2T2Z2nZl9ZmYbQ/cTbWapwCvAIY3uJ3V7r5eI\niIhsnxIPIiKyN/gTkAl0B44h+Mn4jY32dwSigf2B0cAzZtZpF1z3FaCnmXVo1HYR8Hzo+yeBS51z\nXiALmL+9DkOjAK4ANgLfEfy/+lGgA1Af85TNThsOXAp4gZ+AH4HTgCTgKmCamXVrdHxHoIbg87g7\nFOe5BJ/hQOCO+mQBcAMwGDgBaB86r/76JwGBRqNPPjaz/wOuBX4F7Ad8DDy7WbxDCb5OPZp4BAOB\nBc65n7b6kOAfwFfAAQSf9xQz67eN4zd3LnAqcChwHHCRc64IOAv4ttH9FO1EnyIiIrIVSjyIiMje\n4GLgNudcoXPuZ2AicEmj/bXAn5xz1c65OcAcgm8+fxHnXAnwJnABgJl1J/jm/M3QIQGgm5l5nXNF\nzrmPt9HdyWZWTDBxcCbwa+dcmXPuZ+fca865CufcRuAu4OTNzp3hnPvKOVfjnKt1zr3unPvOBc0B\n3iOYOKhXBtznnKsFXiSYIJgUut7HwDcEkzgQTFyMc86tc85VEkzy/F9oSkRTrgImOue+ds7VhI4/\nwcz2a3TMHc65YudcRRPnpxJMnDTJzA4Djgb+6Jyrcs4tAZ5h09d7e6aEnmsBwdcqayfOFRERkZ2k\nxIOIiLRqoTfA+wOrGzWvBg5stF0QetPceH87Mzu80bD6wmaG8DzBehAQ/PT9JedcdWj7TOAc4Hsz\ne8fMem+jn/ecc22cc2nOuX7OuXmh+/Oa2ZNm9r2ZlQB5QNpm5/7QeMPMhpnZotBUhGJgwGbnFDjn\n6qeR1L/5/7nR/gogMfRsDwLeNLPiUF8fE/z9YWvTEA4GHm10fAHBxE/7rcW7mSKCIxm2pl0o/sZJ\ni81f7+1pPJqinOC0FhEREdlNlHgQEZFWzTnnCL6RPLhRcwdgbaPtNDOL3Wz/utCn8vXD6jd/M7+j\n3gQ6mVkXgiMf6qdZ4Jz7j3NuKMERBXmN9+2EcQTftPd2ziURnPaw+WgDV/+NmSUQnIrwFyDDOdcG\neKeJc7Yr9GzXAgNCSZH6r1jnXGHj6zbyAzBis+PjnHNLm4q3CXOAfpuNkGhsHZBuZnGN2hq/3mVA\nfKN9+2/jWpvbVlwiIiLSTEo8iIjI3uAF4DYzSzWzDOBmNq0rEAXcEioiOIDgigkvb60zM4tplKiI\n3ixpsYnQSIpXgKmh67wX6iPBzC4wsySCdRH8bKdY5VZ4CX4qX2xmacCE7RwfF4rDB9SZ2TB+2WoQ\njwJ3W2gpSzPLMLNfhfb5CBaX7LDZ8RPM7IjQ8Slmds5OXO/fwELgVTPLChW3TDaz35vZJcAq4FNg\nYuh16glcxv9e7+XAUDNrE6pT8YeduPbPQIbt5GoiIiIism1KPIiIyN7gVuALgksuLif4xvXeRvvz\nCQ73/4lgIcXLnXPfbqO/1QSnG6QSTCRUmNm2Pjl/nmBRxFmNpjAAjAz1tZFg8cdLd/yWGkwiOE2i\nCFjA/+pHNCk0EuEG4F+hc369vXO2416CoxDeMTM/8AHQM3StDaH9S0NTK7Kccy8ADwP/DE0NWU4w\n0bNDQqMsziQ4SuOfQAnwX+Ao4J3Q/vOArgRfz1lAjnOufnWRJwkmJ74H3iCYlNpR/wVeB1aH7qft\nTpwrIiIiW2HB/79FRET2TmY2BHjYOXdouGMRERER2RdpxIOIiIiIiIiI7DZKPIiIiIiIiIjIbqOp\nFiIiIiIiIiKy22jEg4iIiIiIiIjsNpHhDmBPS0tLcx07dgx3GCIiIiIiIrIbLF26tNA5lx7uOOR/\n9rnEQ8eOHVmyZEm4wxAREREREZHdwMxWhzsG2ZSmWoiIiIiIiIjIbqPEg4iIiIiIiIjsNko8iIiI\niIiIiMhus8/VeGhKTU0Na9asobKyMtyhyA6KjY2lffv2REVFhTsUERERERER2QYlHoA1a9bg9Xrp\n2LEjZhbucGQ7nHMUFRWxZs0aOnXqFO5wREREREREZBs01QKorKwkNTVVSYdWwsxITU3VCBURERER\nEZFWQImHECUdWhe9XiIiIiIiIq2DEg8iIiIiIiIistso8dBCeDwesrKyGr7uvvtuAObPn0+3bt3I\nysqioqKCnJwcunXrRk5ODo8++ih/+9vfttrnunXrOPfcc5sd0wMPPEB5eXnDdseOHTnnnHMatl96\n6SVGjBixzT6WL1/Om2++2ewYREREREREpHVTcckWIi4ujuXLl2/R/txzzzF+/HiGDx8OwOOPP876\n9evxeDzb7bNdu3a89NJLzY7pgQceYPjw4cTHxze0LV26lC+++IKuXbvuUB/Lly9nyZIlnH766c2O\nQ0RERERERFovjXhowWbMmMHf//53brnlFi6++GKGDRtGaWkpxxxzDLNmzeL2229n0qRJAKxatYqB\nAwdy9NFH07NnT7755hvy8/M56qijAAgEAuTk5NC7d28yMzN57LHHAJg3bx79+/fn3HPP5cgjj+Ti\niy/GOcfUqVNZt24dp5xyCqecckpDTGPHjuWOO+7YItaysjJGjhzJscceS48ePXjttdeorq7m1ltv\nZdasWWRlZTFr1qw98NRERERERESkJdGIh+ZISgK//3/bXi+UlPyiLisqKsjKymrYHj9+PKNGjWLB\nggUMHTq0YcpEYmJiw8iI22+/veH4iy++mHHjxnHWWWdRWVlJXV0dPp+vYf8TTzxBcnIyixcvpqqq\nin79+jF48GAAPv74Yz7//HPatWtHv379WLhwIWPGjOH+++/n3XffJS0traGf888/n0ceeYRVq1Zt\nEv8dd9zBgAEDePLJJykuLubYY49l4MCB/PnPf2bJkiU8/PDDv+j5iIiIiIiISOukxENzNE46NLXd\nDFubarFj4fhZu3YtZ511FgCxsbFbHJOXl8cnn3zSMPVi48aNrFy5kujoaI499ljat28PQFZWFvn5\n+ZxwwglNXsvj8ZCTk8Ndd93Faaedtkn/r7/+esMIjMrKSr7//vtm3Y+IiIiIiIjsPZR42Ec453jo\noYfIzs7epH3evHnExMQ0bHs8Hmpra7fZ1yWXXMJdd93VMI2jvv+XX36ZI444YpNjP/roo10QvYiI\niIiIiLRWqvGwF/B6vbRv355XX30VgKqqqk1WowDIzs5m+vTp1NTUAPD1119TVla23X79TYzmiIqK\n4rrrrmPKlCmb9P/QQw/hnAOC0ze21YeIiIiIiIjsG5R4aA6vd9vbzVBf46H+a9y4cTt1/syZM5k6\ndSqZmZkcf/zx/PTTT5vsHzVqFF27dqVnz54cddRRXHnlldsd2fCb3/yGIUOGbFJcst4VV1yxyfm3\n3HILNTU1ZGZm0q1bN2655RYATjnlFL744gsVlxQREREREdlHWf0n1PuKXr16uSVLlmzStmLFCrp0\n6RKmiKS59LqJiIiIiMjmzGypc65XuOOQ/9GIBxERERERERHZbZR4EBERERERkZ3y008/MXbs2HCH\nsUPMrL+ZZTba/ouZrTazOZsdN8LMPjCzhWbWM9TW2cyWmlmpmTW99F/wuKTQufPMbJGZnRpqvzS0\n/b6ZvWhmMdvoI8XM8szsvVAMmU0cM8HMRjTRfn3oGgvN7G9mFhVq7xlq+6DxeU3d62b9tTGzS7f2\nDHeWEg8iIiIiIiKyU/bff38mT57crHMDgcAujma7+gON3zQ/AmxSyM7MUoAxoWOHA1NDu34EBgEv\nbecapcBJzrn+wAXA3aH2BUBf59xJwPehvrfmYmChc+5k4ObQ14562Dl3knOuX2h7cOjPh0LX7A+M\nCSU3tnavjbUBLm203Z9Nn+FOUeJBREREREREdkp+fj4DBw7k888/59hjj+WMM87g0ksv5fbbb2/y\n+Hnz5pGdnc15553HzTffzA8//MAZZ5zBgAEDOOOMMygoKKC8vJzTTjuNk08+mf79+/P1118zb948\nTj31VM4//3y6d+/OP/7xD4Amz1+/fj29e/cGiDSzrqERABnACODm0GgEj3PuR6BusxCPBeY756qd\nc98BXjOLcc6VO+fWb+95OOfqnHP11feTgE9C7d865+ozLVVArZnFmNkCMzvSzPYPjYhIAVaEzgVI\nAXwAZnaSmX1sZv8CjtvK9atDxxrB9/mrQqMrEpxz34X2zw/dZ5P3ulmX1wPHhJ7ZxZs/QzNbZWZT\nQqMznjWzbeYWIrf3AEVERERERESaMn78eKZOnUqfPn0YPXr0No9dt24db7zxBlFRUVxwwQXccsst\n9OnTh9dee4177rmHiy66iJSUFN566y0A6urqWLduHcXFxeTl5fHzzz8zbNgwzjvvPHJycrY4f9Kk\nSUyePJmTTz65E/BX4DLnnM/MngZWOeee3UZ4qcCGRtvFQFuCIx52iJkdCMwCDgdGbrbvSGAIcKJz\nrsrMrgCeAjYC1zrnNpjZUuDPZvYZwREH9VM77gfOBH4AZm/j+jcTTBCsDB2bGrqPze/JduBe7we6\nOucGhvo+jEbP0Mwigb87564zs78Cw4BXtxabEg8iIiIiIiKyVQHneKfQz8A0L3MK/QxI8zbsW7Vq\nVf0oA4477jjWrFmz1X569epFVFQUAJ9++injxo0DoLa2lkMPPZQePXpwzDHHMHz4cFJTU/nTn/4E\nQFZWFh6Ph3bt2lFcXLzV8wFOOukkAA/wiXNu1U7c5nqCb/brJYfadphzbi1wgpl1BOYBbwCYWXvg\nGeAC51xl6NivzOw7oK1z7oNQFzcCLzvn7jezvsA04AwgyTn3faivRaE/TwAmhs4b6pwrdc7dYWZ3\nAg8TTEA8uZV7sqbazWwGcCjBaSVvbO92gUWh7z8CjtjWwZpq0QIUFRWRlZVFVlYW+++/PwceeGDD\ntpk1fJ+VlUV+fv4W548YMYJOnTo1HHP88ccDUFVVxcCBA8nKymLWrFnMnz+fbt26kZWVxdq1azn3\n3HO3GdeoUaP44osvmnVP8+bN44MPPmjYvv3224mPj8fn8zW0JSYmbrefO++8s1nXFxERERGRXePV\nn4q5Z5WPQR9+wz2rfLxT6G/Y17lzZ5YsWQLA4sWLt9mPx+Np+L5bt25MmTKFefPmsWDBAh5//HGq\nqqq4/vrrefbZZ0lPT2fmzJkABGcPbKqp8wGeeOIJgDLgUDOrX1Kzmu1/6P4RwaRBlJl1AEqdc1Xb\nOafBZlMVSgB/qD0NeBm4yjn3TaPjBwFRQKGZDatvBgpD3/sIjkIA8IeSFwC9AZxzC5xz/UNfpWYW\nG2p3BEdRlIeSHGVm1iFUbPIEgsmCJu/VOTcq1N/DTTyzzbcNqH++vYGvt/V8NOKhBUhNTWX58uVA\n8A16YmIiN9xwAxB8c16/b1vuu+++LRIJH3/8MUDD+VdddRXjx49n+PBgPZOXXtp2fZQZM2bs3I00\nMm/ePBITExuSIABpaWlMnjyZe+65Z4f7ufPOO/njH//Y7DhERERERGTnVdc5/rOhjFxfCUuKyzfZ\nNzDNy+qy4GCAO++8k5EjR5KWlkZycjIHH3zwDvU/efJkrr76akpLSwEYOXIkXbt2ZcyYMURGRlJX\nV8czzzzD6tWrd/j8Xr168fTTTwOsAS4HXjazgcDbwANmNhQ4H/gdwQKQXUIrW1zpnPvGzB4B3iP4\naf41EFytAvgn0BXoZmZvOuduayKko8xsChAg+D772lD77cCBwJRQAmUm8C/gDiAbqAXmmNkygoUg\nZ5rZSCAOuCnUx1jgX2a2jlBCo6lHYmbdCNV3AOpjvAZ4gWCi4BHn3IbQfW1xr5v5Cagws5cJFuPc\n/BnWAueY2b3AWuD1rcQFgAUTIvuOXr16ufqMXL0VK1bQpUuXMEW0qaYSD/V/mbZmxIgRDB06dJPE\ng8/n4/jjj6egoIBOnTrx29/+lvHjx5OcnMzxxx/PHXfcwdChQ/nss88IBALcdNNN5ObmEhERwejR\no/nDH/5A//79mTRpEr169SIvL4/bbruNqqoqOnfuzFNPPUViYiIdO3bksssu41//+hc1NTX84x//\nIDY2lj59+uDxeEhPT+ehhx5i7ty5ADz99NMsW7aMtm3bbnJvzz77LFOnTqW6uprjjjuORx55hJtv\nvpn77ruP7t27061bN5577rlN7rslvW4iIiIiInuDb8qqyPWVMKfQj7+2jvToSA5LiOaDDf9LPtx0\naAaD0oM1EGtqahqmT4wePZrs7Oztjqze3cxsqXOu1/aPlOYys1XOuUN39HiNeGiG799dyBczX27Y\n7nrJOXQ4pd82zmi+iooKsrKyAOjUqROvvPJKk8fl5OQwcWJwik/9m/QZM2YwadIk3ngjOD3nP//5\nT0OCovGUjccff5z8/HyWL19OZGQk69dvOpWpsLCQiRMnMmfOHBISErjnnnu4//77ufXWW4HgSIZl\ny5bxyCOPMGnSJGbMmMFVV121SQJl7ty5JCYmMnLkSB588MGG+VoQTCDMmjWLhQsXEhUVxe9+9zue\ne+457r77bh5++OEdGvEhIiIiIiLNU1IT4N0iP7k+PyvLqogy6Nc2kSEZSfRIjgPYao2HTz/9lGuu\nuYba2lo6duzIr3/9a2688UYWLVrUcEx0dDR5eXl7/L52FzO7nmAxxcbO3pHVL/ZVSjw0Q/nPhXzx\nt5dwgQDm8dBx8Mm77VpxcXHNnmqxo+bMmcNVV11FZGTwx6Ft27ab7P/www/54osv6NcvmFyprq6m\nb9++DfvPPvtsAI455hj++c9/bvNaY8aMISsrqyEhAcGkxNKlSxuK0lRUVJCRkdGsexERERERke0L\nOMfyjRXk+kpYsL6MGuc4NCGG33dMY0Cal6QozybH149wqP+zXs+ePZk/f/4mbffee+/uDT7MnHP3\nE1z1YZ+1M6MdQImHZjn8vKG8l/MXStf8SMIBGRx+3tA9ev3LL7+cjz/+mHbt2vHmm2/u9us55xg0\naBAvvPBCk/tjYoJ1VDweD7W1tU0eU69NmzZcdNFFTJs2bZP+L7vsMu66665dF7SIiIiIiGzhx8oa\n8gpKmO3z46uuxeuJ4Iz9ksjOSOKwhJjtdyDSDFrVohkiPB5OuncCACfdO4EIj2c7Z+xaTz31FMuX\nL99lSYdBgwbx2GOPNSQNNp9q0adPHxYuXMiqVcHVaMrKyvj6620WLcXr9eL3N1335Prrr9/keqee\neiovvfRSw4oX69evbygiExUVRU1NTfNvTkRERERkH1cVqGNugZ+cz9dyycereXbNBjrERTPhsP2Y\n1asjv++UrqSD7FZKPDTTEef/ilOm/oUjzv9VuEMBgjUeGi+7WV1dvcPnjho1ig4dOpCZmcnRRx/N\n888/v8n+9PR0nn76aS688EIyMzPp27cvX3755Tb7/NWvfsUrr7xCVlbWFkOv0tLSOOuss6iqCq5O\n07VrVyZOnMjgwYPJzMxk0KBB/PjjjwD85je/ITMzk4svvniH70dEREREZF/nnOPL0koe+NbH+Uvz\nuWvVz/xYVcOIg9rybM+DubtrO/qneYmO0FtC2f20qgVaHaG10usmIiIiIrKp4poAcwr85PpKyK+o\nJibCOLFtIkMyvGQmxRERXNJxr6ZVLVoe1XgQERERERFpxQLOsbi4nFxfCf/ZUEbAwZGJMVx7SDr9\nUxNJjNyzU8NFNqfEg4iIiIiISCu0pqKaXF8Jbxf4KaoJ0CbSw9n7tyE7w0vHeNVskJZDiQcRERER\nEZFWoiJQx3tFpeT6SvjMX0kEcFxKPEMykji2TQJREXv/VAppfZR4EBERERERacGcc3zuryTXV8K8\nolIq6xwHxUYxukMqA9O9pEbrbZ20bPoJFRERERERaYGKqmt5O1Qock1lDXERxilpXoZkeOmaGIvt\nA4UiZe+gxIOIiIiIiEgLUVPn+GhDGbkFJSzaUE4d0N0by4UHpnBSaiJxHi1/Ka2PEg8thMfjoXv3\n7tTW1tKpUydmzpxJmzZtdrqf/v37U1paSv2SoUuWLOGGG25g3rx5Wz0nPz+fDz74gIsuuqi54YuI\niIiIyC/wXXkVs31+5hT4Ka4NkBrl4f8OTCE73Uv7uOhwhyfyiyhd1kLExcWxfPlyPvvsM9q2bcu0\nadOa3ZfP5+Ott97a4ePz8/N5/vnnm309ERERERHZeaW1Ad74eSO///QHRv/3B179qZjuSbHcceQB\nPH9MR67okKqkg+wVlHhogfr27cvatWsbtu+77z569+5NZmYmt912GwBlZWWcccYZHH300Rx11FHM\nmjWr4ficnBzuuOOOLfoNBALk5OQ09PXYY48BMG7cOObPn09WVhZTpkzZzXcnIiIiIrLvqnOO5RvL\nuXvlz5y/JJ8Hvi2gMuD4bcc0XjymE7cdcQDHpSTgUf0G2YtoqkUzvf465OXB4MEwbNiu6zcQCDB3\n7lyuuOIKAPLy8li5ciWLFi3COcewYcN4//33KSgooF27dvz73/8GYOPGjQ199O3bl1deeYV3330X\nr9fb0P7EE0+QnJzM4sWLqaqqol+/fgwePJi7776bSZMm8cYbb+y6GxERERERkQY/V9U0FIr8qaqW\nBE8E2RlehmQkcXhCjApFyl5NiYdmeP11uPBCKC+Hp56CF1745cmHiooKsrKyWLt2LV26dGHQoEFA\nMPGQl5dHjx49ACgtLWXlypWceOKJjB07lptuuomhQ4dy4oknbtLfhAkTmDhxIvfcc09DW15eHp98\n8gkvvfQSEExWrFy5kuhoDd8SEREREdnVquvqWLi+jFxfCcs2VuCAHslxXH5QKie0TSBGhSJlH6HE\nQzPk5QWTDhD8My/vlyce6ms8lJeXk52dzbRp0xgzZgzOOcaPH8+VV165xTnLli3jzTffZMKECZx6\n6qnceuutDfsGDBjAhAkT+PDDDxvanHM89NBDZGdnb9LPtgpPioiIiIjIzllZVkWur4R3Cvz4A3Vk\nREcyvH0Kg9OTOCA2KtzhiexxSrE1w+DBEB8f/D4+Pri9q8THxzN16lQmT55MbW0t2dnZPPnkk5SW\nlgKwdu1afD4f69atIz4+nuHDh5OTk8OyZcu26GvChAnce++9DdvZ2dlMnz6dmpoaAL7++mvKysrw\ner34/f5ddxMiIiIiIvuYjTUBXvmxmCv/+z2//eQH3vy5hF5t4rmnSzue7Xkwlx2UqqSD7LM04qEZ\nhg0LTq/YHTUeAHr06EFmZiYvvPACl1xyCStWrKBv374AJCYm8uyzz7Jq1SpycnKIiIggKiqK6dOn\nb9HP6aefTnp6esP2qFGjyM/Pp2fPnjjnSE9P59VXXyUzMxOPx8PRRx/NiBEjuO6663btDYmIiIiI\n7IUCzrFsYzm5Pj8frC+lxsHhCTH8oVM6A9IS8UZ6wh2iSItgzrlwx7BH9erVyy1ZsmSTthUrVtCl\nS5cwRSTNpddNRERERMJhXWUNs30l5BX4KaiuxRsZwaA0L9kZSXROiAl3ePs8M1vqnOsV7jjkfzTi\nQUREREREZDsqA3XMX19Krs/Pf0sqiAB6tYnntx3T6JOSQHSEVqUQ2RolHkRERERERJrgnOPL0mCh\nyHeL/JQHHO1iohh5UFsGpSeRHqO3UyI7Qn9TREREREREGtlQXcucQj+5Pj+rK6qJjTBOSk1kSEYS\n3b2xmGl0g8jOUOJBRERERET2eQHnWLShnFxfCR8WlxFw0DUxlusPSefkVC8JkVoQUKS5lHgQERER\nEZF91vcV1cz2lfB2gZ/1NQHaRHk454A2ZKcncXB8dLjDE9krKPEgIiIiIiL7lPJAHe8VlpJbUMLn\n/koigD4pCQzJSOLYNvFEqlCkyC6lxEML4fF46N69O7W1tXTq1ImZM2fSpk2bne6nf//+lJaWUr9k\n6JIlS7jhhhuYN2/eVs/Jz8/ngw8+4KKLLtqk/dNPP+WSSy4B4Pvvvyc5OZnk5GRSU1MpKiraoj0t\nLY0ZM2bQpUsXjjjiCKqrqznppJN45JFHiIjQ0DQRERERCR/nHJ/5K8n1lfBeUSmVdY4OcVH85uBU\nBqZ5aRutt0Yiu4v+drUQcXFxLF++HIDLLruMadOmcfPNNzerL5/Px1tvvcVpp522Q8fn5+fz/PPP\nb5F46N69e0NMI0aMYOjQoZx77rmbHLN5e35+Pp07d2b58uXU1tYyYMAAXn31Vc4+++xm3YuIiIiI\nyC9RWFVLXmEJs31+1lbWEO9B4po+AAAgAElEQVQxBqR5GZLhpUuiCkWK7An6GLoF6tu3L2vXrm3Y\nvu++++jduzeZmZncdtttAJSVlXHGGWdw9NFHc9RRRzFr1qyG43Nycrjjjju26DcQCJCTk9PQ12OP\nPQbAuHHjmD9/PllZWUyZMmWX3UdkZCTHH388q1at2mV9ioiIiIhsT02d4/2iUv64Yh0XLcvnye/X\nkxrt4cZDM5h1TCeu75xBV2+ckg4ie4hGPDRDwDneKfQzMM3LnEI/A9K8eHbRP1qBQIC5c+dyxRVX\nAJCXl8fKlStZtGgRzjmGDRvG+++/T0FBAe3atePf//43ABs3bmzoo2/fvrzyyiu8++67eL3ehvYn\nnniC5ORkFi9eTFVVFf369WPw4MHcfffdTJo0iTfeeGOX3EO98vJy5s6dy5///Odd2q+IiIiISFO+\nLasit6CEuQV+NtbWkRbt4YIDU8hO93JgnApFioSLEg/N8E6hn3tW+bhnla+hbVB60i/qs6Kigqys\nLNauXUuXLl0YNGgQEEw85OXl0aNHDwBKS0tZuXIlJ554ImPHjuWmm25i6NChnHjiiZv0N2HCBCZO\nnMg999zT0JaXl8cnn3zCSy+9BASTFStXriQ6etf+I/zNN9+QlZWFmXHmmWfu8JQPEREREZGdVVob\n4J3CUnJ9JXxdVkWkwfFtExmS7uWYNvG77ANCEWk+JR6aYWCad5Okw8A07zaO3jH1NR7Ky8vJzs5m\n2rRpjBkzBucc48eP58orr9zinGXLlvHmm28yYcIETj31VG699daGfQMGDGDChAl8+OGHDW3OOR56\n6CGys7M36WdbhSebo77Gg4iIiIjI7lDnHMtLKsj1lbCgqIxq5zgkPpqrO6YxIM1LcpQn3CGKSCOq\n8dAMcwr929z+JeLj45k6dSqTJ0+mtraW7OxsnnzySUpLSwFYu3YtPp+PdevWER8fz/Dhw8nJyWHZ\nsmVb9DVhwgTuvffehu3s7GymT59OTU0NAF9//TVlZWV4vV78/l13DyIiIiIiu8PPVTX87Yf1XPLx\nam78Yh2LNpRzWkYS07u357HMgzjrgDZKOoi0QBrx0AwDQiMcGtd42JV69OhBZmYmL7zwApdccgkr\nVqygb9++ACQmJvLss8+yatUqcnJyiIiIICoqiunTp2/Rz+mnn056enrD9qhRo8jPz6dnz54450hP\nT+fVV18lMzMTj8fD0UcfzYgRI7juuut26f2IiIiIiDRXVaCOhRvKyPWV8PHGCgB6JMcxqkMq/dom\nEK1l20VaPHPOhTuGPapXr15uyZIlm7StWLGCLl26hCkiaS69biIiIiJ7J+ccK8uqyPX5eafQT2mg\njv1iIslOT2Jwupf9Y6PCHaK0YGa21DnXK9xxyP9oxIOIiIiIiLQIG2sCzC30k+sr4dvyaqLNODE1\ngSEZSRydFEeECkWKtEpKPIiIiIiISNgEnGNpcTm5vhI+2FBGrYPDE2IY0ymdAWmJJEaqZoNIa6fE\nQ4hzDlMGtdXY16YIiYiIiOxt1lZUM7vAT15BCYXVAZIiIxi2fzJD0pM4JCEm3OGJyC6kxAMQGxtL\nUVERqampSj60As45ioqKiI2NDXcoIiIiIrITKgJ1zC8qJddXwif+SiKAXm3i+V3HJPqmJBAVod/F\nRfZGSjwA7du3Z82aNRQUFIQ7FNlBsbGxtG/fPtxhiIiIiMh2OOdYUVrJWz4/8wr9VNQ5DoyN4ooO\nqQxK85IWo7ckIns7/S0HoqKi6NSpU7jDEBERERHZa6yvruXtAj+zC0r4vqKG2Ajj5NREhmQkcZQ3\nViONRfYhSjyIiIiIiMguUVvn+Ki4jFyfn482lFEHdPPGMvaQFE5OSyTeExHuEEUkDJR4EBERERGR\nX2R1eTW5vhLeLvRTXBOgbZSH89q1ITsjiQ5x0eEOT0TCTIkHERERERHZaWW1dcwr8pPrK2FFaRUe\ngz5tEhiSkcSxKfF4NJVCREKUeBARERERkR3inOOTkkpyC0p4v6iUqjrHwXHRXHlwKgPTvKRE6+2F\niGxJ/zKIiIiIiMg2FVTVkldQwmyfn3VVNcR7jIFpXoZkJHFkYowKRYrINinxICIiIiIiW6iuc/xn\nQxm5vhKWFpdTB2QlxXHJQSmc2DaRWBWKFJEdpMSDiIiIiIg0+KasilxfCXMK/fhr60iPjuTCA1PI\nzkiiXWxUuMMTkVZIiQcRERERkX2cvzbAO4V+cn1+VpZVEWVwfNtEhmR46ZmsQpEi8su0isSDmR0B\nzGrUdAhwq3PugdD+scAkIN05VxiGEEVEREREWpU65/h4YwW5vhIWrC+jxjk6x0dzdcc0BqR5SY7y\nhDtEEdlLtIrEg3PuKyALwMw8wFrgldD2QcBg4PuwBSgiIiIi0kr8WFnTUCjSV12L1xPB6fslMSQj\nicMSYsIdnojshVpF4mEzpwLfOOdWh7anADcCr4UvJBERERGRlqsqUMeC9cFCkR+XVGBAz+Q4Rh+c\nSr+2CURHqFCkiOw+rTHxcAHwAoCZnQmsdc79d1tL+JjZb4DfAHTo0GFPxCgiIiIiElbOOb4KFYp8\nt7CUskAd+8dEMuKgtgxK97JfjApFisie0aoSD2YWDQwDxptZPPBHgtMstsk59zjwOECvXr3cbg1S\nRERERCSMimsCzCnwk+srIb+immgzTkoNForMTIojQoUiRWQPa21jqk4DljnnfgY6A52A/5pZPtAe\nWGZm+4cxPhERERGRPS7gHB9uKOP2r37k/5Z+x6OrC4n1GNceks7fe3Vk3GH7kZUcr6SD7HI//fQT\nY8eODXcYO8TM+ptZZqPtv5jZajObs9lxI8zsAzNbaGY9Q22dzWypmZWa2QnbuEZS6Nx5ZrbIzE4N\ntV8a2n7fzF40s60WVDGzFDPLM7P3QjFkNnHMBDMb0UT7A2b2YehrXKitfaiv+aH+em12zuVmVrPV\nB7cLmHOtZwCAmb0IzHbOPdXEvnyg1/ZWtejVq5dbsmTJbopQRERERGTPWVNRTa6vhLcL/BTVBGgT\n6WFgupfsDC+d4lUoUlq2QCCAx7PrV08xs6XOuV5NtN8OrHLOPRvaPgCIAx53zg0MtaUAc4E+wIHA\nTOfcCaER97HA/cAM59yCrVw7AohwztWa2SHALOdc79D3q51zATO7F/jKOffEVvr4PZDqnPuTmfUH\nfuuc+7/NjpkArHHOPb1Z+2HOuZWhOBYCw4FCIMY55zOzrsBjzrkTQ8fHAi8BXZxznbfxWH+RVjPV\nwswSgEHAleGORUREREQkXCoCdbxXVEqur4TP/JVEAMemxPOH9CSOS0kgKkKjGmTPyc/PZ9SoUTz4\n4INcfvnlpKenk5qayiGHHMLtt9++xfHz5s3jrrvuIikpic6dO3P11Vdz1VVXUVFRQVxcHE8//TQJ\nCQmcc845lJeXY2Y8/vjjrFu3jr/85S+kpqayYsUKbr31Vs477zx++OGHLc4HPGa2GDgDSAMeBc4F\nRgAVZjYKONU596OZddwsxGOB+c65auA7M/OaWYxzrhwo31ZtQQDnXB1QF9pMAj4JtX/b6LAqoDY0\n6mEuMAooBl4HsoEVwOmhY1MAH4CZnQQ8CKwJ7VvDZpxzK+vjMLNaIOCc27j5tRttjwk9nwe2eWO/\nUKtJPDjnyoDUbezvuOeiERERERHZc5xzfO6vJNdXwryiUirrHO1joxjVIZWB6V7SolvNr/Wylxo/\nfjxTp06lT58+jB49epvHrlu3jjfeeIOoqCguuOACbrnlFvr06cNrr73GPffcw0UXXURKSgpvvfUW\nAHV1daxbt47i4mLy8vL4+eefGTZsGOeddx45OTlbnA8EgLHAMwTf/F8W+rT/aRqNeNiKVGBDo+1i\noC3w444+CzM7EJgFHA6M3GzfkcAQ4ETnXJWZXQE8BWwErnXObTCzpcCfzewzoA1QP7XjfuBM4Adg\n9nZiuBj41jmX36jNA0wF7ghtpwAnOefuNTMlHkRERERE9kVF1bW8HSoUuaayhtgIo39qIkMykujm\njWV7n76K7CrTpsFnn8Fpp8GwYVvuX7VqFb179wbguOOOY82aLT6Mb9CrVy+iooKrqnz66aeMGzcO\ngNraWg499FB69OjBMcccw/Dhw0lNTeVPf/oTAFlZWXg8Htq1a0dxcfFWzwdwzr1vZncBnzjnVu3E\nra4n+Ga/XnKobYc559YCJ4RGU8wD3oBgrQWCyZALnHOVoWO/MrPvgLbOuQ9CXdwIvOycu9/M+gLT\nCI7eSHLOfR/qa1HozxOAiaHzhjrnSs1sIHA58KvNQnsMeMs5V1/TYjxw787cW3Mp8SAiIiIi0oLU\n1Dk+2lBGbkEJizaUUwcc5Y3lggNTODk1kThPa6sPL62Vc7BgAYwdC4sXB9v+9jd44YUtkw+dO3dm\nyZIlHHfccSxevJgDDjhgq/02ruvQrVs3xo8fT48ePQCorq6mqqqK66+/HjNj4sSJzJw5k+7duzeZ\naGvq/GeeeYbQSIJFwFFm1ss5twSoZvvvgT8CJppZFHAAUOqcq9rOOQ1C0zLqjy8B/KH2NOBl4Crn\n3DeNjh8ERAGFZjbMOfc6YATrMkBwmkXb0Pd+M2vvnFsD9CY4emMB0L9Rf8cBfwFOc85VNGqfBPzo\nnHu4UbiHA380sz8CB5jZrM1rSewqSjyIiIiIiLQA+eVV5Pr8zCnwU1wbIDXKw/nt2jAkI4n2cdHh\nDk/2IYEAvPYa3HcffPghxMb+b195OeTlbZl4uPPOOxk5ciRpaWkkJydz8MEH79C1Jk+ezNVXX01p\naSkAI0eOpGvXrowZM4bIyEjq6up45plnWL169Q6fT7AI5AjgVCADeDk0CuBt4AEzGwqcD/wOuADo\nElrZ4krn3Ddm9gjwHuCAayC4WgXwT6Ar0M3M3nTO3dZESEeZ2RSC0z0igWtD7bcTLFY5JZRAmQn8\ni+C0h2yCdRfmmNky4CFgppmNJFj88qZQH2OBf5nZOkIJjSbUF6x8NXSdsQQTGdcAC81sHlDgnDvP\nOffr+pPMbNXuSjpAK1vVYlfQqhYiIiIi0lKU1gaYFyoU+WVpFR6DvikJDMlIonebeDyaSiF7UGVl\ncETDpEmwciUcckhwtEN6OowYEUw6xMc3PeKhpqamYfrE6NGjyc7O5txzz93zN8HWV7WQ8NGIBxER\nERGRPajOOT4pqSDX52f++lKq6hwd46K56uA0BqZ7aRO165cXFNmW9eth+nSYOhV8PujVC/7+dzj7\nbKifFRETExzpMHhw0zUePv30U6655hpqa2vp2LEjv/71r7nxxhtZtGhRwzHR0dHk5eXtobva/czs\nemDzp3G2c26nakLsCzTiQURERERkD/i5qqahUORPVbXEeyIYkBYsFHlEQowKRcoe9/33MGUK/PWv\nUFYWLBx5441w8snQmn8cNeKh5dGIBxERERGR3aS6ro6F68vI9ZWwbGMFDuiRFMflB6XSr20CsSoU\nKWHw3/8G6ze8+GIwwXDhhXDDDZCZGe7IZG+lxIOIiIiIyC62sqyKXF8J7xT48QfqSI+OZHj7FAan\nJ3FAbFS4w5N9kHPwzjtw773BKROJiXDNNXDttXDQQeGOTvZ2SjyIiIiIiOwCG2sCvFMYnErxTXk1\nUWac0DZYKDIrOU6FIiUsamvh5ZeDCYdly2C//eDOO+GqqyAlJdzRyb5CiQcRERERkWYKOMeyjeXk\n+vx8sL6UGgeHJcTwh05pnJLqJUmFIiVMysrgqafg/vvhu+/g8MODtRyGD990eUyRPUGJBxERERGR\nnbSusobZvhLyCvwUVNfijYxg6H7JZGckcWhCTLjDk31YQQFMmwYPPwxFRdC3bzD5MGwYRKikiISJ\nEg8iIiIiIjugMlDH/PWl5Pr8/LekAgN6tYnnqo5p9E1JIDpCUykkfL75JphgePJJqKwMJhpuvBH6\n9Qt3ZCJKPIiIiIiIbJVzji9Lg4Ui3y0qpTxQxwExkVx+UFsGpXvJiFGhSAmvJUuCK1S89BJERsIl\nl8DYsdClS7gjE/kfJR5ERERERDazobqWOYV+cn1+VldUExNhnJSayJD0JLonxRKhQpESRs7B7NnB\ngpHvvgvJyZCTA2PGQLt24Y5OZEtKPIiIiIiIECwUuWhDObm+Ej4sLiPgoEtiDNcdkk7/VC8JkZog\nL+FVUwMvvhgc4fDpp3DggTBpEoweDUlJ4Y5OZOuUeBARERGRfdr3FdXM9pXwdoGf9TUB2kR5OHv/\nNgzJSOLg+OhwhyeC3w8zZsCUKfDDD9CtGzz9NFx4IUTrR1RaASUeRERERGSfUx6o473CUnILSvjc\nX0kEcFxKAkMyvBzXJoFIFYqUFuCnn2DqVJg+HYqL4eST4dFH4bTTQLN9pDVR4kFERERE9gnOOT7z\nV5LrK+G9olIq6xwd4qIY3SGVQele2kbrV2NpGb76CiZPhmeeCU6vOOecYA2HY48Nd2QizaN/XUVE\nRERkr1ZYVUteYQmzfX7WVtYQF2GckubltAwvXRJjMX10LC3Ef/4TLBj52mvBKRQjR8L118Nhh4U7\nMpFfRokHEREREdnr1NQ5/rOhjFxfCUuKy6kDMr2xXHxgCiemJhLnUaFIaRnq6uCNN4IJh4ULISUF\nJkyA3/8eMjLCHZ3IrqHEg4iIiIjsNb4tqyK3oIS5BX421taRGuXhggNTGJzupX2cqvBJy1FVBc89\nF1yh4ssv4eCD4cEHg6McEhPDHZ3IrqXEg4iIiIi0aqW1Ad4pLCXXV8LXZVVEGhyfksCQjCSOaROP\nR1MppAUpLobHHgsmGX78EbKy4Pnn4bzzIFLvzmQvpR9tEREREWl16pxjeUkFub4SFhSVUe0cneKj\n+V3HNE5N85Ic5Ql3iCKbWLMmmGx47LHg8piDBgWLRw4cqBUqZO+nxIOIiIiItBo/V9Uw2+dndkEJ\nP1fVkuCJYEhGEkMyvByWEKNCkdLifPYZTJoUnFbhHJx/fnCFih49wh2ZyJ6jxIOIiIiItGjVdXUs\nWB8sFPnxxgoc0DM5jis6pNIvJYEYFYqUFsY5eP/9YP2Gf/8b4uPhd7+D666Djh3DHZ3InqfEg4iI\niIi0OM45VpZVkevz806hn9JAHfvFRHJJ+7YMTveyf2xUuEMU2UIgAK++GlyhYtEiSE+HP/85mHRI\nTQ13dCLho8SDiIiIiLQYG2sCzC30k+sr4dvyaqLMODE1WCgyKymOCE2lkBaooiJYr2HyZFi1Cjp3\nhunT4bLLIC4u3NGJhJ8SDyIiIiISVgHnWFpcTq6vhA82lFHr4PCEGMZ0SueUtES8kSoUKS3T+vXw\nyCMwdSoUFEDv3vCPf8BZZ4FHP7YiDZR4EBEREZGwWFtRzewCP3kFJRRWB0iKjGDY/skMSU/ikISY\ncIcnslX5+TBlCsyYAeXlcPrpcOONcNJJWqFCpClKPIiIiIjIHlMRqGN+USm5vhI+8VcSAfRqE8/v\nOibRNyWBqAi9a5OWa/nyYMHIWbOCCYaLL4YbboCjjgp3ZCItmxIPIiIiIrJbOedYUVrJWz4/7xX5\nKQ84DoyNYmSHtgxOSyItRr+SSsvlHMydGywY+fbb4PXCtdcGv9q3D3d0Iq2D/pUXERERkd1ifXUt\nbxf4mV1QwvcVNcRGGCenJjIkI4mjvLGYxqRLC1ZbG6zXcO+9wZEO++8Pd98NV14JbdqEOzqR1kWJ\nBxERERHZZWrrHB8Vl5Hr8/PRhjLqgG7eWMYeksLJaYnEeyLCHaLINpWVwZNPwv33B2s5HHkkPPFE\ncFpFjEqPiDSLEg8iIiIi8outLq8m11fC24V+imsCpER5OK9dG7IzkugQFx3u8ES2y+eDhx+GadOC\nq1X06wcPPghDh0KE8mUiv4gSDyIiIiLSLGW1dcwr8pPrK2FFaRUegz5tEhiSkUTvNvFEqlCktAKr\nVsHkyfD001BVBWeeCTk5cPzx4Y5MZO+hxIOIiIiI7DDnHJ+UVDK7oIT3i0qprHN0iIviyoNTGZjm\nJSVav15K67B4cbB+w8svQ1QUXHopjB0bnFohIruW/mcQERERke0qqKolr6CE2T4/66pqiPcYp6Z5\nGZKRxJGJMSoUKa2Cc/DWW8ElMefNg+RkGDcO/vAHOOCAcEcnsvdS4kFEREREmlRd5/jPhjJyfSUs\nLS6nDjg6KY5LDkrhhLaJxKlQpLQS1dXw4ovBhMNnnwWXwZw8GUaPDi6PKSK7lxIPIiIiIrKJb8qq\nyPWVMKfQj7+2jvToSC48MIXsjCTaxUaFOzyRHVZSAn/9K0yZAmvXwlFHwd/+BhdcEJxeISJ7hhIP\nIiIiIoK/NsA7hX5m+/x8XVZFlMHxbRMZkuGlZ3I8Hk2lkFbkxx9h6lSYPh02boRTToEZMyA7G/Sj\nLLLnKfEgIiIiso+qc46PN1aQ6ythwfoyapyjc3w0V3dMY0Cal+QoT7hDFNkpX34JkybBzJlQWwvn\nnBNcoaJ373BHJrJvU+JBREREZB/zU2UNs0OFIn3VtXg9EZy+XxJDMpI4LCEm3OGJ7LSFC4P1G157\nDWJjYdQouP566Nw53JGJCCjxICIiIrJPqArUsWB9GbkFJXy8sQIDeibHMfrgVPq1TSA6QoUipXWp\nq4N//Su4JOYHH0DbtnDbbXD11ZCeHu7oRKQxJR5ERERE9lLOOb4KFYp8t7CUskAd+8dEMuKgtgxK\n97JfjKrrSetTWQnPPhucUvHVV9CxIzz0EFx+OSQkhDs6EWmKEg8iIiIie5nimgBzCvzk+krIr6gm\n2oyTUoOFIjOT4ohQdT1phYqL4dFH4cEH4aefoGfP4BKZ55wDkXpXI9Ki6a+oiIiIyF4g4ByLi8vJ\n9ZXw4YYyah0cmRjDtYek0z81kcRIFYqU1umHH+CBB+Dxx6G0FAYPDo54GDBAK1SItBZKPIiIiIi0\nYmsqqpld4CfPV0JRTYDkyAh+vX8bsjO8dIpXoUhpvT79NFgw8oUXwDm44AK44QbIygp3ZCKys5R4\nEBEREWllKgJ1vFdUSq6vhM/8lUQAx6bE84f0JI5LSSAqQh8DS+vkHLz3XrBg5FtvBWs2XH01XHcd\nHHxwuKMTkeZS4kFERESkFXDO8UVpJW/9XMK8olIq6xztY6MY1SGVgele0qL1a520XoEA/POfwREO\nixdDRgZMnAi//W1wtQoRad30P5SIiIhIC1ZUXdtQKPKHyhpiI/6fvTsPj/HsHjj+nayThAhZCBGi\n4bUWFUtEZJImsYeqfY21fVMUJdXyUqq6qFhqq0qlmlJUFy+lqhotUWupXRW1FJGQBVkmmfv3xxPT\nhCh9f5WJOJ/rytXkmbmf58wTJXNyn3N0GFzL0NbDmXpl9eikyF08wjIzIS4OZs2C336DmjW1BpID\nBoCDg6WjE0L8UyTxIIQQQghRwuSaFLtSb7IpKZ1d129hAuqX1dOzSnmCXMvgYG1l6RCF+H9JSYEF\nC7QxmMnJ0Ly5Vl7RuTNYSx9UIUodSTwIIYQQQpQQZ29lsykpgy1XM0jNzcPV1poelV1o4+FMVQc7\nS4cnxP/bmTMwezbExsKtW9CxI0RHQ6tWMqFCiNJMEg9CCCGEEBZ0IzePhPxGkcdvZGOtA//yTrT1\ncKapiyPW8m5MlAL792v9G1av1nY09O2rTaioV8/SkQkhioMkHoQQQgghiplJKX5Jz2RTUgY/XrtB\ntklR3cGO56u58bR7Gcrbyo9o4tGnFHz7rZZw2LIFypaFl16CF1+EKlUsHZ0QojjJv2pCCCGEEMXk\nSraRb69m8E1SOpeyc3G0tiLMvSxtPZz5l5O9NIoUpYLRCGvWaD0bDh4ET0/t8+HDoVw5S0cnhLAE\nSTwIIYQQQjxEOSZF4rUbbExKZ39aJgpo7OxAZFVXAio4oZdGkaKUuHFD690QEwPnzkGdOvDhh9Cn\nD9jbWzo6IYQlSeJBCCGEEOIhOHUzm01J6XyXnEFGrgl3Oxv6eZUn3N0ZT72tpcMT4h+TlKRNp1iw\nAK5fh8BA7fP27cFK8mpCCCTxIIQQQgjxj0k35rE1OYNNVzM4dTMbWx0EVChDWw9nGpdzkEaRolT5\n9VeYNQvi4iAnB7p0gfHjwd/f0pEJIUoaSTwIIYQQQvw/5CnF/rRbbErKIPHaDYwKajrZM9LHjWDX\nsjjbWls6RCH+Ubt2aT0bvvgC7Oxg4ECtaWStWpaOTAhRUkniQQghhBDif/BHlpHNSel8czWDqzm5\nlLWxomPFcrTxcMbXSQraReliMsHGjVrC4YcfwMUFXn0VRo6EihUtHZ0QoqSTxIMQQgghxAPKyjOx\n/dpNNiWlcyA9Ex3QpJwjz1d3w7+8E3ZWUkohSpecHFixAt59F44cgapVYfZsGDJEG48phBAPQhIP\nQgghhBB/QSnFiRvZbLqaztbkG9zKM+Fpb0Nk1QqEu5fFw14aRYrSJz0dliyBOXPg4kV48kn4+GPo\n2RNs5Y+8EOJvksSDEEIIIUQRrhtz+e5qBpuSMjibmYO9lY7WrmVo6+5MA2c9VtIoUpRCf/wBc+fC\n4sVa8iEkRBuRGR4O8kdeCPG/ksSDEEIIIUS+PKXYk3qLTUnp7Lx+kzwFtcvYM6aGO0GuZShjI40i\nRel07JhWTvHxx5CXB927axMqmjSxdGRCiNJAEg9CCCGEeOydz8xhU1I6317N4JoxDxcba7pWcqGN\nR1mqO0qjSFE6KQU7dmgNI//7X3BwgOHDYexYqFHD0tEJIUoTSTwIIYQQ4rF0K8/EtpQbbEpK50hG\nFlZA8/JOtPUoS3MXJ2ykUaQopUwm+OorLeHw00/g6gqvvQYvvABubpaOTghRGkniQQghhBCPDaUU\nhzOy2JSUzraUG2SZFFX1tgzzdiXMvSwV7ORHI1F6ZWVppRTvvgsnT4KPDyxYAJGR4Oho6eiEEKWZ\n/OsqhBBCiFIvOSeXb69msCkpnYtZRhysdAS7laWtR1nqltGjk655ohS7fh0WLYJ58+DKFa1vw6pV\n0LUr2Mi7ASFEMZC/autNP0UAACAASURBVIQQQghRKhlNip+u32RTUjp7Um9hAhqU1dOnSnlau5bB\nwdrK0iEK8VCdO6eNw1yyBG7ehLZtIToaDAaZUCGEKF6SeBBCCCFEqXLmVjabktLZcjWDtFwTrrbW\n9KpSnnD3sng52Fk6PCEeul9+gZkz4dNPta979dImVDz5pGXjEkI8viTxIIQQQohH3o3cPL5P1hpF\nnriZjY0O/Ms70dbDGT8XR6zl17uilFMKvv9eaxj5zTfg5AQjR8Lo0eDtbenohBCPO0k8CCGEEOKR\nZFKKg+mZbEpK58eUm+QohY+jHf+u7sbTbmVxsbW2dIhCPHS5ufD551rCYd8+qFgRZsyA55+H8uUt\nHZ0QQmgk8SCEEEKIR8qVbCObkzL45mo6l7NzcbK2oo1HWdp6OFPLyV4aRYrHwq1bsGwZxMTA6dNQ\nq5bWy6F/f9DrLR2dEEIUJokHIYQQQpR4OSYTO65pjSL3p2WigMblHBhU1ZVWFZywl0aR4jGRnAzz\n52sfKSnQooU2HjMiAqxlk48QooSSxIMQQgghSiSlFKduZrMpKYPvkjO4kWfCw86Gfl7laePuTCW9\nraVDFKLYnD6t7W748EPIzIROnbQJFQEBMqFCCFHySeJBCCGEECVKmjGPrckZbEpK57dbOdjqdLSq\noDWKbFzOASt5lyUeI/v2aRMq1qzRdjT07w/jxkGdOpaOTAghHpwkHoQQQghhcXlKsS/1FpuuZrDz\n2g2MCmo52TPSx50QtzKUtZE95OLxoRRs3qw1jNy6FZydtWTDiy9C5cqWjk4IIf4+STwIIYQQwmIu\nZubwzdUMNl9NJzknD2cbKzpVLEcbD2eecLK3dHhCFCujEVat0nY4/PKLlmSYOROGD9eSD0II8aiS\nxIMQQgghilVmnokfr91gU1I6v6RnYQX4uTgSVd2ZFuWdsLOSUgrxeLlxA5Yu1Xo4nD8PdetCXBz0\n7g12dpaOTggh/v8k8SCEEEKIh04pxbEb2WxKSichJYNbeYrK9rYMrlqBMHdn3O3lRxLx+Ll8Gd57\nDxYuhNRUaN0aFi2Cdu3ASga1CCFKEflXXgghhBAPzfWcXL7NbxR5LtOI3kpHa9cytPVwpkFZPTpp\nFCkeQydPwqxZ8NFHkJMDXbvC+PHQvLmlIxNCiIdDcqlCCCGE+EfExcUxffp0ck2KxGs3mXz8Ej33\nnWXJ7ymUsbZmbA13VjXxIdq3Ik86O5TKpMPZs2dZt26d+esvvviCOnXqoNfrCz1v//79BAQE0LJl\nS+Li4szH27Rpg7u7O9OnT//L6/Tv3x+DwYCfnx+zZ88G4OeffyYgIIDWrVsTEhLC6dOn7xvvyZMn\nsbW1Zfv27Xc9Fh8fz2uvvXbX8U8//ZRWrVrRunVrOnbsSHp6uvm1h4SEEBAQwIwZM8zP37RpE/7+\n/vj7+/PNN98UGce8efPMnx84cIAffvjhvrE/in76SUsy1K6tJR0GDYITJ+CzzyTpIIQo3STxIIQQ\nQoh/RHJ2LntSb9J7/1kmn7jEsRtZdKvsQmxDb+Y18KJ9xXI42RTvjx55eXnFer07Ew+tW7fm559/\nxsvLq9DzRo4cSXx8PAkJCcybN4/r168DEBsby8yZM+97ndjYWBISEvjpp59YuHAhGRkZeHp6smnT\nJn744QfGjRvHlClT7nue119/naCgoL/1Grt27cr27dv54YcfeOqpp/j4448BmDBhAlOnTmXHjh1s\n3bqV48ePk5eXR3R0NBs3bmTjxo1ER0cX+T0pzYkHkwn++18IDAR/f0hIgIkT4ffftbKKmjUtHaEQ\nQjx8kngQQgghxN+Wl5dHnz59aNU6iO4jx+DiXZ0Pz6dwJD2LOmX0HO/bhpVPVWd4NTdeHxVFQkIC\nAFOnTsXf35/mzZuzYcMGAF577TX69u1LREQEjRo14vjx40VeMyEhgWbNmhEcHMygQYMAOHToEKGh\noYSEhNCjRw8yMzMBqFatGlFRUXTu3Bmj0cjQoUMJDg6mVatW7N69G4Bx48bh7+9PcHAwq1atAsDb\n25vnnnuOFi1aMG7cOIAi1yuliIiIICEhgVu3buHv78+ZM2eIiYlhw4YNGAwG9u3bh6ur6127HbKz\ns7l58yY+Pj7Y2dkRGBhojunOBMW92OV3HMzKysLb2xtHR0cqVapE2bJlAbC3t8fGRquojYqKYvny\n5ZhMJtq0acOuXbsA2LVrF5UqVSp0zaNHj9KsWTM6dOhQKIFS1LUBbt68Sb169QAtYRAYGAhAhw4d\n2LZtG6dOncLHxwcXFxdcXFyoXr06p06dKnS+mJgYLl68iMFgIDY2lpiYGGJjYzEYDObjUVFRhIaG\n0rFjR27cuPFA98jSsrNh2TKoXx8iIuDcOZg7V/vv669DxYqWjlAIIYrPI5F40Ol0/9LpdAcKfKTr\ndLrROp1upk6nO67T6X7R6XRf6HQ6F0vHKoQQQpR2SinmrlzNaWWLy8wPOVO/BXm5uRhcy9CjSnmm\n1fZEb6XD5o7pFAcOHODHH38kMTGRb775hjFjxmAymQBwd3dn3bp1REdHs3Tp0iKv+/nnnzN9+nS+\n//57YmNjAXjhhRf48MMP2bp1KwEBAebjly5dYsKECaxfv57Y2Fh8fX35/vvvWbt2LWPGjAFg48aN\n/Pjjj3z//fd0794dgKSkJKZOncrOnTtZv3496enpRa7X6XTExsYyfvx4hgwZwpgxY/Dx8WHs2LF0\n6NCBhIQEmjRpUuTrSElJwcXlzx9ZXFxcuHbt2t/+PnTv3p0aNWrQqlUrrK2tzcdv3rzJpEmTGD9+\nPKC9sV+8eDH//ve/efrpp2mev6f/jTfeYMKECYXO+corrzB37lw2bNhAuXLl7nnt2NhYGjRowI8/\n/mhOPNz+XhZ8TSkpKZQvX/4vX+vYsWOpUqUKCQkJDBkyhLFjxzJkyBASEhKoUqUKAIGBgWzZsgV/\nf/97/vkoKdLS4J13wMcHBg8Ge3v45BM4dQpGjYIyZSwdoRBCFL9HIvGglDqhlGqklGoENAFuAV8A\n3wL1lVJPAieBVywYphBCCFHq5CnFt1fTUUqx9tJ14i9cI/LAORYm7ifridqEuJXlwx4dcLO3pVl5\nJxyt7/7RQikFwIkTJ2jRogU6nQ4XFxc8PDxITk4GML9J9/b2JiUlpchYxo8fz7p16+jbty/Lli0D\n4MiRIwwYMACDwcDKlSu5fPkyAFWqVMHb2xvQdkWsWrUKg8FAz549SUtLA+Ctt95i8ODBREZGcuzY\nMfO6SpUqodPp8PLy4vr16/dc7+7uTnh4OAcPHqRHjx4PfE8rVKhAamqq+eu0tDQqVKjwwOtvW7Nm\nDWfPnmXDhg0cPXoU0HZn9OzZk5dffpm6desCoNfrGTRoEKtXr2bUqFEAbNiwAT8/P1xdXQud89df\nf6VZs2YA5gTFqVOnMBgMGAwG826FIUOGcOjQIbp162YuDbEqMIbh9mu612udNGkSBoOBSZMmPdBr\nLRjTiRMn/t6NKiYXL2oNIqtWhZdfhnr1YPNm2L8f+vQBW1tLRyiEEJbzKE61eBr4TSn1O/B7geM/\nAd0sE5IQQghROm1NzuDtU0m8fSrJfOxJZz29/Z7k8u4djH3Cg507d5qTC7eVK1eOy5cv4+7uzoED\nB+jfvz+1atXigw8+QClFWloaSUlJuLm5ARRqNHnnuW5zdXVl/vz5KKWoVasW3bt3p379+qxcuRJP\nT08AcnJyAArtAKhXrx6+vr7mnQ45OTkopQgNDaVTp05s376dyZMns3bt2rsaXiqlilwPcPjwYRIT\nE4mIiGDevHmMGjUKOzs7cnNz//Ke6vV6nJycOHfuHJ6enmzfvv2B+jEUjMloNGJnZ4der8fBwQEH\nBwdMJhP9+vWjS5cudOnSxfz8S5cuERsby3/+8x9effVVYmJiOHDgAAkJCSQmJnLo0CGOHz/OqlWr\n8PX1Ze/evTRv3pw9e/bg6emJr6+vuVQGtPKO2+UjLi4u3Lp1C4CGDRuSmJhIy5Yt2bhxI3PmzKFm\nzZqcOXPG3IDyzJkz+Pr63tU8s2DSoqh7uHfvXp544gn27NlDrVq1HvheFYcjR+Ddd7VdDXl50KOH\nloB46ilLRyaEECXHo5h46AWsLOL4YGBVUQt0Ot1wYDhg/u2HEEIIIe4v1K1soaRDXCNvvBzsyKvd\nh76bNxAUFETTpk2xt7cvtC46OpqwsDDq1auHh4cHAI0bN6Zly5b4+/tjMpmYNWtWoTec9xMTE8Pm\nzZsxmUyEhYXh7OzMggULiIyMxGg0AlqpQFhYWKF1w4YNY+TIkQQHBwPg5+fHjBkzaNeuHaC9kZ48\nefI9r1vU+mnTpjF8+HDi4+Px9vYmPDycwMBAGjRowG+//Ua3bt2YMmUKqampTJ06lT/++IPQ0FCi\noqLo2rUrc+fOpXfv3iiliIqKMpcjDBs2jMTERLKzs9m7dy9ffvnlXfHk5uYSHh4OaEmQHj164OPj\nw2effcaGDRu4cuUK8fHxNGjQgLlz5zJo0CDmzJlDixYt6NWrF19//TUTJ05k4sSJAERGRjJ06FCq\nVavGjBkzGDx4MK6uruak0J1mzpzJd999B2i7Nz788EMA3nzzTYYMGUJOTg7t2rWjTp065uNt2rQx\nf14wKXSbv78/zzzzDD179iQgIID58+dz+PBh5s+fD8DOnTtZsmQJdnZ2rF69+p7fq+KiFPz4I8yc\nCevXg6MjPP88jBmjlVgIIYQoTHev3yqURDqdzg74A6inlLpS4PhEwA/oqu7zgvz8/NTevXsfbqBC\nCCFEKfHt1fRCiYeXfT0Ic3cGtG39tra27NixgzfffJP169dbKkxRihkMBuLj4x+48ebDlJcHX32l\n9XDYtQvc3LS+DVFRcEfVihDCgnQ63T6llJ+l4xB/etR2PLQD9t+RdIgEOgJP3y/pIIQQQoi/J8St\nLJuS0jmfaWRYNVdC3MqaH+vVqxfJyclkZ2fz/vvv/6PXjY6ONk96AG37/ebNm//RazwKtm7dyrRp\n0wodmzx5MiEhIRaK6PGUmQnLl2slFadOQY0asHAhDByo7XYQQgjx1x61HQ+fAt8opZblf90WiAGC\nlFJXH+QcsuNBCCGE+Htm/5bEzus3We0ne8jF4+XaNVi0CObNg6QkaNoUoqPhmWegiIoRIUQJITse\nSp5HZseDTqdzAsKA5wocng/YA9/mN4P6SSn1vAXCE0IIIYQQpcTvv8Ps2bB0Kdy8Ce3baw0jg4Lg\njv6jQgghHsAjk3hQSt0EXO845muhcIQQQgghRClz8KDWMPLTT7UEQ58+MG4cNGhg6ciEEOLR9sgk\nHoQQQgghhPinKQVbt2oNIzdvhjJlYPRoePFFqFrV0tEJIUTpIIkHIYQQQgjx2MnNhc8+0xIOP/8M\nlSrBm29qYzFdXCwdnRBClC6SeBBCCCGEEI+Nmzdh2TKYNQvOnoV//Uvr5dCvH9jbWzo6IYQonSTx\nIIQQQgghSr2rV2H+fO3j2jUICIA5c6BTJ7CysnR0QghRukniQQghhBBClFq//abtbli2DLKyoHNn\nbUJFQIClIxNCiMeHJB6EEEIIIUSps2ePNqFi7VqwsYEBA+Cll6B2bUtHJoQQjx9JPAghhBBCiFJB\nKdi0SUs4fP89lCsH0dEwahR4elo6OiGEeHxJ4kEIIYQQQjzScnLg00/h3Xfh0CHw8tLKK4YNg7Jl\nLR2dEEIISTwIIYQQQohHUkYGfPABzJ4NFy5A/frw0UfQqxfY2Vk6OiGEELdJ4kEIIYQQQjxSLl+G\nefNg4UJISwODAZYsgbZtQaezdHRCCCHuJIkHIYQQQgjxSDhxQiunWL4ccnPh2We1CRVNm1o6MiGE\nEH9FEg9CCCGEEKJES0yEd96BdevA3h6GDIGxY8HX19KRCSGEeBCSeBBCCCGEECWOyQTr12sJhx07\noEIF+M9/4IUXwMPD0tEJIYT4OyTxIIQQQgghSozsbIiP10oqjh+HatW0fg6DB4OTk6WjE0II8b+Q\nxIMQQgghhLC41FR4/32YM0drHtm4MaxcCd26gY38xCqEEI80+WtcCCGEEEJYzIULWrJhyRJtPGZ4\nOHz8MTz9tEyoEEKI0kISD0IIIYQQotgdPgwzZ8KKFaAU9OypTaho1MjSkQkhhPinSeJBCCGEEEIU\nC6Xghx+0hpFffw2OjlqzyNGjoXp1S0cnhBDiYZHEgxBCCCGEeKjy8uCLL7SEw5494O4Or78O//43\nuLpaOjohhBAPm5WlAxBC/P+lpqayfPlyAC5fvoy/vz/BwcHk5OQ88DlGjBhB69atWbduHfHx8TRr\n1oxp06bx1ltvcejQoXuu69u37/8U87x58/6ndQ+y1vcvBrufPHkSW1tbtm/fXuRjLVu2xGAwEBAQ\nwMGDBwE4ffo0rVu3xmAwEBwczIULFwA4e/YsISEhBAQEMGPGjP/59QghRGmVmQmLFsG//gXdu8P1\n67B4Mfz+O0yaJEkHIYR4XOiUUpaOoVj5+fmpvXv3WjoMIf5RZ8+eZejQoWzZsoWVK1dy/Phxpk6d\n+rfOUatWLU6ePAlAmzZtWLx4MT4+Pg8jXEBLDpw6deqhrP2rx/v378+lS5d47bXXaNWqVaHHcnNz\nsba2RqfTsXXrVhYtWsSaNWsYN24cDRo0YODAgcTFxXHs2DHefvttevXqxQsvvEBgYCChoaHMnz+f\n2rVr/0+vSYiSbPZvSey8fpPVfg/v7wRRuqSkwMKF8N57cPUqNGsG0dHQpQtYW1s6OiFEaafT6fYp\npfwsHYf4k5RaCFEKxMTEsG/fPmrWrAlob6AvXrzI0qVL73rutm3bmDx5Mjqdjtq1a7No0SJGjRrF\n+fPnMRgM9O7dm127dtGnTx9eeukl1q9fz9ChQ2nVqhVz585lxYoVODo6EhkZycCBA81v8tPS0hg2\nbBgpKSkopViyZAm+vr4YDAYaNWrE0aNHycvL4+uvv2bBggVcvHgRg8FA//79sba25ssvv8TKyoqT\nJ0+yaNEiAgMDOXToEGPGjMFkMuHm5sZHH33EokWLCq0dMmRIkfdkzJgx7N+/n6pVq7J8+XKsrKzY\ntWsXlSpVwvoeP/XaFJjXlp6ezpNPPglAvXr1SE1NBeD69et4eHgAcODAAQIDAwHo0KED27Ztk8SD\nEOKxdvYszJ4NS5fCrVvQoYOWcAgMlAkVQgjxWFNKPVYfTZo0UUI8in7ful1tHDTG/PH71u3mx86c\nOaOefvpppZRSy5YtU6+//nqR5zCZTKpRo0YqNTVVKaXU6NGj1X//+1+llFJPPPGE+XlBQUHq/Pnz\nSimlBg4cqH788Ud16NAh1bp1a2U0GpVSSuXm5hZa9/LLL6uVK1cqpZQ6cOCAevbZZ83n+uKLL5RS\nSg0bNqzI6y1btkx17txZKaXUjh07zGsDAwPV77//rpRSas6cOeq99967a21RqlWrphITE5VSSg0d\nOtR8/U6dOqnk5GTzayrK3r17VYsWLVTlypXVTz/9pJRS6ty5c6pOnTqqQYMGqlatWub7V7NmTfO6\nDz/8UM2YMeMv4xLiURVz6orqvue0pcMQJdj+/Ur17q2UtbVStrZKRUYqdfiwpaMSQjyugL2qBLz3\nlI8/P2THgxCPiFtXkjny0RowmdBZW1M9POhvnyM5OZmzZ8/SuXNnAG7cuMG//vWvB1p79OhRWrVq\nZd4VcOeugUOHDrFt2zYWL14MFN490KRJEwC8vb1JSUkp8vxFPefIkSMMGDAAgKysLEJDQx8oVp1O\nR7NmzQBo3rw5J06cYMOGDfj5+eF6R0Fxx44duXHjBiNGjKBbt240adKEnTt3snv3bkaMGMHu3bt5\n+eWXmT59Ol27dmXlypW8+uqrLFiwACurP9vkpKWlUaFChQeKTwghSgOlYMsWrWHkli1QtiyMGQMv\nvgheXpaOTgghREkiiQchHhHeYYHaT3mAk6cHtbp3ND9mZ2dHbm7ufc/h5uZGjRo1WL9+PWXKlAHA\naDQ+0PXr1avHokWLyMvLw9raGpPJVOiNd7169fD39+eZZ54BKNTYUldgf63Kfw0F197rOfXr12fl\nypV4enoWOueda++klGLv3r00b96cPXv20LZtWw4cOEBCQgKJiYkcOnSI48ePs2rVKtavX29el5WV\nhV6vB8DFxQVHR0fz+dzc3ADw8PDg2rVrADRs2JDExERatmzJxo0bmTNnzl/fRCGEKAVyc2HNGi3h\ncOAAeHrC22/Dc89BuXKWjk4IIURJJIkHIR4R+2d/YE48tH5nElYFdhxUqlQJBwcHnn32Wdq3b3/P\nc+h0OmJiYoiIiEAphZWVFbNnzzb3Mvgr9erVo3PnzrRs2RInJycGDhzIwIEDzY9PnDiR559/nvfe\new+lFB06dGDcuHH3PN/tJEXPnj3v+ZwFCxYQGRlpTo688sorhIWFFVrbq1evu9bZ2Niwdu1aoqOj\nqVKlChERETzzzDNMnDgRgMjISIYOHUq1atUKrfvuu+94++23zbs5bicSJk2axHPPPYeNjQ1Go5H3\n338fgDfffJMhQ4aQk5NDu3btqFOnzn3voxBCPKpu3oTYWIiJ0aZS1K6tfd23L9jbWzo6IYQQJZlM\ntRDiEXDzylVin2hJjY6hVA5oSqOogYUSD0II8TDJVIvHW1ISzJ8PCxbAtWvQqpXWMLJDB7jPBjQh\nhLAImWpR8siOByEeAbtmvEduVjYBr4+nfM0aD7Tm6NGjREVFFTo2fPhw+vTp8zBCtIitW7cybdq0\nQscmT55MSEiIhSISQojS49QpmDUL4uIgO1sbhTl+PPj7WzoyIYQQjxrJUwtRwqX/foFfFn9M/cG9\nHjjpAFC3bl0SEhIKfZSmpANASEjIXa9Rkg5CWE5cXBzTp0+3dBgWdfbsWdatW2f++osvvqBOnTrm\n/jG37d+/n4CAAFq2bElcXJz5eJs2bXB3d7/vfezfvz8GgwE/Pz9mz54NwM8//0xAQACtW7cmJCSE\n06dP3zfekydPYmtry/bt283Hdu+Gbt2gZs14lix5jf794dgx+PxzLenwzjvv0Lx5cwICAhg5cqS5\nL09ycjI9e/YkJCSE8PBw8/ni4uJo2bIlAQEB7N+//64YUlNTWb58ufnrhIQEfvnll/vGLoQQ4tEh\niQchSrjEqTGg0+E/ebSlQxFCiEdOXl5esV7vzsRD69at+fnnn/G6Y8zDyJEjiY+PJyEhgXnz5nH9\n+nUAYmNjmTlz5n2vExsbS0JCAj/99BMLFy4kIyMDT09PNm3axA8//MC4ceOYMmXKfc/z+uuvExQU\nhFLw9ddgMEDz5vDddxARoU2pWLIECg5AeuaZZ9i1axc7duzgypUrbN26FYDRo0czefJktm7dyubN\nmwG4fv068+bNIyEhgfj4eEaNGnVXDJJ4EEKI0k8SD0KUYCnHfuXoR2to9MJAynpVtnQ4QghhlpeX\nR58+fQgKCmLChAn4+voWerzg10OHDiUhIQGAqVOn4u/vT/PmzdmwYQMAr732Gn379iUiIoJGjRpx\n/PjxIq+ZkJBAs2bNCA4OZtCgQYA2yjc0NJSQkBB69OhBZmYmANWqVSMqKorOnTtjNBoZOnQowcHB\ntGrVit27dwMwbtw4/P39CQ4OZtWqVYA20ve5556jRYsW5ga5Ra1XShEREUFCQgK3bt3C39+fM2fO\nEBMTw4YNGzAYDOzbtw9XV9e7djtkZ2dz8+ZNfHx8sLOzIzAw0BzTnQmKe7GzswO0aTze3t44OjpS\nqVIlypYtC4C9vb15rHFUVBTLly/HZDLRpk0bdu3aBcCuXbtwd69EVpYXAwZoPRtOnDhK1arNaNas\nA/b268gfgFRIzZo1zZ/fvk5eXh6HDx9m1qxZBAUFsXDhQgB2795NYGAgdnZ2+Pj4kJGRQXZ2dqHz\nxcTEsG/fPgwGA5988glxcXG88cYbGAwG8vLy8PX1ZcyYMQQFBdGvXz9MJtMD3SMhhBAlh/R4EKIE\nS5w8ExtHB5q9MtLSoQghRCFfffUVzs7OrFixgh07dvDpp5/ed82BAwf48ccfSUxMJC0tjWbNmtGu\nXTsA3N3d+eSTT1ixYgVLly7l3XffvWv9559/zvTp0wkPDze/+XzhhReIj4/H29ubuXPnEhsby4gR\nI7h06RITJkzA29ubxYsX4+vry9KlS7ly5Qpdu3Zlx44dbNy4kYMHD2JjY2M+X1JSElOnTqVixYrU\nqVOHyZMns2LFiiLXx8bG0r59e/MbYx8fH8aOHUt8fDxLly69531ISUnBxcXF/LWLi4t5TO/f0b17\nd7Zt28a///1v8zQegJs3bzJp0iRiY2MB7Y19SEgIO3bs4Omnn6Z58+akp8PgwW9w7doyLl9+iRo1\n4OOPYdWqV3j11bn4+/szbNiwv7z+tm3buHTpEq1bt+by5cscOnSIjz76iDp16hASEkJwcDApKSmU\nL1/+rtd6e0wywNixYzl69ChbtmwB4Ndff8XX15d+/foBkJubS48ePZg9ezbDhg1j3bp1dOnS5W/f\nLyGEEJYjOx6EKKGu7PuFk59twO+l53B0q2DpcIQQj6k8pbiQlQPAt1fTycuv5//1119p2rQpAM2b\nN0en093zHLd7AJw4cYIWLVqg0+lwcXHBw8OD5ORkAJo0aQJoOw5SUlKKPM/48eNZt24dffv2Zdmy\nZQAcOXKEAQMGYDAYWLlyJZcvXwagSpUqeHt7A9quiFWrVmEwGOjZsydpaWkAvPXWWwwePJjIyEiO\nHTtmXlepUiV0Oh1eXl5cv379nuvd3d0JDw/n4MGD9OjR44HvaYUKFUhNTTV/nZaWRoUKf//v+TVr\n1nD27Fk2bNjA0aNHAW13Rs+ePXn55ZepW7cuAHq9nkGDBrF69Wq6dx/FhAlQqdIGjh71o25dV0JD\ntQaS/frBb7/9SrNmzQDt+wpw6tQpDAYDBoOBU6dOAfDLL78wYcIEPv30U3Q6HeXLl6dy5co0bNgQ\nOzs7DAYDhw4deQJ5rgAAIABJREFUuudrHTp0KAaDgfnz59/3dep0ukIxnThx4m/fKyGEEJYlOx6E\nKKG2T3wbvWt5mowdbulQhBCPiTyluJxl5HyWkQuZRi5k5fBLeibnMo0AvH0qCYAwd2d8fX3ZsmUL\nQ4YMYc+ePdw5nrtcuXJcvnwZd3d3Dhw4QP/+/alVqxYffPABSinS0tJISkrCzc0NoFDi4l6jvl1d\nXZk/fz5KKWrVqkX37t2pX78+K1euNP8GPSdHS5IU3AFQr149866E289RShEaGkqnTp3Yvn07kydP\nZu3atXclUJRSRa4HOHz4MImJiURERDBv3jxGjRqFnZ0dubm5f3mf9Xo9Tk5OnDt3Dk9PT7Zv3/5A\n/RgKxmQ0GrGzs0Ov1+Pg4ICDgwMmk4l+/frRpUuXQjsCLl26xPz5sdSq9R98fV8FYqhb9wB6fQK2\ntokcOnSI8eOPs2rVKnx9fdm7dy/Nmzdnz549eHp64uvray6VAS0RMXjwYNauXWv+/un1emrUqMH5\n8+epWrUq+/bto2vXrvj4+DBp0iSMRiOXLl2iTJky2NvbF9oR8scffxS6Z3feQ6VUoZjatm37wPdK\nCCFEySCJByFKoPPbdnL2mwSCZk3G3rmspcMRQpQiSilSjXn5yYUczmcauZBl5GJWDn9kGckt8J6/\nrLUVXg62hdaHuml/J3Xp0oU1a9YQFBRE06ZNsbe3L/S86OhowsLCqFevHh4eHgA0btyYli1b4u/v\nj8lkYtasWVhZPfjmy5iYGDZv3ozJZCIsLAxnZ2cWLFhAZGQkRqOWHHnllVcICwsrtG7YsGGMHDmS\n4OBgAPz8/JgxY4a5zCMrK4vJkyff87pFrZ82bRrDhw83l3mEh4cTGBhIgwYN+O233+jWrRtTpkwh\nNTWVqVOn8scffxAaGkpUVBRdu3Zl7ty59O7dG6UUUVFR5nKEYcOGkZiYSHZ2Nnv37uXLL7+8K57c\n3Fzz1IicnBx69OiBj48Pn332GRs2bODKlSvEx8fToEEDevSYy7PPDuLq1Tk4OLSgRo1eTJjwNUOG\nTAQmAhAZGcnQoUOpVq0aM2bMYPDgwbi6upqTCncaPXo0qampDBw4ENB2onTo0IG5c+fSr18/jEYj\nISEhPPXUU4DWYyIoKAidTsfcuXPvOl+lSpVwcHDg2WefJSoqirCwMEaPHs369etZvXo1NjY2rF27\nlujoaKpUqUJERMQ9v1dCCCFKJt29fqtQWvn5+am9e/daOgwh7kkpxaetupD++wUG/7odWwcHS4ck\nhHgEZeaZuJCfXLiYZeR8Zg4XMrXdDLfy/mzOZ6uDKno7vBxsqaK3paqDHV75/3W2sWJLcoZ5pwPA\ny74ehLk7A9q2fltbW3bs2MGbb77J+vXri/11iruZTLBuHbzzDuzcCa6uMGIEvPACuLtbOrq/z9fX\n11ziIYQQD0Kn0+1TSvlZOg7xJ9nxIEQJc3rDFv5I3EvYknck6SCE+Et5SnElO1dLKuQnFy5mGjmf\nlUNyTuExku52NlR1sCXUrSxVCyQZPOxtsP6L/gwh+TscQt3KsiU5w/w1QK9evUhOTiY7O5v333//\nH31t0dHR5kkPoG2/vz2i8XGydetWpk2bVujY5MmTCQkJueu5WVkQHw/vvgsnToCPD8yfD4MGgaNj\ncUUshBBC3E12PAhRgiiTiY8bh2O8lUnk0QSsbW3vv0gIUaoppUjNzdN6LuQnFW73X7izNKJMfmmE\nl96Oqg62eDnYUVVvS2W9LXpr6SddWl2/DosXw9y5cOUKPPUUREfDs8+CjfyKSQjxGJIdDyWP/HMk\nRAlyfNU6rv5yjA4rF0rSQYjHTGaeiYv5pREXCjR3PJ9p5OYdpRGV9XZU1dvhX97JnFzwcrCjnI3V\nX06XEKXL+fMwZw4sWQI3bkCbNlrCITgY5I+BEEKIkkQSD0KUEHlGI4n/mYl7w7r8q0cnS4cjhHgI\nbpdGXChQGnE7yXA1p/AkBHc7G7wcbAlxK2Puu+DlYEfF+5RGiNLv0CGYORNWrgSloHdvGDcOGja0\ndGRCCCFE0STxIEQJcWTZKlJ/O8sz6z9C9ze6vAshShalFGm5pgJJhRxzicSlLCPGAqURTtZWVHWw\npaGzQ36JhNZ3obLeFgcpjRAFKAUJCVrDyE2bwMlJaxg5ejRUq2bp6IQQQoi/JokHIUoAY2YmO6fO\npnJLP3zaP23pcIQQDyDrdmlEgYkRF/L7L9woUBpho4PKeq3vgn95J7zyJ0h4OdjiYmMtpRHiL+Xl\nweefawmHvXvBwwPeeAOefx4qVLB0dEIIIcSDkcSDECXAwYUfceOPy3RYuUDehAhRghQsjdBGUv7Z\nd6HI0gi9LcFuZQr1XZDSCPG/uHUL4uJg1iw4fRpq1oT334cBA0Cvt3R0QgghxN8jiQchLCw7PYNd\nb86nettgvFq3sHQ4Qjx2lFKkFyiNOF8gyfBHVk6h0ghHayuq6v8sjbg9krKKlEaIf0hyMixYoI3B\nTE6GFi208ZgREWBtbenohBBCiP+NJB6EsLB9MUvISrlOqzdetnQoQpRq2XeWRtxOMmQaybhHaUTz\n8o7mvgteeltcbKU0QjwcZ85ATAzExkJmJnTqpE2oCAiQCRVCCCEefZJ4EMKCbl1NYe+s96nVvSMV\nn2pg6XCEeOTlKUVSdq6510LBkZRJd5RGuNlZ46W3I8itDF56O6rmN3espLeV0ghRbPbt0yZUrFmj\n7Wjo10+bUFG3rqUjE0IIIf45xZZ40Ol0dkB7IBCoDGQCh4ENSqkTxRWHECXJ7jffI/dWJi2njbd0\nKEI8UtKMeebkQsGRlBezjBjVn7URjtY6vPR2NHDWF2jqqO1ekNIIYSlKwebNWsLhu+/A2VlLNowa\nBVWqWDo6IYQQ4p9XLIkHnU73H6Ar8AOwD/gW0AO1gDk6bd/qOKXU4eKIR4iSIP38RQ4sXE69yB64\n1va1dDhClDgFSyMuFCiNuJBlJCP3z9IIax1UttcSCk1dHLWdC/nJhfJSGiFKEKMRVq/WEg4HD0Ll\nytq0iuHDoVw5S0cnhBBCPDzFtePhF6XU6/d47B2dTucJVC2mWIQoEX6aNgeUwn/KWEuHIoTFmMyl\nEQV3Lmj/TcrOpUBfR1xtranqYEeQaxm89H8mFzylNEKUcDduaL0bYmLg3DmtjGLZMujTB+zsLB2d\nEEII8fAVS+JBKfXVncfySy9slFK3lFKXgEvFEYsQJcG1k79xeNkqGo+IxNlb9tWK0i/NmMfF26UR\n+ckFbWqEkZwiSiPqldXTxv3PvgtVHOxwlNII8Yi5cgXeew8WLoTr16F1a21iRfv2YCV/nIUQQjxG\nLNJcUqfTDQL6ANY6nS5RKTXJEnEIYSmJk9/FRm9P81dHWToUIf4xOab80ohMI+eztGkR5/ObO6bf\nURrhaW9LVQdbmro4an0X8ps7SmmEKA1OnoRZs+CjjyAnB555BsaP10ZjCiGEEI+j4urx0F4p9XWB\nQ22UUmH5jx0EJPEgHhtJBw5zYtU6Wkx6EUcPN0uHI8TfYlKKqzm5hZo6ns/M4WKWkStFlEZ4OdgR\nWKFMob4LlextsbGS5IIofXbt0no2fPGFVkIRGQljx0KtWpaOTAghhLCs4trx0FSn0w0D/pPfQPKI\nTqd7HzABx4spBiFKhO0T30Zf3gW/cc9bOhQh7indmFeo38KFzBzOZxm5mFm4NMLBSkdVBzvqlNET\n7p4/NSK//4KURojHgckEX3+tJRx+/BHKl4dXX4WRI6FiRUtHJ4QQQpQMxdXjYapOp6sMvK7T6YzA\nZKAC4KiU2l8cMQhRElzYvpszX28l8O2J2JdztnQ44jGXYzLxh7k0onCSIa1AaYQVUFlvi5eDLU3K\nOZqTC1Ud7KggpRHiMZWTAytWaBMqjh4Fb2+YMweGDIEyZSwdnRBCCFGyFGePh+tAFFAP+BBIBGYV\n4/WFsCilFNtfeRMnz4o0HjHI0uGIx0TB0ogLt5s75n9eVGlEFQdbAm6XRuT3XZDSCCH+lJYGS5Zo\nSYY//oCGDSE+Hnr0AFtbS0cnhBBClEzF1eNhKtAq/3qfKaU66nS6rsDXOp0uVim1ojjiEMKSzm76\nnovbd/P0whnYOjpYOhxRymTk5pmbOl7I/HP3wsUsI9mmwqURXvmlEWHufyYXqujtcLKR0ggh7uXi\nRZg7F95/H9LT4emntZGYYWEgm36EEEKIv1ZcOx46K6Ua6bT9uPuA95RSn+t0uv8C0tZflHrKZGL7\nxLcpV6MaDYb0tnQ44hGVY1JcyvqzqWPBvgupuXnm51kBnnqtHOKpAqURXg52uEpphBB/y9Gj8O67\n2q6GvDxtZ8P48fDUU5aOTAghhHh0FFfi4ZhOp1sIOALbbx9UShmRcgvxGDj52XqSfj5Mu4/nYW1n\nZ+lwRAlmUorknNz8aRFGLuYnFy5kaqURpgLPrWBrjZfelpYVnPBysKWqXmvuWMneFlspjRDif6YU\nbN+uNYxcvx4cHOC557QJFT4+lo5OCCGEePQUV3PJ3jqdrjFgzJ9qIcRjw5Sby47/zMStfm1q9+5i\n6XBECXHjjtIIbeeCNpYyq0BphD6/NKJ2GT2h+aURXg62VNHbUsbG2oKvQIjSJy8PvvpKaxj500/g\n5gZTp0JUlPa5EEIIIf43xdXjoYVS6qe/eLwM4K2UOloc8QhRnI58tIbrJ0/T+csPsbKWN4olXVxc\nHBcuXGDSpEn/73PdLo34s6ljfolElpFU492lEVX0tjQq52Duu+Clt8PVrvhLI86ePcsvv/xCREQE\nAK+99hqrVq2iYv5swO+++w5ra2v279/PyJEjUUoxfPhwIiMjizxfTEwMX375JXl5eTzxxBPExsaS\nm5tLREQEmZmZ5ObmMmXKFNq1a/eXcRmNRurWrcvAgQPv+v5cuHCBfv36kZCQUOj4yZMniYyMxM7O\nDqPRyMKFC2nYsCFZWVkMGTKEc+fO4e3tTWxsLHq9nrNnzzJ48GCys7Pp0KEDr7766l1xxMXF0bVr\nV5ydnUlNTWXdunUMGDDgAe+uKImysmD5cq2k4tdfoUYNWLgQBg4ER0dLRyeEEEI8+oqr1KKPTqeb\nCWxE6/FwFdADvkBw/n/HFVMsQhSb3Kwsdk6NwbPFUzwREW7pcMRDoJQiOSePC1k55mkRWnNHI5ez\njYVKI8rnl0b4l3cyj6P0crDF8z6lEXl5eVgXY9Lq7NmzrFu3zpx4AJg4cSL9+vUr9LyRI0cSHx9P\nlSpVaNGiBZ07d6Z8+fJ3nW/EiBGMHTsWgAEDBrB582bCw8P54IMPqF69OsnJyQQEBNw38fD+++9T\nu3btv/VaatSowY4dO9DpdGzdupXp06ezZs0a4uLiqF27Np988gnTpk0jLi6O559/ngkTJjB16lQC\nAwMJDQ2la9eud10zLi6O0NBQc+Jh+fLlknh4RF2/DosWwbx5cOUK+PnB6tXQtStInlgIIYT45xRL\nC3Ol1CjgGbSRmv2BmcCrQAPgI6VUoFJqV3HEIkRxOrj4YzLO/0GrGROkoV8JlJeXR58+fQgKCmLC\nhAn4+voWerzg1wMHD2H5xs1suZpBl7EvU7WxHx4NGtNsdiy995/l2bETGD6wP1P792JJlzDKXzlH\nnyrlecW3IvMbePFlUx/W+PnQOfkUq3p3ZHG/rnzw0gi8Hew4fuQwoaGhhISE0KNHDzIzMwGoVq0a\nUVFRdO7cGaPRyNChQwkODqZVq1bs3r0bgHHjxuHv709wcDCrVq0CwNvbm+eee44WLVowbpyW0y1q\nvVKKiIgIEhISuHXrFv7+/pw5c4aYmBg2bNiAwWBg3759ALzzzju0atWKefPmAZCdnc3Nmzfx8fHB\nzs6OwMBAc0x3ssvva6KUwmQy4evri62tLdWrVwfAwcEBKyvtn6PVq1czZMgQAKZMmUJMTAwAN27c\nYOPGjTz77LPm8964cYMOHToQGhrKjBkziry2jY2N+f+99PR0nnzySQC2bdtGx44dAejUqRPbtm0D\n4MCBAwQGBgLQoUMH8/Hbtm7dyoEDB+jevTsjR44kJiaGffv2YTAY2LBhA6+99ho9evSgQ4cONG/e\nnKNHZSNfSXTuHIwZA1WrwsSJWqPI77+H3buhe3dJOgghhBD/tOLa8YBSKhlYlP8hRKmXk3GDXW/M\nwzs0EO/gAEuHI4rw1Vdf4ezszIoVK9ixYweffvopKTm5/H4rh1UXr5NqzGPM4QuczzKyMzmDw6ev\nYpP0Pad+3E77pZ/hnpvJJ7068n6vZ9noXhYcq7N43lxWrlzJ/u++IrJd4F3X/Pzzz5k+fTrh4eGY\nTNp+iBdeeIH4+Hi8vb2ZO3cusbGxjBgxgkuXLjFhwgS8vb1ZvHgxvr6+LF26lCtXrtC1a1d27NjB\nxo0bOXjwIDY2NubzJSUlMXXqVCpWrEidOnWYPHkyK1asKHJ9bGws7du3x9fXlzFjxuDj48PYsWOJ\nj49n6dKlAFSvXp0pU6aQlZVFp06daNy4MU888QQuLi7m1+Xi4sK1a9fuea/feOMN4uLiqFmzJlWr\nVi302JgxY4iOjgagR48efPvtt4wePZrTp0/z1VdfATBz5kxGjx7NxYsXzes++OADWrVqxSuvvMIn\nn3xyzzf5+/btY8SIEZw7d47PP/8cgJSUFPPujIKx376Ht49fvny50LlCQkJo1KgR8fHxeHl5cfbs\nWY4ePcqWLVsA2LNnD+XLl2f16tXs2LGDV199lS+//PKe90UUr4MHtf4Nn36qjcDs3RvGjYP8fJQQ\nQgghHhIZ2i7EQ7JvzgdkJl8jcMYES4cigDyl+PZqOiaTic8vXWdf6k3W7T/EDZ/avHrsDxbbVeJK\nTh6x51LYmpzBB+dSzE0e/cs7UcvRjsiqFeivy2B4uIGPm1QnpnkdalbxpKWNEU+9La2aNUWn0+Ht\n7U1KSkqRcYwfP55169bRt29fli1bBsCRI0cYMGAABoOBlStXmt/sVqlSBW9vbwAOHTrEqlWrMBgM\n9OzZk7S0NADeeustBg8eTGRkJMeOHTOvq1SpEjqdDi8vL65fv37P9e7u7oSHh3Pw4EF69OhRZMyu\nrq7odDocHBzo2rUre/fupUKFCqSmppqfk5aWRoUKFe55/ydOnMjJkyfx8fEhLi7OfPz111/H2dmZ\nQYMGmY9FR0czd+5cJk6ciE6n48qVK/z888+EhYUVOufJkydp1qwZAM2bNzcf79ixIwaDgc8++wyA\nJk2asHPnTr744gtGjhwJUCj+grHf3nlR8Phnn32GwWAw75C4n4IxnTx58oHWiIdHKdi6Fdq2hUaN\ntOaRL74Ip09rfR0k6SCEEEI8fMW240GIx0lmyjX2vvs+vs+0o1LTRpYORwCbktKZffoqb59KMh9L\ncnQlbddO2rbtiv70MZysdXSoWI4cXSZvNPUhyMOVl91scHd3ZcWvx2jg7EC5qnUZv3wZSinS0tJI\nSkrCLb/dfcFyGqXUXTGA9iZ+/vz5KKWoVasW3bt3p379+qxcuRJPT08AcnJyAAr1dahXr555V8Lt\n5yilCA0NpVOnTmzfvp3Jkyezdu3au8p6lFJFrgc4fPgwiYmJREREMG/ePEaNGoWdnR25ubnm9amp\nqbi4uKCUIiEhgcjISPR6PU5OTpw7dw5PT0+2b9/OlClTinzNWVlZ6PV6dDod5cqVwzG/W9/8+fP5\n9ddf+eijj8zPNZlMvPDCCyxbtoyXX36Zb7/9lkOHDnH16lXatm3LxYsXyc7OpmHDhtSsWZO9e/fy\n9NNPs2fPHvM51q9ff9e1QdvBcPvaQUFBfP311zRq1Iivv/6aoKAgABo2bEhiYiItW7Zk48aNzJkz\nhzp16tCtWzfzOQvenzvvFcDevXsZMmQIe/bsoWbNmkXeE/Hw5ebC2rXaSMz9+6FiRZgxA55/Hopo\nRSKEEEKIh0gSD0I8BLvfXkhOxg1aTY+2dCgiX4Oy+kJfv13HkyoNh/Hi4O0cH9Wfpk2b4uroQL2y\nei6k2VDGxpro6GjCwsKoV68eHh4eADRu3JiWLVvi7++PyWRi1qxZhX5Lfj8xMTFs3rwZk8lEWFgY\nzs7OLFiwgMjISIxGIwCvvPLKXb/dHzZsGCNHjiQ4OBgAPz8/ZsyYYW7ImJWVxeTJk+953aLWT5s2\njeHDh5vLPMLDwwkMDKRBgwb89ttvdOvWjSlTpjBr1ixOnDiBUgqDwUD79u0BmDt3Lr1790YpRVRU\nVJGNJQFeeukljhw5Yu7vMHXqVJKSknjxxRfN/SlAm5bxxhtvEB4eTmRkJJmZmUycOJF33nmH0NBQ\n4M+pI506dSIjI8NcmlG/fv0ir/3dd9/x9ttvm5M4c+bMASAyMpLBgwcTGBiIl5eXeffJm2++yZAh\nQ8jJyaFdu/9j797jc67fB46/3rPN5jBzmEhNaiYhytiR3ZvN2UisUjQslUMhllo55ZSy4kcRc4jy\nlXTQJIdsMvPF5CyblETOc9rMTvf798dnu1k21Jf73riej4eH3Z/78/58rnuT3Nd9Xde7HfXr17/m\nml27dqVv3774+fkxZswYnJ2defLJJ+nfvz9gzJ5o164dp0+fLlTdIazj0iWYNw+mTIHffwdPT5g9\nG557DpycbrxeCCGEELeeKu5TudtyM6U2A3OBxVrrC1a78VW8vLx0cnKyLW4t7hIXjx5jrkcAnuEd\nabdgqq3DEfn+czSNOYevzCB43aM6oW4u5OTk4ODgwMaNG5k4cWKhT8uF+KdGjx6Nh4fHNTuAiNvv\n1CmYMQOmT4czZ8DXF6KiICwM/kFuUAghxB1AKbVNa+1l6zjEFdaueHge6A3sUEolAfO01j9aOQYh\nbqvN46ZizsvDb/Rrtg5FXMWncnnmHE7jDY/qmIHgahUBePrppzl9+jRZWVnMmjXrlt4zKiqq0E4P\njo6OrF69+pbeoyRJS0uja9euhY6FhYVZttIU4nY4eBBiYowqh8xMI9EQFQX+MtNXCCGEKDGsWvFg\nualSZYAwYDqQjVEF8X9a63PXXXgLSMWDuJ3OHTzEvIcDefTF52g1fbytwxFX+TMzm947DvNm3Xss\nSQchROmVnGzsUPHll2BvDz17wmuvQRHdMUIIIe4yUvFQ8lh9xoNS6hGMqodOwLfAZ0AAsA543Nrx\nCHErbRz5PnaODvi89aqtQxFCiDuO1rBqlTEwMj4eKlWC4cPhlVfg3nttHZ0QQgghimPVxINSagtw\nCaPCYaTWOjP/qY1KKSmKFKXaqV372L/4G5qPGED5GtVtHY4QQtwxcnLgP/8xKhx274ZateD99+GF\nF8DFxdbRCSGEEOJGrF3x8JzWushNzbXWYVaORYhbKvGtyZSt5EKz4S/bOhQhhLgjXLwIc+bABx/A\nn39CgwawYAE8/TQ4Oto6OiGEEELcLGvPee6plHIteKCUqqyUGmPlGIS45f7alMxv362hWdTLOFV2\nvfECIYQQxTp+HKKjwd0dhg6FBx+EFSuMaodevSTpIIQQQpQ21k48dLx6gKTW+izGrAchSi2tNYlv\nTqLcPW48/kpfW4cjhBClVkoK9OsHtWvDxIkQEgKbN0NCArRvD0rZOkIhhBBC/BvWbrUoo5Ry1Fpn\nAyilnAD53EKUan+s3cCfCZsI/r9xOJQvZ+twhBCi1Nm0yRgY+e23ULYs9Olj7FDh4WHryIQQQghx\nK1g78fAfYI1Sam7+4z4Yu1oIUSoVVDu41L6PRi/0sHU4QghRapjNRvvE5MmQmAiVK8Nbb8HAgVBd\n5vMKIYQQdxSrJh601hOUUruBVvmHJmutV1gzBiFupV+/XsmJ5J20nf8B9mXL2jocIYQo8bKy4LPP\njB0q9u832iqmTjWqHCpUsHV0QgghhLgdrF3xgNb6O+A7a99XiFvNnJdH4luTqVK/LvWfe9LW4Qgh\nRIl2/jzMmgUffgjHjkGTJvD559C9O9hb/V8jQgghhLAmq/6vXinVDPg/oD5QFlBAltZaduEWpc6+\nhV+S9ssBwpbNxq5MGVuHI4QQJdKRI0ZFw6xZxvaYoaHGlpghITIsUgghhLhbWPszho+A5zBmPTQH\nIoDaVo5BiP9ZblYWm0bHcI9XYzyeaGfrcIQQosTZswfef9+oajCbITwchg+Hxx6zdWRCCCGEsDZr\nb6dpp7VOAey11jla69lAByvHIMT/bNcnn3HhjyMETBiBko/shBACAK3hp5+gceP5NGo0jqVL4eWX\n4ddfjQTE3ZR0OHToEMuXL7c8/vrrr6lfvz5OTk6Fzvv555/x9/fHz8+P+fPnW463adMGNzc3xo0b\nd9379OzZE5PJhJeXFx988AEA27dvx9/fn5YtWxIcHMxvv/12w3hTU1NxcHAgMTHxmucWLVrE6NGj\nrzk+efJkvL298ff3Z9CgQWityczMJDQ0lICAAHx8fFi5cmWhNfHx8SilOHLkyA1jEkIIceewdsVD\nhlLKEdiplJoAHAOkRl2UKjkZl9g8bir3B/lRO6SFrcMRQgiby8uDb74xdqjYsgUqVoTgYPjiC6ha\n1dbRGfLy8ihjxba4gsRDWFgYAC1btmT79u00bNiw0HmDBg1i0aJF1KpVCx8fHzp37kzlypWJjY1l\n7dq1N3yDHhsbi6OjI7m5udSvX5/IyEhq1qzJDz/8QMWKFfn+++8ZNWoUCxcuvO513nnnHQIDA//R\na3ziiSeIiooCIDw8nHXr1tGyZUtmz57NAw88wOnTp/H396ddO6MyUGtNTEwMXl5e/+g+QgghSj9r\nVzxE5N9zIJAH1AW6WTkGIf4nP0+dw6WTp6XaQQhx18vMhI8+ysPVtQfdugWSmjoCNzcP3n8fgoKM\npIOHh4fl/MjISBISEgAYM2YMvr6+eHt7s2KFscHV6NGjefbZZwkLC6NJkybs37+/yPsmJCTQvHlz\ngoKC6N27NwC7d+8mJCSE4OBgwsPDyczMBKB27dr079+fzp07k5OTQ2RkJEFBQQQEBLBlyxYAhg0b\nhq+vL0F9N4oqAAAgAElEQVRBQSxZsgQAd3d3XnzxRXx8fBg2bBhAkeu11oSFhZGQkMClS5fw9fXl\n999/JyYmhhUrVmAymdi2bRtVq1a9ptohKyuLjIwM6tSpg6OjIy1atLDEdN99993Uz8DR0RGAy5cv\n4+7uTrly5ahRowYVK1YEoGzZstjnT+/s378/n376KWazmTZt2rB582YANm/eTI0aNQrdc9++fTRv\n3pwOHToUqty4Wt26dS1fF9zHwcGBBx54AABnZ2fs7K78U3Pp0qW0adOG8uXL39RrE0IIceewWuJB\nKVUGGK21vqy1Pqe1fltr/YrWOtVaMQjxv7p89hxbJ3/MQ2Gtudenqa3DEUIIm0hLg3HjjK0wBwz4\nFmdnF778cj3fftuJcuVyyX8vXKwdO3awYcMGkpKSWLVqFUOGDMFsNgPg5ubG8uXLiYqKYs6cOUWu\n/+qrrxg3bhzx8fHExsYCMGDAAObOncu6devw9/e3HD927BgjRowgLi6O2NhYPDw8iI+PZ9myZQwZ\nMgSAlStXsmHDBuLj4+nevTsAJ0+eZMyYMWzatIm4uDguXLhQ5HqlFLGxsQwfPpy+ffsyZMgQ6tSp\nw9ChQ+nQoQMJCQk0bVr0/y/OnDmDq6ur5bGrqytpaWk3/4PI1717dx588EECAgIKVXVkZGTw1ltv\nMXz4cABiYmKYOXMmL7/8Mq1atcLb2xuA8ePHM2LEiELXfOONN5g6dSorVqygUqVK173/+vXrOXbs\nGC1btix0fMiQIZaKiJycHObMmUO/fv3+8esTQghR+lmt1UJrnaeUelAp5aC1zvkna5VS9YAlVx16\nEBgJfJp//AHgEBCutT57ayIW4lpbJ39M1oWL+I+LsnUoQghhNcuXw+rV0LixMTQyNhYyMqB9e7jv\nvgM0a9aMJ5+E3Fzv61aCaa0BSElJwcfHB6UUrq6uVK9endOnTwNY3qS7u7uzZs2aIq8zfPhw3n33\nXRYsWEBwcDB9+/Zl79699OrVCzA+/Q8JCQGgVq1auLu7A0ZVRFJSEj/88AMA58+fB2DSpEn06dMH\nOzs7hg8fToMGDahVqxY1atQAjOqDs2fPFrvezc2N1q1b8/XXX7N48eKb/r5WqVKFc+fOWR6fP3+e\nKlWq3PT6AkuXLuXSpUu0bNmSp556ikceeYScnByeeuopXn/9dR555BEAnJyc6N27N1FRURw7dgyA\nFStW4OXlRdW/9cQcOHCA5s2bA+Dt7c2RI0f49ddfiYyMBGDOnDl4eHiwa9cuRowYwXfffVfoZ//O\nO+/g4uJiqUj55JNPeO655ywVGkIIIe4u1p7xcBDYoJT6FsgoOKi1nna9RfkDKZuApXLiKPA1MAL4\nUWs9SSk1Iv/x67cpdnGXSz92gp+nzqF+jydwa1Tf1uEIIYRVfP01PPMMZGUZj8uUgeeeg2HDoGFD\nWLbMg7Vr1xIZ2ZetW7dakgsFKlWqxPHjx3Fzc2PHjh307NkTT09PZs+ejdaa8+fPc/LkSapVqwZQ\n6M3r369VoGrVqkyfPh2tNZ6ennTv3p2GDRuyePFiatasCUB2dnZ+vFcqABo0aICHh4el0iE7Oxut\nNSEhIXTq1InExERGjhzJsmXLrkmgaK2LXA+wZ88ekpKSCAsLY9q0abzyyiuWuQvX4+TkRPny5Tl8\n+DA1a9YkMTGRUaNGXXfN32PKycnB0dERJycnnJ2dcXZ2xmw289xzz9GlSxe6dOliOf/YsWPExsby\n9ttv8+abbxITE8OOHTtISEggKSmJ3bt3s3//fpYsWYKHhwfJycl4e3uzdetWatasiYeHh6VVBuDX\nX3+lT58+LFu2zPLzA5g+fToHDhxgwYIFlmN79uzh4MGDfP755+zatYuePXuycuXKa9pPhBBC3Jms\nnXg4nP+rXP6vf6MVcFBr/YdSqjNgyj++AEhAEg/iNtk8fhrmnFz8xrxm61CEEOK2OnsWfvgBvvsO\nvvrqStIBjKTDVZsv0KVLF5YuXUpgYCDNmjWjbNmyha4VFRVFaGgoDRo0oHr16gA89thj+Pn54evr\ni9lsZsqUKYVmAdxITEwMq1evxmw2ExoaiouLCzNmzCAiIoKcHKOo8o033iA0NLTQuhdeeIFBgwYR\nFBQEgJeXFxMmTLAMP7x8+TIjR44s9r5FrR87diz9+vVj0aJFuLu707p1a1q0aEGjRo04ePAg3bp1\nY9SoUZw7d44xY8bw119/ERISQv/+/enatStTp07lmWeeQWtN//79qVy5suVeSUlJZGVlkZyczDff\nfHNNPLm5ubRu3RowkiDh4eHUqVOHL7/8khUrVnDixAkWLVpEo0aNmDp1Kr179+bDDz/Ex8eHp59+\nmu+//57o6Giio6MBiIiIIDIyktq1azNhwgT69OlD1apVCyUVrjZ48GDOnTvH888/DxiVKM2aNePV\nV1+1zMwA+PHHH/n4448t60wmEwsXLpSkgxBC3EVUcZ8mlFRKqbnAz1rr6Uqpc1pr1/zjCjhb8Phv\na/oB/QDc3d2b/vHHH1aNWZR+538/zNx6LWnU92lCPp5k63DEv/BnZja9dxzmzbr3EFytoq3DEaLE\nOXDASDR89x1s2GDsVFG9OjRqZDzOzoZy5WDxYsjfqMEiJycHBwcHNm7cyMSJE4mLi7PNixBCCCEA\npdQ2rbVsoVOCWLXiQSm1Brgm06G1bn2T6x2BMOCNIq6hlVJFZlG01p8AnwB4eXmVrkyLKBGSRk/B\nrkwZfN4ebOtQhBDilsjNhaSkK8mGlBTjeKNG8Prr0KkTNG8OdnZXZjy0bn1t0gHg6aef5vTp02Rl\nZTFr1qxbGmdUVJRlpwcwdnFYvXr1Lb1HabBu3TrGjh1b6NjIkSMJDg62UURCCCHEzbN2q8VbV33t\nBDwJZBVzblHaYVQ7nMh/fEIpVVNrfUwpVRM4eYviFMLi9N4U9i1chtewl6hwbw1bhyOEEP/auXNX\nWihWrjRaKhwdwWSCgQOhY0fI3wmxkLCwohMOBZYtW3a7Qmby5Mm37dqlSXBwsCQZhBBClFpWTTxo\nrTf/7dB6pdTfj13PM8DV46KXA88Dk/J///Z/i1CIa218+z0cK1ag+ev9bR2KEEL8Y7/+WriFIjcX\n3Nygc2ejqiE0FCpK95EQQgghbiNrt1q4XPXQDmgKVL7JteWBUODFqw5PAr5QSvUF/gDCb1GoQgBw\nbMt2fv16JX5jh+Fc9Z9vcSaEENZW0EIRF2ckG/bvN443bAjDh19pobhqswchhBBCiNvK2q0WezFm\nPCggF/gdeOFmFmqtM4Cqfzt2BmOXCyFui8Q3J+HsVpWmg2/qj6kQQtjE+fOFWyjS0sDBwWih6N/f\nSDYU1UIhhBBCCGEN1m61uN+a9xPif/HHjxs4/GMipg9G41ixgq3DEUKIQg4evNJC8dNPRqVDtWpG\nkqGghcLF5cbXEUIIIYS43azdavES8B+t9bn8x5WB7vm7TghRYmitSXzzXSrefy+NX+pp63CEEILc\nXNi06UoLxS+/GMcbNIBhw4xkg7e3tFAIIYQQouSxdqvFS1rrmQUPtNZnlVIvk7/VpRAlxcHlqzm+\nZTut57yPvZOTrcMRQtylzp+HVauMRMP3319poQgMhJdeMpINderYOkohhBBCiOuzs/L9Cn0Oo5Sy\nAxysHIMQ12XOyyMx+l0q13uIBs93t3U4wsrmz5/PuHHjbB2GTR06dIjly5dbHo8ePZr69etjMpkw\nmUzk5eUB8PPPP+Pv74+fnx/z588v9noxMTG0bNkSf39/evXqRU5ODpmZmYSGhhIQEICPjw8rV668\nYVw5OTnUrVu3yJ/PkSNHMJlM1xxftWoVPj4+BAYG0r59e86cOQNAXl4ew4YNIyQkBJPJxL59+276\nNU2bNs3y9d+/V7fCb7/B1KkQEmK0Tjz1lDG3oWNHWLoUTp+GNWvglVck6SCEEEKI0sHaiYc1SqnF\nSqlApVQg8Bmw1soxCHFd+xd/w5m9Kfi/Mxw7e2sXBQlxrYI3+tZS1Jvp6OhoEhISSEhIoEx+Lf+g\nQYNYtGgRCQkJTJs2jbNnzxZ5vYEDB/LTTz+xceNGAFavXo29vT2zZ88mMTGRuLg4Bg8efMO4Zs2a\nxcMPP/yPXkv9+vVZv34969evp2PHjnz44YcAfPLJJ3h6erJ27VoSEhJ45JFHbvo13erEQ14eJCbC\n668bbRMPPQSDB8OxY/Daa8ZzJ07AggXQrZvMbRBCCCFE6WPtxMNwYCMwJP9XIjDMyjEIUay87GyS\nRr5P9cca4vlkB1uHI26zvLw8evToQWBgICNGjMDDw6PQ81c/joyMJCEhAYAxY8bg6+uLt7c3K1as\nAIyqgGeffZawsDCaNGnC/oI9DP8mISGB5s2bExQURO/evQHYvXs3ISEhBAcHEx4eTmZmJgC1a9em\nf//+dO7cmZycHCIjIwkKCiIgIIAtW7YAMGzYMHx9fQkKCmLJkiUAuLu78+KLL+Lj48OwYcZfsUWt\n11oTFhZGQkICly5dwtfXl99//52YmBhWrFiByWRi27ZtAEyePJmAgADLm+6srCwyMjKoU6cOjo6O\ntGjRwhLT3zk6OgLG7BSz2YyHhwcODg48kL/NgrOzM3Z2xv+OvvjiC/r27QvAqFGjiImJASA9PZ2V\nK1fy5JNPWq6bnp5Ohw4dCAkJYcKECUXe293dnbJlywJQtmxZ7POTiUuXLuWPP/4gKCiIgQMHkp2d\nfVOv6fPPP+fo0aOYTCbGjx9/zfcqIiKCiIgI2rZtS2BgIMeOHSsyrgsXjOqFXr3gnnugRQv44AO4\n91748ENjcOTevTBpEvj7y9wGIYQQQpRu1k48OAAfaa27aK27AB9j/TkTQhRrd+xizv9+mIAJI1B2\n1v7PQ1jbt99+i4uLC+vXr6dTp07k5ubecM2OHTvYsGEDSUlJrFq1iiFDhmA2mwFwc3Nj+fLlREVF\nMWfOnCLXf/XVV4wbN474+HhiY2MBGDBgAHPnzmXdunX4+/tbjh87dowRI0YQFxdHbGwsHh4exMfH\ns2zZMoYMGQLAypUr2bBhA/Hx8XTvbrQGnTx5kjFjxrBp0ybi4uK4cOFCkeuVUsTGxjJ8+HD69u3L\nkCFDqFOnDkOHDqVDhw4kJCTQtGlTBg0axM6dO1mzZg3Lly9nw4YNnDlzBldXV8vrcnV1JS0trdjv\n2/jx4/H09CQtLY377y+8wdGQIUOIiooCIDw8HDs7OwYPHsz27dstr/O99967pipi9uzZBAQEsHbt\nWvz9/a/7cztx4gTTp0/n5ZdfBuDo0aPUrFmT+Ph4nJycmDt37k29ph49elCrVi0SEhKIjo6+5nsF\nUK9ePX744Qf69evHu+++a1n7++8wbZqx20S1ahAebsxtaN8evvgCTp0yWihefRUefPC6L0cIIYQQ\nolSx9jureKD8VY/LA+usHIMQRcq5lMl/35lKrRbePNDGZOtwxC2WpzVgfOq+5tQF8rTmwIEDNGvW\nDABvb2+UUsWu1/nrU1JS8PHxQSmFq6sr1atX5/Tp0wCWN57u7u6WWQJ/N3z4cJYvX86zzz7LvHnz\nANi7dy+9evXCZDKxePFijh8/DkCtWrVwd3cHjKqIJUuWYDKZeOqppzh//jwAkyZNok+fPkRERPBL\n/jYHtWrVokaNGiiluO+++zh79myx693c3GjdujU7d+4kPDy8yJirVq2KUgpnZ2e6du1KcnIyVapU\n4dy5c5Zzzp8/T5UqVYr9/kVHR5OamkqdOnUKzU545513cHFxsVR/AERFRTF16lSio6NRSnHixAm2\nb99OaGhooWumpqbSvHlzwPj5FejYsSMmk4kvv/wSgAsXLtCtWzdmzpxJ9erVAahSpQpt27YFoG3b\ntuzatavY1zR9+nRMJhORkZHFvr6rFcTk5eXNf/+bwogR0LChkUx49VU4ehSGDIENG4wWik8/he7d\noVKlm7q8EEIIIUSpY+1qA2et9cWCB1rri0qpclaOQYgibf+/uWQcO0HHL2Ze9w2oKJ3+ezYDgIm/\nnrQc8/DwYO3atfTt25etW7dakgsFKlWqxPHjx3Fzc2PHjh307NkTT09PZs+ejdaa8+fPc/LkSapV\nqwZQ6M/N369VoGrVqkyfPh2tNZ6ennTv3p2GDRuyePFiatasCUB2djaAZZYCQIMGDfDw8LBUAGRn\nZ6O1JiQkhE6dOpGYmMjIkSNZtmzZNX9+tdZFrgfYs2cPSUlJhIWFMW3aNF555RUcHR0LVX+cO3cO\nV1dXtNYkJCQQERGBk5MT5cuX5/Dhw9SsWZPExERGjRpV5Gu+fPkyTk5OKKWoVKkS5coZf+1Pnz6d\nAwcOsGDBAsu5ZrOZAQMGMG/ePF5//XXWrFnD7t27OXXqFG3btuXo0aNkZWXRuHFj6tatS3JyMq1a\ntWLr1q2Wa8TFxVm+zszM5IknniA6OrpQcsJkMpGcnIyHh4fl9+JeU5s2bRg4cKBlrb29PWazGTs7\nu2u+Vzk5MG9eMp9+2oqvv97KxYuebNtm7ELxwgvGgMiHHiry2ySEEEIIcceyduLhklKqsdZ6J4BS\nqglw2coxCHGNy+fOs/Xdj6jTPpj7AprbOhxxG9SvUHhb1JBqFTF36cLSpUsJDAykWbNmllkABaKi\noggNDaVBgwaWT8ofe+wx/Pz88PX1xWw2M2XKFMt8gpsRExPD6tWrMZvNhIaG4uLiwowZM4iIiCAn\nJweAN95445pP91944QUGDRpEUFAQAF5eXkyYMIF27doBxpv7kSNHFnvfotaPHTuWfv36sWjRItzd\n3WndujUtWrSgUaNGHDx4kG7dujFq1CimTJlCSkoKWmtMJhPt27cHYOrUqTzzzDNorenfvz+VK1cu\n8t6vvfYae/futcx3GDNmDCdPnuTVV1+1zKcA+PHHHxk/fjytW7cmIiKCzMxMoqOjmTx5MiEhIYCx\n68iRI0fo1KkTFy9eJDw8nDVr1tCwYcMi7z1jxgx27tzJpEmTmDRpEqGhoURHRxMVFUXv3r2ZOXMm\nVapUYeHChTf9mrp160aHDh1o164dvXv3Zu/egzRp0g1n51Fs3gxaH8Tevg1VqmQyefJinnlGqhmE\nEEIIcXdTxX0qd1tuppQ3sBj4A1DA/UAPrfVma8Xg5eWlk5OTrXU7UUokvvUum8dPo+f2VVRvUvQb\nGFG6jU89TvyZdMvj1z2qE+rmQk5ODg4ODmzcuJGJEycW+rRciKLk5cHmzfDdd8avvXuN4/Xrg51d\nBC+/HMmLLwYgm+IIIYQQtqGU2qa19rJ1HOIKq/6zSGu9WSlVH6iff2gfYN194oT4m4wTp/j5wznU\neypMkg53MA1UtLfjK686rD19keBqFQF4+umnOX36NFlZWcyaNeuW3jMqKqrQrgiOjo6sXr36lt6j\nJElLS6Nr166FjoWFhTF06FAbRXTrXLwIq1cbiYYVK+D0abC3h5YtITLSaKHw8ICICGjcGEk6CCGE\nEEJcxaoVD4VurFQg0APorLWuYa37SsWD+Lt1r45kx4z59P4lgcp1ZZT8ner57X9Qp5wjo+vVtHUo\nopQ4dMhINMTFQUICZGdD5crGLhSdOkGbNnDVJhhCCCGEKCGk4qHksepnMkopL4xkw5NANeAVINqa\nMQhxtQt/HGHXzIU07PO0JB3uYBdz8zh6OYc2bhVtHYoowfLyYMuWKy0Ue/YYxx9+GF55xUg2+PlJ\nNYMQQgghxD9llX8+KaXGAk8BxzFmPHgBW7TWsda4vxDFSRoTA0rhO3KwrUMRt1FqehYAnn8bMCnE\nxYuwZs2VFopTp6BMGaOFIibGSDZ4eNg6SiGEEEKI0s1an9sMAPYCHwDfa62zlVK26fEQIt+ZXw6w\nb8FSHh8cScX77rV1OOI2Ss3ITzyUL3uDM8Xd4I8/rlQ1XN1C0a6dkWho21ZaKIQQQgghbiVrJR5q\nAG2AZ4DpSqk1gLNSyk5rbbZSDEIUkjTyPezLOdN8xEBbhyJus9T0y9Qsa4+LQxlbhyJswGwu3EKx\ne7dxvF49o4WiY0fw95cWCiGEEEKI28Uq/8zSWucAcUCcUsoZCAMqA0eVUmu01r2sEYcQBU5s20Xq\nlyvwHTWUcm5VbR2OuM1S0rN4pKK0WdxN0tMLt1CcPGm0ULRoAVOmGJUNdevaOkohhBBCiLuD1T/f\n0VpnAkuAJUopV6DrDZYIccslRr+LU9XKNB3az9ahiNvsbE4uJ7Nz6VJB2izudIcPX6lqiI83Wihc\nXQu3UFSubOsohRBCCCHuPjYtLNVanwPm2jIGcff5c/0mDq1KIPD9tynrIrsc3OksgyVlvsMdx2yG\nrVuvJBt27TKO160LAwcayQZ/f3BwsG2cQgghhBB3O+loFXcVrTWJb06iQq0aNO7/vK3DEVaQmp6F\nAuqWl1aLO0FxLRQBAfD++0aywdPT1lEKIYQQQoirSeJB3FV+W7GWv5KSCZ31Lg7OzrYOR1hBSsZl\n7nd2oLy9na1DEf/Sn38WbqHIyoJKlQq3UFSpYusohRBCCCFEcWyeeFBKBWmt420dh7jzabOZjdHv\n4urxAA16P2XrcIQVaK1JTc/i8UrlbB2K+AfMZkhOvpJs2LnTOF63LgwYYOxCERAgLRRCCCGEEKWF\nzRMPwALA3dZBiDvf/iXLObXrF9p/PoMy8o7lrnAmO4+0nDzqyWDJEi8jo3ALxYkTRguFvz+8955R\n2VCvnq2jFEIIIYQQ/4ZVEg9Kqa+KewqQvQzFbZeXk0PS2+/h9mh9Hn4qzNbhCCtJybgMgKckHkqk\nP/+EuDgj2bBu3ZUWirZtjURDu3bSQiGEEEIIcSewVsVDEPA8kPG34wrws1IM4i62d94Szh08RJfv\n5qPspNf/bpGSnoUd8FA5STyUBGYzbNt2pYVixw7juIcH9O9vJBukhUIIIYQQ4s5jrcTDZuBiUbMc\nlFIHrRSDuEvlZGayacwH3OvnxYMdQmwdjrCilPTLPFDOEacykmyylYwMWLv2SgvF8eNgZ2e0UEye\nfKWFQilbRyqEEEIIIW4XayUe2mmtdVFPaK2l4kHcVjs/WkD6X8dp//l0lLy7uWtorUnNyCKgSgVb\nh3LXOXKkcAvF5cvg4lK4haKqNNkJIYQQQtw1rJJ4KCrpoJRqq7X+wRr3F3evrAsX2TxxOg+0MXF/\noK+twxFWdDwrl4u5ZjzLS5vF7VbQQlGQbNi+3Tj+0EPw0ktGsqFFC2mhEEIIIYS4W9lyV4sJgCQe\nxG21LeYTLp85S8D4120dirCylHQZLHk7XbpUuIXi2DGjhcLPD95910g2PPywtFAIIYQQQgjbJh7k\nn6Pitrp06gzJU2bh2a0D9zR91NbhCCtLzcjCQUEdGSx5yxw9eqWq4ccfC7dQdOwI7dtLC4UQQggh\nhLiWLRMP/W14b3EX2DLx/8i9lInfO1G2DkXYQEp6FnXKlcXRTnKc/5bZDD//fCXZ8PPPxvEHH4QX\nX7zSQuHoaNs4hRBCCCFEyWbVxINSqizwIhAAaKWUF/CJ1jrLmnGIO9+FP4+y46NPeeT57lR92MPW\n4QgrM2vNgYzLtKpW0dahlDqXLhnVDN99ZyQcCloofH1h0iQj2VC/vrRQCCGEEEKIm2ftiocFQBYw\nO/9xj/xjT1s5DnGH++/YD0Fr/EYNtXUowgaOXs7hUp7Gs4KTrUMpFf7660pVw9q1RgtFxYqFWyiq\nVbN1lEIIIYQQorSyduLhUa31I1c9XqOU2mflGMQdLi31IHvmLaHJgAhcat9n63CEDaSkG0VU9WRH\niyJpbbRNFFQ1bNtmHK9TB/r1M6oaWraUFgohhBBCCHFrWDvxsFMp1UxrvRVAKdUU2G7lGMQdLmnk\n+9g7lcX7zUG2DkXYSGr6ZcraKWqXk3fOBTIzC7dQ/PWX0S7h6wsTJxrJhkcekRYKIYQQQghx61k7\n8dAI2KyU+i3/cR3gF6XUdkBrrR+3cjziDnNyxx5SlizHO/oVyt/jZutwhI2kZGThUb4sZe7yd9HH\njhVuocjMNFoo2rQxEg3t2oGb/GcihBBCCCFuM2snHjpb+X7iLpMY/S5OlV3xGvaSrUMRNpKnNb9m\nZNGhuoutQ7E6rWH79itVDcnJxvEHHoDISCPZEBgoLRRCCCGEEMK6rJp40FofVEo1AFrkH9qgtd5r\nzRjEnetI4hZ+/34dLSa9iZNrJVuHI2zkj0vZZJnvnsGSmZmwbt2VZMPRo0a7hI8PTJhgJBsaNJAW\nCiGEEEIIYTvW3k5zINAf+Cb/0BdKqRla64+sGYe482itSXxjIuVr3sNjg/rYOhxhQykZ+YMlK9y5\ngyWPHYMVK4xkw5o1RvKhQoXCLRTVq9s6SiGEEEIIIQzWbrXoBzTXWqcDKKUmAEmAJB7E/+TQD/Ec\nTdxCq48m4FDO2dbhCBtKTb9MuTKKWk4Otg7lltEaduwwEg3ffXelhaJ2bejb90oLRdk7N9cihBBC\nCCFKMWsnHhSQfdXjnPxjQvxr2mwmMfpdKtVxp1HfZ2wdjrCxlPQs6pZ3wq6U9xZcvly4heLIEaNd\nwtsbxo83kg0NG0oLhRBCCCGEKPmsknhQStlrrXOBhRi7WizLf+oJYIE1YhB3rtQv4zi5fQ/tFk6j\njEzNu6tlmzW/Xcqia01XW4fyrxw/biQZ4uKMFopLl6B8eaOF4p13oH17aaEQQgghhBClj7UqHrYA\nj2utJyulEoCA/OMvaa23WikGcQcy5+ay8e33qNqgHg8/08XW4QgbO3Qpi1wNnuVLR8+B1rBz55UW\niq35fxu6u0Pv3kZVg8kkLRRCCCGEEKJ0s1biwVIMrLXegpGIEOJ/tnfBUs6m/kbnb+ZiV6aMrcMR\nNpaSXjBYsuTuaHH5MsTHX2mh+PPPKy0U48YZyYZGjaSFQgghhBBC3DmslXhwU0oNLe5JrXWMleIQ\nd5Dcy5dJGj2Fmt6P8VBYa1uHI0qA1IwsKtrbUaOstcfXXN/x44V3oShooWjdGsaMMVoo7rnH1lEK\nIQyuDyMAACAASURBVIQQQghxe1jrX+dlgArIIElxC+2cuZD0I8dot+BDlHw8LICU9Mt4li9r8z8P\nWsOuXVdaKLbk13jdfz9ERFxpoXAquYUZQgghhBBC3DLWSjwc01qPtdK9xF0g+2I6m8dPw71VAO7B\nATdeIO54l/PMHLqUjU+tyra5/2VISLiSbChooWje3Gih6NgRHn1UWiiEEEIIIcTdx+ozHoS4FbZ9\nOJvM02kETBhh61BECXHwUhZmoF4F601iPHGicAtFRgaUK2e0UIweDR06SAuFEEIIIYQQ1ko8tLLS\nfcRdIPNMGsnvz8LjiXbUbP6YrcMRJURqwWDJ8revf0Fr2L27cAuF1kYLRa9eRgtFUJC0UAghhBBC\nCHE1O2vcRGudZo37iLvDlnc/IvtiOv7vDLd1KKIESUnPoopDGao63prdTebPn8+4cePIyoIffoCB\nA+GBB6BxY3jrLSPhMHYs7NgBf/wBH30E7drdWUmHQ4cOsXz5csvj0aNHU79+fUwmEyaTiby8PAB+\n/vln/P398fPzY/78+cVeLyYmhpYtW+Lv70+vXr3IyckhMzOT0NBQAgIC8PHxYeXKlTeMKycnh7p1\n6zJu3Lhrnjty5Agmk+ma46tWrcLHx4fAwEDat2/PmTNnABg8eDA+Pj74+PgwadKkQmvS0tKoUqUK\nixYtumFMQgghhBCieFZJPAhxq1w8eowd/zePR3o+SbUG9WwdjihBUjMu41nh1gyWPHkSNmyAzz+H\nqlWNhMK8efDYYzBnDhw7Bps3GwmIxo2tN7eh4I2+tfw98QAQHR1NQkICCQkJlMnfwnbQoEEsWrSI\nhIQEpk2bxtmzZ4u83sCBA/npp5/YuHEjAKtXr8be3p7Zs2eTmJhIXFwcgwcPvmFcs2bN4uGHH/5H\nr6V+/fqsX7+e9evX07FjRz788EMABgwYwH//+1+SkpL49ttvOXjwoGXNxIkT8fPz+0f3EUIIIYQQ\n15LEgyhVNo+bijkvD7/Rr9k6FFGCZOSa+TMz51+3WeTm5tG+fQ/q1AmkVq0R3HOPB3PnwtGj0LMn\n1KzpwenT8M03sGlTJPv3JwAwZswYfH198fb2ZsWKFYBRFfDss88SFhZGkyZN2L9/f5H3TEhIoHnz\n5gQFBdG7d28Adu/eTUhICMHBwYSHh5OZmQlA7dq16d+/P507dyYnJ4fIyEiCgoIICAhgS/6WGcOG\nDcPX15egoCCWLFkCgLu7Oy+++CI+Pj4MGzYMoMj1WmvCwsJISEjg0qVL+Pr68vvvvxMTE8OKFSsw\nmUxs27YNgMmTJxMQEMC0adMAyMrKIiMjgzp16uDo6EiLFi0sMf2do6MjAFprzGYzHh4eODg48MAD\nDwDg7OyMnZ3xv6UvvviCvn37AjBq1ChiYoxdl9PT01m5ciVPPvmk5brp6el06NCBkJAQJkyYUOS9\n3d3dKVvWmP9RtmxZ7O2NTsO6desCYGdnh729vSWZcvjwYY4dO4aXl1eR1xNCCCGEEDdPEg+i1Dh3\n8BC75yzm0X7PUqmOu63DESXIgYzLaMDzHwyWzMqCVauMFop77/2WlStdOHRoPS4unXB1zWXMGBg2\nDD7+2BgY6exceP2OHTvYsGEDSUlJrFq1iiFDhmA2mwFwc3Nj+fLlREVFMWfOnCLv/9VXXzFu3Dji\n4+OJjY0FjE/f586dy7p16/D397ccP3bsGCNGjCAuLo7Y2Fg8PDyIj49n2bJlDBkyBICVK1eyYcMG\n4uPj6d69OwAnT55kzJgxbNq0ibi4OC5cuFDkeqUUsbGxDB8+nL59+zJkyBDq1KnD0KFD6dChAwkJ\nCTRt2pRBgwaxc+dO1qxZw/Lly9mwYQNnzpzB1dXV8rpcXV1JSyu+u278+PF4enqSlpbG/fffX+i5\nIUOGEBUVBUB4eDh2dnYMHjyY7du3W17ne++9d01VxOzZswkICGDt2rX4+/sX/0MHTpw4wfTp03n5\n5ZcLHf/ss8948MEHLUmQMWPGEB0dfd1rCSGEEEKImyOJB1FqbBz5PnYO9nhHv2LrUEQJk5phDJa8\nUeJh4UJo1Qr8/KBaNWjbFubOhWrVDtCrVzP++gt27/bG1VXh7l50C4XWGoCUlBR8fHxQSuHq6kr1\n6tU5ffo0AE2bNgWMT9kLZgn83fDhw1m+fDnPPvss8+bNA2Dv3r306tULk8nE4sWLOX78OAC1atXC\n3d1Itu3evZslS5ZgMpl46qmnOH/+PACTJk2iT58+RERE8Msvv1jW1ahRA6UU9913H2fPni12vZub\nG61bt2bnzp2Eh4cXGXPVqlVRSuHs7EzXrl1JTk6mSpUqnDt3znLO+fPnqVKlSrE/g+joaFJTU6lT\np06heRDvvPMOLi4uluoPgKioKKZOnUp0dDRKKU6cOMH27dsJDQ0tdM3U1FSaN28OgLe3t+V4x44d\nMZlMfPnllwBcuHCBbt26MXPmTKpXr245b+3atcybN4+ZM2davsdKKerXr1/s6xBCCCGEEDfPWrta\nCPE/ObVrH/sXf0Pz1wdQoabsTygKS03PorqjPZUdiv8rbd486NPH+FopY8vLQYMgOBi+/96DtWvX\nUrNmXzZt2mpJLhSoVKkSx48fx83NjR07dtCzZ088PT2ZPXs2WmvOnz/PyZMnqVatWv71r2Qs/n6t\nAlWrVmX69OlorfH09KR79+40bNiQxYsXU7NmTQCys7MBLOX/AA0aNMDDw8NSAZCdnY3WmpCQEDp1\n6kRiYiIjR45k2bJl18y70FoXuR5gz549JCUlERYWxrRp03jllVdwdHQkNzfXsv7cuXO4urqitSYh\nIYGIiAicnJwoX748hw8fpmbNmiQmJjJq1KgiX/Ply5dxcnJCKUWlSpUoV64cANOnT+fAgQMsWLDA\ncq7ZbGbAgAHMmzeP119/nTVr1rB7925OnTpF27ZtOXr0KFlZWTRu3Ji6deuSnJxMq1at2Lp1q+Ua\ncXFxlq8zMzN54okniI6OLpSc2Lx5M2+//TYrV67EOb+sZdu2baSkpNC2bVt+/fVXypcvj6enpyW5\nIYQQQggh/hlJPIhSIfGtyZR1qUizqJdvfLK466SkX6beDaodPv/8ytdag4cHdOhgPO7SpQtLly4l\nMDCQZs2aWWYBFIiKiiI0NJQGDRpYPil/7LHH8PPzw9fXF7PZzJQpUyzzCW5GTEwMq1evxmw2Exoa\niouLCzNmzCAiIoKcnBwA3njjjWs+3X/hhRcYNGgQQUFBAHh5eTFhwgTatWsHGG/uR44cWex9i1o/\nduxY+vXrx6JFi3B3d6d169a0aNGCRo0acfDgQbp168aoUaOYMmUKKSkpaK0xmUy0b98egKlTp/LM\nM8+gtaZ///5Urly5yHu/9tpr7N271zLfYcyYMZw8eZJXX33VMp8C4Mcff2T8+PG0bt2aiIgIMjMz\niY6OZvLkyYSEhADGriNHjhyhU6dOXLx4kfDwcNasWUPDhg2LvPeMGTPYuXMnkyZNYtKkSYSGhhId\nHW2ZI9GlSxcApkyZQkREBBEREYAxs8PDw0OSDkIIIYQQ/wNV3KdxdyovLy+dnJxs6zDEP/DXpmQW\n+3UmYPzreL8pbRaisAs5eXRN/p2+7lV5plbRb3gB2reHgp0ay5WDxYshLOzK8zk5OTg4OLBx40Ym\nTpxY6NNyIYQQQghReiiltmmtZUJ0CSIzHkSJprUm8c1JlKtejcdfjQSMcu9PP/0UgOPHj1s+KS0o\nGb8ZAwcOpGXLlixfvpxFixbRvHlzxo4dy6RJk9i9e3ex65599tl/9ToKdgC4HWs9PDyuOXbhwgX8\n/PwwmUw0b96cH3/88abP0VozaNAgWrRoQceOHS2DAtPS0ujYsSMtWrRg0KBBxbYQWJtlvkP561c8\nHD8Ojz4KAwZcm3QAePrppwkMDOS1115j/PjxtzTGqKgoTCaT5Vfr1q1v6fVLmrS0tEKv12QyWXal\nEEIIIYQQdx+peBAl2qE1P7Gs9TMETXuHxwcZDfqHDh0iMjKStWvXsnjxYvbv38+YMWP+0XU9PT1J\nTU0FoE2bNsycOZM6derc8vgLeHh48Ouvv96WtUU9bzabMZvN2Nvb89tvv/HUU08V6n2/3jk//PAD\nS5cuJTY2lk8//ZR9+/YxadIkRowYQYMGDejZsyd9+vQhPDyctm3b/qvXdCt9fiSNuX+m8XWzOlS0\nL1PkOenpUKkSREfD2LFWDlAIIYQQQliVVDyUPFLxIEqsgmoHl9r38Wi/K5UGMTExbNu2jbp16zJy\n5Eg+/fRTIiMji7zG+vXrCQwMxGQy8dJLL1k+zf/zzz8xmUzMmjWLzZs306NHD7788ksiIiJITEwE\njL51b29vgoKCLEPvCqoLzp8/T3h4OK1atSI4ONjyxt9kMjF48GBat25Nq1atyMrKIiYmhqNHj2Iy\nmYiNjWX+/Pl06dKFrl270rBhQzZs2AAYk/RDQkIIDg4mPDyczMzMa9YWZ8iQIQQGBvLcc89hNpux\ns7PD3t4Y4XLhwgUeffTRa9YUd8769evp2LEjAJ06dWL9+vXXPW5rqRlZ1HJyKDbpALB1K5jN4Otr\nxcCEEEIIIYQQgCQeRAl1OH4jS1uFcyJ5JxXuv5e/kq5UqQwdOpSmTZty4MABy3C4OXPmXHMNrTWD\nBw9m+fLlJCQk4OzszIoVK/i///s/atWqRUJCAi+++CJNmjRh6dKldOvWzbJ2z549fPXVV2zcuJH4\n+Hiee+65QteeOHEiXbt25ccff+SDDz5gxIgRludMJhOrV6/moYceYs2aNQwdOtRyv4JBdgBfffUV\nn3zyCVOnTgVgwIABzJ07l3Xr1uHv709sbGyxa6+Wm5tLeHg469evx9nZmeXLlwNw9OhRAgICaN26\nNU888USRa4s658yZM5bhgK6urpw9exYwyuddXV0txwtaMGwtJT3rhm0WSUnG7z4+VghICCGEEEII\nUYjsaiFKpPSjJ/gz3ni3+FdSMpdOnP7H1zh9+jSHDh2ic+fOxjXT06lXr95Nrd23bx8BAQGWioCr\ntzMEozph/fr1zJw5E8ByHkDTpk0BcHd358yZM0Vev6hz9u7dS69evQBjZ4KC6f03opSyTNz39vYm\nJSUFgFq1apGYmMihQ4cwmUx07NiRyMhIfv31V7p168bAgQOLPKdKlSqcO3cOMCo7CpIQlStX5vz5\n87i6unL+/HmqVKlyU/HdTmnZuZzKzr3hjhabNkH9+lDMZgtCCCGEEEKI20gSD6JEunTilOXrCvfe\ng2f3jpbHjo6O5Obm3vAa1apV48EHHyQuLo4KFSoAWLYpvJEGDRrw8ccfk5eXR5kyZSztC1c/7+vr\na6kSuHqwpVLK8nXBDJW/b7NY1DkNGzZk8eLF1KxZs9A1b7RFo9aa5ORkvL292bp1K23btiUrK8uy\nJaSLiwsVK1YEKFQZUtw5gYGBfP3113Tp0oXvv/+ewMBAy/Hvv/+eHj168P3339O1a9frxmUNlsGS\nFZyKPUdrI/FQTNGHEEIIIYQQ4jaTVgtR4lw6eZpNYz/ArfEjALSc/BZ2V1Uc1KhRA2dnZ5588kny\n8vKKvY5SipiYGMLCwggKCqJVq1b88ssvNxVDgwYN6Ny5M35+fgQHB7Nw4cJCz0dHR/PFF18QHBxM\nUFDQDXeeKEhS/Oc//yn2nBkzZhAREUFwcDDBwcGWGQo3Wmtvb8+yZcsIDAzk4sWLhIWFsWfPHlq2\nbElQUBCdO3fmww8/vGZdcee0adMGBwcHWrRowWeffcbw4cMBY2eGzz77jBYtWuDg4FAidmZISb+M\nAupep9UiNRXS0mS+gxBCCCGEELYiu1qIEmf1C8PZO/8Leu5YzeF1G2nS//lCiQchCkT/8hfHs3KJ\nbeJe7Dnz5kGfPrBvn9FuIYQQQggh7myyq0XJI60WokQ5sW0Xu2MX03TIC1RrUI9qDW5+JkP//v0L\nHevXrx89evS4HWHaxLp16xj7t70gR44cSXBwsI0isi2tNSkZWTR3LXfd8zZtAldXuMnxHkIIIYQQ\nQohbTBIPosTQWrPu1ZE4V6uCz9uD/9HaRx55hISEhNsTWAlR0IIhDKeyczmXk3dTO1r4+sINRmUI\nIYQQQgghbhP5p7goMfb/51v+2riVgAkjcHKtZOtwRAmXkn7jwZLnzhktFjLfQQghhBBCCNuRxIMo\nEXIyLvHT8Heo/ngjGvZ+ytbhiFIgNSOLMgoeKudY7DmbNxu7WkjiQQghhBBCCNuRVgtRImyZNJ30\no8fpuGSmDJIUNyUl/TIPODtStkzx+dNNm4wWi+bNrRiYEEIIIYQQohCpeBA2d/73w2x9byYP93iC\nWv7NbB2OKAW01qSmZ1HvOm0WYMx3aNgQXFysFJgQQgghhBDiGpJ4EDa3ftg7qDJ2tHz3TVuHIkqJ\nY1m5pOeZ8axQ/GDJvDyj1cLPz4qBCSGEEEIIIa4hiQdhU4fXJXLgq+/xfmMgFe+719bhiFIiJf0y\nAPWus6PFvn1w4YLMdxBCCCGEEMLWJPEgbMacm0v8q6NweeB+mr72oq3DEaVIanoWDkrxQLniEw+b\nNhm/S8WDEEIIIYQQtiXDJYXN7Jy1iNN79hO2bDYOzs62DkeUIinpl3movCMOdqrYc5KSoFo1eOgh\nKwYmhBBCCCGEuIZUPAibyDyTRtLb7+Ee7I/HE+1sHY4oRfK05kBGFvXKX3+w5KZNRrWDKj43IYQQ\nQgghhLACSTwIm9g48n2yLlwkaOpYlLwzFP/AkcwcMs36uoMlT5+G1FSZ7yCEEEIIIURJIIkHYXWn\ndu1j18yFNH65F9UaPmzrcEQpk5KRP1jyOomH//7X+F3mOwghhBBCCGF7kngQVqW1Jv7VkZR1dcFv\nzGu2DkeUQqnpWTjZKe53diz2nE2bwN4evLysGJgQQgghhBCiSDJcUljVga++58+ETbSaMR7nKpVt\nHY4ohVLSL+NRvixlrtOik5QETZpAuXJWDEwIIYQQQghRJKl4EFaTk5nJ+tfGUq1RfR7t95ytwxGl\nUK5ZczAj+7ptFrm5sGWLzHcQQgghhBCipJCKB2E1ye/P5MIfRwiPX4qdvfzRE//cH5nZZGuN53V2\ntNi1Cy5dkvkOQgghhBBClBRS8SCs4sKfR9kycTqe3Tpwv0neEYp/JyX9xoMlN20yfpeKByGEEEII\nIUqGUpN4UEq5KqW+VErtV0r9opTyVUo1UUr9Vym1QymVrJRqbus4RdF+ihoPGgLfH2nrUEQplpqR\nRfkydtzr5FDsOUlJcO+94O5uxcCEEEIIIYQQxSo1iQdgKvCD1vphoDHwCzAZGKO1bgKMzH8sSpgj\nGzaT8p9v8Yp6GZfa99k6HFGKpaRn4Vm+LHbXGSy5aZNR7XCdU4QQQgghhBBWVCoSD0qpSkBLIBZA\na52ttT4HaMAl/7RKwF+2iVAUx5yXx7pX3qbCfTVpHtXf1uGIUizbbOb3S1l4XqfN4vhx+P13me8g\nhBBCCCFESVJaJvzVAU4B85RSjYFtwKvAYGCVUup9jCRKkW83lFL9gH4A7lJ/bVV75v6HUzv20mHx\nRziUl70Nxb/326VscjXUq1D8YEmZ7yCEEEIIIUTJUyoqHjASJI8DH2utHwMygBHAy8AQrfX9wBDy\nKyL+Tmv9idbaS2vt5ebmZq2Y73qXz50n8c1J1GrhTb2nwmwdjijlUtKzgOsPlkxKAkdHePxxa0Ul\nhBBCCCGEuJHSkng4AhzRWm/Of/wlRiLieeCr/GNLARkuWYJsGhND5pmzBE0di5KGe/E/Sk2/TCV7\nO6o7Fl+otWkTNG0KZYvPTQghhBBCCCGsrFQkHrTWx4E/lVL18g+1AvZhzHQIzD8WDBywQXiiCGd+\nOcCO6fN59IUe3PNYQ1uHI+4AKelZeFZwKjaJlZ0Nycky30EIIYQQQoiSplQkHvINAj5TSu0CmgAT\ngBeAKUqpncB7gINSKl4pVVYptUwplaCUaq6U+qy4i/7www8sXLjwHwezY8cOfvrpp3/1Qm60NiEh\ngcjIyGuOT548GW9vb/z9/Rk0aBBa62vOGTx48P+zd+dxVZZ5H8c/FyCLColSmpqZS66VJbmCHGnR\nxi00t1wf0WwsFW1ynLExnSwbn8m9XMYmc9Cc1HpGzdwFNRlLSzlGmpik4JKSgriy3M8fB06iqGjh\n4eD3/Xr1Opz73Nd1/85xeXW+XvfvolmzZjRr1oy3337befyHH36gQ4cOhIeH07dvX+fxt956i5Yt\nWxIeHk5SUtItvZ8rWZbFpqjXKVWmNC0n/PE3mVPubOezczh0/hJ1ylx7KcM338DFi+rvICIiIiJS\n3LhLc0ksy9oFBF9xeCvQGMAY0xOoa1nW68aYe4Egy7LyVkP0uta8bdu2vaV6du3aRXJyMq1atbpt\nYyMiIhg1ahQA3bp1Y+PGjTzxxBP5znnppZeYOnUqOTk5tGzZkq5du1KzZk1efvll3n//fe69917n\nuXv37mXjxo188cUXbN68mdGjR7N48eKbfj9XOrBiHT+ujaX11PGUvrvCr55PJPHsRXKAB6/TWHLb\nNsejggcRERERkeLFnVY85GOMmWiMiTXGxBlj+gGvA32NMfOAucDDuSseyhpjEnPHBB44cICwsDBa\nt27NsWPHmD9/PhMmTAAgNjaWsLAwbDYbL774IpZlkZSUROPGjenduzePPfYYU6dOBWDy5Mm8//77\n2Gw2UlJSsNlsREVF8fTTT/PEE09w8aKjEd6MGTMIDQ2lefPmzJs3r8CxBTlw4AARERE0atSIJUuW\nAFC7dm3n6z4+Pnh5XZ0b5Z3j4eGBl5cXnp6e/Pjjj5w7d47hw4cTFhbGsmXLnO+3Xbt2ALRq1Yrd\nu3f/il8Rh6yLF4kdOZ7y9WrzyJB+v3o+EYDvzzr+PF1vK824OLj/fqhc+XZVJSIiIiIiheE2Kx4u\nZ4xpCwRalhVmjCkNxAF/A6pYljXBGFMdmGdZ1pO55+cN/VNAQACxsbEA5OTkOOe0LIuoqChiYmK4\n6667GDFiBJ999hkNGzbk6NGjbNmyBQ8PD+rVq0dUVBQjR44kOTmZ1157zTmHzWZj6tSpvPDCC6xb\nt46aNWuyevVqNm/eTE5ODqGhoURERBQ49konTpxg3bp1nDt3juDgYLp06YKHhyMnio2N5ejRo9dd\nMbFw4UJq1KhB9erViYuL45tvviEhIQF/f39atGhBeHg4qampVL7sW1p2dvbVEwUEwJkzvzz394f0\n9Gte9+up8zh9IIkuaxbhWarUNc8TuRnfZ1ygQilPgq7TWHLbNggNvY1FiYiIiIhIobhl8AA8BIQZ\nY2Jyn/sAhVnT39Df39/5JO+LPMDJkydJSkqiU6dOAGRkZFCnTh0aNmxIvXr1KF26NACenp7XnLxx\n48YAVKtWjdTUVM6fP09CQgKtW7cGID09ncOHDxfqDT766KN4eXkREBDAPffcw4kTJ6hYsSLx8fGM\nHj2aFStWYIxh69atzgBj5cqVlC1blvXr1/PBBx+wYsUKAMqXL89DDz1ElSpVAGjUqBH79++nfPny\nnD592nnNAt/b5aFDQc8vk3H0OP+dMI2aHZ+m+tNh1zxP5GblNZa8lsOHISVFjSVFRERERIojdw0e\nvgXWWpY1HMAY4w08D1S9wbg9Z86ceSbvyeUrHoKCgqhRo4bzyztAZmYmKSkpBXbR9/b2JisrK9+x\ny8+zLIt69erx6KOPsmzZMowxZGZmUqpUKRISEq4ae6Vdu3aRlZXF+fPnOX78OHfffTeJiYkMGDCA\nZcuWERQUBEBISAgxMTHOcdu3b+cvf/kLn3/+OX5+fgDUqlWLc+fOcebMGfz8/EhISOD+++/H39+f\nqKgooqKi2LZtG4888kj+Ik6cuG6NV9oy+i1yLmVim/z6TY0TuZ6MrGySL2Ty5N3+1zxH/R1ERERE\nRIovtwweLMtaZYxpkbviwQKSgQ2FGDoxPT391bCwMDw9PVm0aJHzBWMMkydPpmPHjliWhYeHB1Om\nTCEgIKDAiVq2bMnMmTPZs2cPM2fOLPCchg0b8uSTT5J3PT8/P5YvX37V2EqVKl01tnLlynTt2pWD\nBw8yYcIEPDw8iIqK4vTp0/Tr5+id8Oqrrzp7NOSJjIwE4NlnnwXgnXfeoXHjxkyaNIlnnnmGzMxM\nBg0aRMWKFalYsSIhISG0bNkSb29v3n///V8mSkqCNm0K8ZE6HN3+NQkLltJk9MuUq1m90ONEbmR/\nXn+H6+xoERcHfn5wZXYmIiIiIiKuZwrakrEkCw4Otnbs2OHqMoo3ux3atoVz5+DSJcdjngJ6PFg5\nOSxq1oEzyUcZsG8z3v5lb3PBUpL9O+UU/ziUyrLgB7irVMG3OjVp4ggectu3/Grz58+nc+fOzuDR\nz8+Ppk2bAtCnTx8iIyOxLIthw4axa9cu7rrrLhYsWED58uXzzbNgwQJmzpyJr68vlStX5sMPP8TH\nx4dz584xbNgwDh48SHZ2Np9++imBgYGsXr2a8ePHAzBu3DjaFBD+TZ8+nWHDhgGOlVHp6em3tLuO\niIiISElljNlpWdaVOyKKC7ntrhYlxahRo7DZbM7/nn76adcWtHUr5H2J2bIFzp4Fy/rlvwIaS367\nYAnHvtpF6Nt/Uuggv7l9Zy9SycerwNAhOzub8+fhm29+2/4O8+fPJ/2y3+tVqlQhJiaGmJgY56qi\nNWvWcO7cObZs2UK3bt2YNGnSVfOEhIQQFxfH5s2bqVatGtHR0QCMHz+ebt26sWHDBmJiYggMDCQ7\nO5tRo0bx+eef8/nnnzNq1KgCG75Onz7d+fOuXbvYvHnzb/fGRURERESKgFvealGSFPRlxWVWrIBu\n3Rx7Eq5Z43i8gYvpZ9gyeiL3Nn2U+r273IYi5U7zfcaFfLdZJCUl0bVrV+rWrYuXlxc//niWrKxU\nPvvMIjJyLjVr1qRXr14cPnwYLy8vxo8fT7Vq1ejSpQv16tUjISGBvn37EhUVRVpaGoMGDSI1YgCB\nQAAAIABJREFUNRXLspg7dy6HDh1i165ddO3aleDgYGbMmMGxY8cICwujQoUKTJ48merVqxMbG0v7\n9u0B6NChA7Nmzbqq9ho1ajh/vnwL3PXr15OZmclbb71FWFgY48ePJzExkQceeIBy5coBUL16dRIT\nE6lTp45zjsmTJzu37+3Tpw/Tpk3jzJkzrF+/noULF9KrVy/q16/P999/j6+vL4sXL3b2rBERERER\ncRWteBCHDz6AiAh46CHHqodChA4A29+czrnjJ2g9/Q2Mh347yW8rLTObYxezqHPFjhZJSUm8++67\nVKxYkUqVOgMbmDZtCqNHj+bnn3/mxx9/ZPPmzWzatImQkBAAjh49yty5c9m2bRvTpk0DYOLEiXTu\n3JkNGzYwZYpjfHh4OI0aNWLJkiXMmDHDeb3Y2FgGDx7sXPGQmppKYGAgAOXKlePUqVPXfB979+5l\n9erVdO/eHYA9e/YQHh7Opk2bSEhIYPXq1fnmy5vz559/zjfPyJEjnasvIiMjGTlyJJGRkcTExDh3\nrQkNDWX9+vU0b96cefPm3epHLyIiIiLym9E3xTudZcHf/gYDBsATT8DGjZC7Y8aNnNr/Azun/IMG\n/btxb5NHi7hQudNkWxaLUxxfvM9kZZN9WT+ahg0bEhAQgN1uZ/Xqafj62hg/fjinT5+mQoUKDBo0\niD59+vDCCy9w5MgRAOe2uL6+vs6tY+12O9OmTcNmszF8+PB828teLm8XmTZt2vDjjz8C5NuONi0t\njcDAQDIyMpy3TW3duhWA5ORk+vXrx+LFi/H19XWObdu2LcYY2rRpQ3x8/FXb26alpVG+fHlee+01\nbDabc9vcG2nSpAkATZs2Zd++fYUaIyIiIiJSlIpl8GCMqWSMiTPGbDLG+BhjlhljYowxTYwxC68z\nrq0xps/NXu/X3Cd9o7ExMTEMHDjwquPbtm3joYcewtfXl+Tk5ALHTpo0iaZNm9KyZUuGDh3K5Y1A\nMzMzqV27NhMmTHAee+utt2jZsiXh4eEkJSXduPicHHjlFRg9Gnr2dNxqcRPLsmNGjsfL14fQiX8q\n9BiRwtp48gxLjqYBsPjIaTaePON8LS84qF+/ATk5o+jWzdF/YdWqVWRmZtK7d2+io6Np1aoVU6ZM\nAShwW9wGDRowatQoZ/+GVatWAfm3y83IyHD2WoiPj3eGEGFhYc7zV61aRVhYGGXLlnXOFRISwsmT\nJ+nSpQuzZ8+mZs2azuvabDbymtzu2LGDWrVqUbt2bQ4ePEh6ejrp6ekcPHiQWrVqMWHCBGJiYpx/\n1j0uW1lU0La+efN+9dVXPPjggzf/wYuIiIiI/MaKZfAAtAbWWpbVGigPBFmWZbMs60vLsnpda5Bl\nWasty/rXzV6sKIOHa2nQoAFxcXE0a9bsmudERESwfft2vvjiC44fP87GjRudr82ZM4e6des6n+/d\nu5eNGzfyxRdfMG7cOEaPHn39AjIzoV8/mDIFhg2D6Gjw9i50/QdXb+KHletp9pcoylS6p9DjRArr\nySD/6z4H6NVrDGlpH/Pf/4bTunVrpk+fzk8//UTr1q2x2Wy899579OzZ85rXGDNmDB9//DHh4b+M\nB+jcuTORkZH85S9/ISEhgeDgYFq1asXQoUOZM2cO4Fj9UKpUKUJDQ1m4cCGvvvrqVfOPGzeOlJQU\nRowYgc1mc25Z+/bbbzN27FhatWpFZmYmzz77LJ6enkycOJE2bdrQpk0bJk6c6AxYLte8eXMiIiJY\nvHgxLVu2ZO3atTz33HMcO3YMgLi4OJ544gk2b95cYOgpIiIiInK7FYvtNI0xE4EWgDcwG/gT4ANs\nACoCIcBuoD2wy7KsWsaYQGAeEATkAD2BtkBVy7ImGGPCgL8CFrAX+D1wf+nSpQ9GRETkazD38MMP\nc+bMGe6//35ng7ZGjRqRkJBAdnY2q1atwsfHhxkzZvDxxx+TlZVFZGQkAwcOvGps3n3WeWJiYhg/\nfjzlypXj4MGDjBkzhq5duzpft9lsREdHU7Vq1et+Rn369GHgwIGEhYWRkZFB9+7d6dq1K8nJybz2\n2mvMmTOHc+fOMWLECMCxrPy7774reLKzZ+G552D1anjzTfjTn6CAfw2+luxLl/jw4SchJ4d+ezbi\neROBhUhhrTuRzt8Sf3I+/2Ote3jq7oB850RHQ58+sHs3PPzw7a6w+Cns3yciIiIiJZm20yx+XL6r\nhTGmLRBoWVaYMaY0EAf8DaiSGyBUB+ZZlvVk7vl5Q/+EY1XEnNzjHpfNaYCpgM2yrDRjzBSgHbAn\nMzOTuXPn4uHhQb169YiKimLkyJHOL/B5bDYbU6dO5YUXXmDdunXUrFmT1atXs3nzZnJycggNDSUi\nIqLAsVc6ceIE69at49y5cwQHB9OlS5d8y6VvJDY2lqNHj9Iqd5vL//3f/yUqKoqUlBTnOampqVSu\nXNn5vKBt+AgIgDO/LFfHxwf+/OdC15Fn17vzObXvAM+umK/QQYpMeJA/606cIfHsRX5fPYjwAlY8\nbNsG/v7QoIELChQRERERkUIpDrdaPASEGWNigFU4VjpUKMS4hoDz3gPLsnIuey0IqA78J3feUKAq\ngK+v71UN5grSuHFjAKpVq0Zqaip79uwhISGB1q1b88QTT5Cens7hw4cL9QYfffRRvLy8CAgI4J57\n7uHEiRMFnpeYmOhsTJeYmAg47ikfPXo0ixcvxhjD8ePH+eabb3jqqafyjb2yMV2B7+3y0AHg4sVC\n1X+5cz+dZNu4yVRv25oa7Z686fEiheVpDPf5OoKtp+4OwLOAVTlxcdC0KVznj/IdJSYmRqsdRERE\nRKTYcfmKB+BbHCsXhgMYY7yB58kNCq5jD2AD9ueOuzxEOQn8ALS3LCsj9/VSQBUKUFCDtssb0VmW\nRb169Xj00UdZtmwZxhgyMzMpVaoUCQkJV4290q5du8jKyuL8+fMcP36cu+++u8DzatWqRUxMjPN5\nYmIiAwYMYNmyZc6Gdna7nRMnTtC2bVtSUlK4ePEijzzyCGFhYURFRREVFcW2bdt45JFHrlvTrdo6\n5m9knTuPbcq4Apv1idwuGRkQHw9jxri6EhERERERuR6XBw+WZa0yxrTIXZlgAck4ejvcyETgn8aY\n3kA2jrAib07LGDMSWJ5720UOMAJIL2iili1bMnPmTPbs2cPMmTMLvFjDhg158sknCQsLw9PTEz8/\nP5YvX37V2EqVKl01tnLlynTt2pWDBw8yYcIEPDw8+P777xkyZAi7d++mZ8+ePP/88/z+97/PNy4q\nKorTp0/Tr18/AF599VXatWvHk086VhrMnz+f5ORkOnToAEBISAgtW7bE29vb2cTut3R8Zzz29z+i\n8YhBVKhb6zefX+RmfPmlY2OWFi1cXYmIiIiIiFxPsWgueTsFBwdbedvN3XF8ffPfXuHvD+kFZjFX\nsSyLxaERnN5/kAHfb8HnrgBOnz7N8uXL6du3L8eOHSMiIgJfX1/WrFmDdyF7P7z88svEx8fzhz/8\ngfT0dKZPn0779u3x9vamXbt2PPTQQwWO69WrFwsXXnNn1WuaPn06w4YNu+lxhRlbq1Yt5y0yeb75\n5htefvllPD098fLyYt68edSoUSPfOWvWrOH111/Hx8eHMmXK8K9//YsKFSqQnZ3NH//4R+eKmffe\ne4/69evz9ddfO7dXfeGFF+jfv/8tvR93MOOHE2xKPcMnj9e46rU334TXXoOff4bAQBcUJyIiIiLF\nkppLFj8KHn5Do0aN4ssvv3Q+9/b2Zu3atUVyrVsSFQX/+Iej18NNNLcE+G7Rp6zq9TJPz/s7D0U6\ntidMSkpi4MCBrF+/no8++oi9e/cyfvz4m5r3wQcf5Pvvvwcc2xPOnj2bBx544KbmuBkFhQO/1diC\nXj927BhlypTB39+fVatW8dFHH/Gvf+Xf8fXQoUNUrFgRHx8f3nvvPY4ePcobb7zBrFmz8PT05IUX\nXsh3fsuWLYmOjqZKlSo0a9aMDRs2EFhCv3lfL3ho1w6SkuDbb29/XSIiIiJSfCl4KH5cfqtFSTJp\n0iRXl3B9druj/f9Nhg6ZZ8+xedQEKjZ+mIb/0915fPLkyezcuZPatWsDkJWVRUpKCvPmzbtqjtjY\nWMaOHYsxhrp16zJr1iyGDRvG4cOHsdls9OzZk+3bt/P888/zyiuvsHLlSgYOHEhISAjTpk1j0aJF\nlC5dmv79+9OvXz/nl/y0tDQGDRpEamoqlmUxd+5catWqhc1mu2pL1HfffZeUlBRsNht9+vTB09OT\n//u//3Pe+jJr1ixCQ0Ox2+2MGDGCnJwcgoKC+PDDD5k1a1a+sZGRkQV+ViNGjODrr7/mvvvuY8GC\nBfluvfHx8cHL6+o/ctWqVSvwnCVLltC8eXNat25NgwYNmDx5MpZlcfbsWWc4ExoaypdffkmbNm1u\n6tfU3eXkwH//CxERrq5ERERERERupDjsaiG3i90O17h14VoObfqCRS07kZFyjNIVgzgcG+d8beTI\nkTRu3Jj9+/czZswYIiMjCwwdLMsiKiqK5cuXExMTg5+fH5999hkzZsygSpUqxMTEMHjwYBo1asSS\nJUt47rnnnGP37NnDJ598whdffMGmTZvo3bt3vrknTpxI586d2bBhA1OmTGH06NHO12w2G2vXrqVm\nzZqsW7eOkSNHOq93eXDwySefMHfuXKZNmwbASy+9xD//+U82btxIy5Ytef/996859nJZWVl069aN\n2NhYZw+QPGfPnuW1117j1VdfveZnffz4cWbOnOns9ZGSksK9997Lpk2b8PX15Z///CepqamUK1fO\nOaZcuXL8/PPP15yzpPr+e8ctFurvICIiIiJS/GnFw53i+HE4ceKmg4eT9r2c3J0AQNKaWOr3ee4G\nIwqY4+RJkpKS6NSpEwAZGRnUqVOnUGMTEhIICQlxrgK4cptQu91ObGwss2fPBsi3ouDKLVELUtA5\n3377LX379gXgwoULzmaeN2KMoUmTJgA0bdqUffv2AZCZmUn37t354x//SP369QFo3749GRkZvPzy\nyzz33HOkp6fz3HPPMXv2bO655x7AsUVq27ZtAWjbti2ffPIJ/fv3z7dtalpaGuXLly9UfSVJXG7+\n1by5a+sQEREREZEbU/Bwp7DbHY83GTzU7RXB1jF/IzPjLGXuvYcHu7Z3vlbQNqQFCQoKokaNGqxc\nuZKyZcsCji/jhdGgQQNmzZpFdnY2np6e5OTk4HHZrSINGjSgefPmROSuub906ZLztSu3RAXyjb3W\nOQ0bNuSjjz7i3nvvzTfnlWOvZFkWO3bsoGnTpnz11Ve0bduWnJwcevfuzbPPPsuzzz7rPHflypXO\nn8+fP09ERARjxoyhadOmzuM2m40dO3ZQq1Yt56Ovry9lypTh0KFD3HvvvWzdupXXX3/9unWVRNu2\nORpKFjK/EhERERERF9KtFneKWwweSlcoz1NzHb0rWk16DY/LVhxUqlQJPz8/unTpQnZ29jXnMMYw\nefJkOnbsSOvWrXniiSf47rvvCnX9Bg0a0KlTJ1q0aEF4ePhVjRnHjBnDxx9/THh4OK1bt2b69OnX\nnS8vpFi8ePE1z3n33Xfp378/4eHhhIeHExsbW6ixXl5eLFu2jLCwMM6cOUPHjh355JNP+Oyzz4iO\njsZmszF06NACr7d7927efvttbDYbb775JuBoVrp48WJsNhtffvklgwcPBmDatGn07NmTsLAwhgwZ\nUmIbS15PXBw0a3bT7UpERERERMQFtKvFnWLAAPjsM8ctFzcpJzubXe99SKMh/fIFDyJFraBdLU6f\ndqx2+Otf4S9/cWFxIiIiIlIsaVeL4ke3WtwpbqGxZB4PT08eGzqgUOcmJCQwZMiQfMdeeOEFnn/+\n+Vu6dnG0ceNG/vrXv+Y7NnbsWMLDw11U0Z1l+3bHoxpLioiIiIi4BwUPd4LsbPj2W8hdql+U6tev\nT0xMTJFfx5XybsEQ19i2zXGLRW4fTxERERERKeZ0h/Sd4Icf4Pz5W17xIFKcxMU5fiv7+7u6EhER\nERERKQwFD3eCW2wsKVLcZGfDf/+rbTRFRERERNyJgoc7gd0OxkCDBq6uRORXSUiAM2fU30FERERE\nxJ0oeLgT2O1QsyaULu3qSkR+lW3bHI9a8SAiIiIi4j4UPNwJfsWOFiLFSVwc3H23I0cTERERERH3\noOChpDt3DhIT4eGHXV2JyK+2bZtjtYMxrq5EREREREQKS8FDSZeQADk5WvEgbu/kSdi///b0d5g/\nfz7p6enO535+fthsNmw2G++//z4AlmUxdOhQQkNDad++PT///PNV8yxYsIAmTZrQqlUrevTowcWL\nF/O9brPZGDhwYL7rtmjRgpYtW/L1119fNd/p06dZsGCB83lMTAzx8fG/+v2KiIiIiBQlBQ8lnXa0\nkBIiLs7xeHl/h+zs7CK51pXBQ5UqVYiJiSEmJobIyEgA1qxZw7lz59iyZQvdunVj0qRJV80TEhJC\nXFwcmzdvplq1akRHRztfW7lyJf6X7Ql66tQppk+fTkxMDNHR0QwbNuyq+RQ8iIiIiIg78nJ1AVLE\n7Hbw89NN8eL24uLAywvuvjuJxx/vSt26dfHy8uLs2bOkpqZiWRZz586lZs2a9OrVi8OHD+Pl5cX4\n8eOpVq0aXbp0oV69eiQkJNC3b1+ioqJIS0tj0KBB+cYfOnSIXbt20bVrV4KDg5kxYwbHjh0jLCyM\nChUqMHnyZKpXr05sbCzt27cHoEOHDsyaNeuqmmvUqOH82cfHBy8vx1+5OTk5vPvuuwwfPpylS5cC\n8OWXXxIaGoq3tzcPPPAAZ86c4eLFi/j4+DjnmDx5Mjt37sRmszFo0CDmz5+Pn58f8+bNY8OGDdSp\nU4cOHTrw9ddfc99997FgwQI8PJQvi4iIiIhrKXgo6ex2qF8fPD1dXYnIr7JtGzRq5MjRkpKS2LBh\nA2+99RaNGjWiR48e7N69m9GjRzNnzhx+/PFHtm7dijGGnJwcDh06xNGjR9myZQseHh7Uq1ePqKgo\nJk6cSOfOnfONX7p0KY0aNSI6OpqqVasCjusFBQWxZs0aIiMj2bBhA6mpqQQGBgJQrlw5Tp06dc3a\n9+7dy+rVq9myZQsAH374IZ07d8bX19d5zuXz5c35888/c++99zqPjRw5koSEBNavXw/A/v37qVWr\nFr179wYgKyuLbt26MWXKFAYNGsTy5ct59tlnf6NfARERERGRW6PgoaSz2+GZZ1xdhchNy7YsDl+4\nBMDnR9L56it/Bg50dJVs2LAhAQEB2O12YmNjmT17NgBeXl5UqFCBQYMG0adPH0qXLs3YsWMBqFev\nHqVzt5T1zA3iChpfkKCgIADatGnDSy+9BED58uU5ffo0AGlpaQQGBpKRkeFcBTFhwgRCQkJITk6m\nX79+LF68GF9fXy5cuMDChQtZvXo1W7dudV7j8vny5ixfvjwDBw4kMTGR5557zjn3tRhjaNKkCQBN\nmzZl3759N/6gRURERESKmIKHkuzECTh+XP0dxC1tPHmGr9POA/D6mjTOnQtw9nfICw4aNGhA8+bN\niYiIAODSpUtkZmbSu3dv+vfvT3R0NFOmTGHo0KGYArbCKGg8gLe3N1lZWQBkZGTg5+eHp6cn8fHx\nzhAiLCyMTz/9lGeffZZVq1YRFhZG2bJliYmJcc5/8uRJunTpwuzZs6mZe7vTwYMHOX36tLMh5dGj\nR5k3bx5dunThtddeIzMzk6NHj1K2bFl8fHyYN2+ec74jR44467qyTnA0vNyxYwdNmzblq6++om3b\ntrf+CyAiIiIi8htR8FCSqbGkuLEng/z5W+JPAKTtcdyS0KKFY5OWPGPGjOHFF19kxowZWJZFu3bt\n6NmzJz169MDT05NLly4xffr0a16joPF/+MMf6Ny5M5GRkbRo0YIOHTowePBg/P39McYwZ84cwLH6\nYeXKlYSGhhIQEJCv6WOecePGkZKSwogRIwDo06cPkZGR7NixA8DZSDJvZ4shQ4YQFhaGMYZp06Zd\nNV+lSpXw8/OjS5cuDBkyhKeeeoqoqChWrlzJxx9/jJeXF8uWLWPUqFFUqVKFjh073sInLyIiIiLy\n2zKWZbm6htsqODjYyvuf/hJv2jSIioKjR6FSJVdXI3JT1p1IdwYPe16vSJa9DCeOeFDAwgXJVatW\nLRITE11dhoiIiIhLGWN2WpYV7Oo65Bda8VCS2e0QFAQVK7q6EpGbFh7k2GryySB/7t1nYQsxCh1E\nRERERNyQ9lkryex2x20W+rYmbsjTGJ66O4BjxwzHD3nQorl+H9+IVjuIiIiISHGk4KGkysmBb79V\nfwdxe3FxjscWLW7/tefPn096errzuZ+fHzabDZvNxvvvvw84GjoOHTqU0NBQZ8PIKy1YsIAmTZrQ\nqlUrevTowcWLFwHo2rUrLVq0oGnTpsyfPz/fmO+//55SpUrl2/lCRERERMQdKXgoqQ4ehLNnFTyI\n24uLA29vePTRa5+TnZ1dJNe+MnioUqUKMTExxMTEEBkZCcCaNWs4d+4cW7ZsoVu3bkyaNOmqeUJC\nQoiLi2Pz5s1Uq1aN6OhoAN566y22bdtGbGwsEyZM4MKFC84xb7zxBmFhYUXyvkREREREbif1eCip\ntKOFlBDbtkFwMPj45D+elJRE165dqVu3Ll5eXpw9e5bU1FQsy2Lu3LnUrFmTXr16cfjwYby8vBg/\nfjzVqlWjS5cu1KtXj4SEBPr27UtUVBRpaWkMGjQo3/hDhw6xa9cuunbtSnBwMDNmzODYsWOEhYVR\noUIFJk+eTPXq1YmNjaV9+/YAdOjQgVmzZl31HmrUqOH82cfHBy8vx1+9tWvXBhzbYnp6ejq3/Ny+\nfTuVKlVybhsqIiIiIuLOFDyUVHnBQ4MGrq1D5Fe4eBF27oSXXy749aSkJDZs2MBbb71Fo0aN6NGj\nB7t372b06NHMmTOHH3/8ka1bt2KMIScnh0OHDnH06FG2bNmCh4cH9erVIyoqiokTJ9K5c+d845cu\nXUqjRo2Ijo6matWqzusFBQWxZs0aIiMj2bBhA6mpqQQGBgJQrlw5Tp06dc33s3fvXlavXs2WLVvy\nHZ84cSI9evTAJzddefPNN/nggw945ZVXfoNPUURERETEtRQ8lFR2O9SoAWXLuroSkVs2fbojfPC6\nxt9UDRs2JCAgALvdTmxsLLNnzwbAy8uLChUqMGjQIPr06UPp0qUZO3YsAPXq1aN06dIAzhUFBY0v\nSFBQEABt2rThpZdeAqB8+fKcPn0agLS0NAIDA8nIyHCugpgwYQIhISEkJyfTr18/Fi9ejK+vr3PO\nBQsWEB8fz0cffQTAZ599RnBwMBUqVLi1D01EREREpJhR8FBS5e1oIeKmPv0U/vxnx8/TpjmaS3bs\nmP+cvOCgQYMGNG/enIiICAAuXbpEZmYmvXv3pn///kRHRzNlyhSGDh3qvJ3hcgWNB8ctEFlZWQBk\nZGTg5+eHp6cn8fHxzhAiLCyMTz/9lGeffZZVq1YRFhZG2bJliYmJcc5/8uRJunTpwuzZs6lZs6bz\n+H/+8x8WLVrE8uXL8fBwtNzZtWsXMTExbNu2Dbvdzt69e/n3v//N/fff/ys/URERERER11DwUBJd\nuAD798Nzz7m6EpGbdvYszJ8PY8ZA7nd+LlyAtWuvDh7yjBkzhhdffJEZM2ZgWRbt2rWjZ8+e9OjR\nA09PTy5dusT06dOvec2Cxv/hD3+gc+fOREZG0qJFCzp06MDgwYPx9/fHGMOcOXMAx+qHlStXEhoa\nSkBAAAsWLLhq/nHjxpGSksKIESMA6NOnD5GRkfTq1Yu6devy9NNPA7Bw4ULGjBnDmDFjAOjfvz8D\nBw5U6CAiIiIibs1YluXqGm6r4OBga8eOHa4uo2h98w089hj8+9/QrZurqxEplCNHYOZMmD0bTp2C\nBx+EpCS4dAlKl4aPPrp28CAiIiIikscYs9OyrGBX1yG/0IqHkkg7WogbiY+HyZNh0SLHCoeICHjl\nFcetFcuXO1Y6PP20QgcREREREXel4KEkstsdew/mbtUnUtxYFqxZA++8A+vXQ5ky8OKLMHw4XNYC\ngY4dFTiIiIiIiLg7BQ8lkd0O9epdeysAERe5cAEWLnSscEhIgMqVYeJEGDwYcnekFBERERGREkbf\nTEsiux2eeMLVVYg4nTwJs2Y5ejj89BM8/DB8+CH06AHe3q6uTkREREREipKCh5Lm558dXfrU30GK\ngX37YMoUR8hw4QI884yjf0N4OBSwq6WIiIiIiJRACh5KGjWWFBezLNi82dG/YcUKR7uRPn1gxAio\nX9/V1YmIiIiIyO2m4KGkUfAgLpKZCUuWOPo37NwJQUEwdiwMGQIVK7q6OhERERERcRUFDyWN3e7o\n0le5sqsrkTtEWhr84x8wfTocPgx16sDs2dC3L/j5ubo6ERERERFxNQUPJY3d7ljtoBvopYglJcG0\naTBvHmRkQOvW8N578LvfgYeHq6sTEREREZHiQsFDSWJZsGeP45+aRYrI9u2O2ymWLnUEDN27w8iR\n8Nhjrq5MRERERESKIwUPJcmPP8KZM+rvIL+57GxYvtzRMPKLL+Cuuxy7UwwbBlWruro6EREREREp\nzhQ8lCRqLCm/sbNn4YMPYOpUOHAAqld3/DxgAPj7u7o6ERERERFxBwoeSpK84KFhQ9fWIW7vyBGY\nOdPRJPLUKWjaFCZOhIgI8NLfGiIiIiIichP0FaIksdsd/yQdEODqSsRNxcc7+jcsWgRZWY6g4ZVX\noEULV1cmIiIiIiLuSsFDSZK3o4XITbAsWLPG0b9h/XooUwZefBGGD4eaNV1dnYiIiIiIuDsFDyXF\nxYuwdy906uTqSsRNXLgACxc6VjgkJEDlyo7bKQYPhsBAV1cnIiIiIiIlhYKHkmLvXsfWA1rxIDdw\n8iTMmuXo4fDTT/Dww/Dhh9CjB3h7u7o6EREREREpaRQ8lBTa0UJuYN8+mDLFETJcuAAyqvS1AAAc\nRklEQVTPPOPo3xAeDsa4ujoRERERESmpFDyUFHY7lCoFDz7o6kqkGLEs2LzZ0b9hxQrw8YE+fWDE\nCKhf39XViYiIiIjInUDBQ0lht0O9eo7wQe54mZmwZIkjcPj6awgKgrFjYcgQqFjR1dWJiIiIiMid\nRMFDSWG3Q1iYq6sQF0tLg7lzYfp0SE6GOnVg9mzo2xf8/FxdnYiIiIiI3IkUPJQEp045vmWqv8Md\nKykJpk2DefMgIwNat3Y0kPzd78DDw9XViYiIiIjInUzBQ0mwZ4/jUcHDHWf7dsd2mEuXOgKG7t1h\n5Eh47DFXVyYiIiIiIuKg4KEk0I4Wd5TsbFi+3NG/4Ysv4K67HLtTDBsGVau6ujoREREREZH8FDyU\nBHa749unvnWWaGfPwgcfwNSpcOAAVK/u+HnAAPD3d3V1IiIiIiIiBVPwUBLY7Y7VDsa4uhIpAkeO\nwMyZjiaRp05B06YwcSJERICX/gSLiIiIiEgxp68t7s6yHD0enn/e1ZXIbyw+3tG/YdEiyMpyBA2v\nvAItWri6MhERERERkcJT8ODuDh927KGo/g4lgmXBmjWO/g3r10OZMvDiizB8ONSs6erqRERERERE\nbp6CB3enxpIlwoULsHChY4VDQgJUruy4nWLwYAgMdHV1IiIiIiIit07Bg7vLCx4aNnRtHXJLTp6E\n996Dd9+Fn36Chx+GDz+EHj3A29vV1YmIiIiIiPx6Ch7cnd0O990H5cq5uhK5Cfv2wZQpjpDhwgV4\n5hlH/4bwcPUIFRERERGRkkXBg7vL29FCij3LgthYx+0UK1aAjw/06QMjRkD9+q6uTkREREREpGgo\neHBnmZmwdy/87neurkSuIzMTlixxNIz8+msICoKxY2HIEKhY0dXViYiIiIiIFC0PVxdQWMaYcsaY\npcaYvcaY74wxzXOPD8099q0xZpKr67yt9u1zfKvViodiKS0N/vd/oUYN6NULzp6FOXPg0CEYP16h\ng4iIiIiI3BncacXDNGC1ZVnPGWO8gdLGmNZAJ+ARy7IuGmPucW2Jt5l2tCiWkpJg2jSYNw8yMqB1\na5g1y7EwxcNtoj4REREREZHfhlsED8aYu4BWQH8Ay7IuAZeMMb8H3rYs62Lu8Z9cVqQr2O3g5QV1\n67q6EgG2b3f0b1i61BEwdO8OI0fCY4+5ujIRERERERHXcZd/f30AOAF8YIz5xhgzzxhTBngQCDXG\nbDfGxBpjHi9osDHmBWPMDmPMjhMnTtzOuouW3Q516mjfRRfKzoZPP4WQEGjWDNascexOcfAgREcr\ndBAREREREXGX4MELeAyYZVnWo8BZYHTu8fJAM+BV4GNjrt6M0LKsuZZlBVuWFXz33XffxrKLmHa0\ncJmzZ2HmTEfu07kzpKTA1Klw+DBMmgRVq7q6QhERERERkeLBXYKHZCDZsqztuc+X4ggikoFPLIcv\ngRwgyEU13l7p6fDjjwoebrMjR+DPf4b77oOhQx07VHz8MezfD8OHg7+/qysUEREREREpXtyix4Nl\nWceMMYeNMXUsy9oHPAEkAAeA1sAmY8yDgDdw0oWl3j579jgeFTzcFrt3O/o3fPQRZGVBRITjlooW\nLVxdmYiIiIiISPHmFsFDrqHAwtwdLX4A/gfHLRf/NMbsAS4B/SzLslxY4+2jHS2KnGXB6tWOwGH9\neihTBl580bGyoWZNV1cnIiIiIiLiHtwmeLAsaxcQXMBLvW93LcWC3e5Y13///a6upMS5cAEWLnQE\nDgkJULkyTJwIgwdDYKCrqxMREREREXEvbhM8yBXsdmjYEK7upSm36ORJeO89ePdd+OknePhh+PBD\n6NFDG4eIiIiIiIjcKgUP7siyHMFDt26urqRE2LcPpkxxhAwXLsAzzzj6N4SHK9cRERERERH5tRQ8\nuKMjR+DUKfV3+BUsC2JjHbdTrFgBPj7Qpw+MGAH167u6OhERERERkZJDwYM7UmPJW5aZCUuWwDvv\nwNdfO7bDHDsWhgyBihVdXZ2IiIiIiEjJo+DBHSl4uGlpaTB3LkyfDsnJUKcOzJnjWOXg5+fq6kRE\nREREREouBQ/uyG6HKlW0xUIhJCXBtGkwbx5kZEDr1jBrFvzud+Dh4erqRERERERESj4FD+4oPl6r\nHW5g+3bH7RTLljkChu7dYeRIeOwxV1cmIiIiIiJyZ9G/+bqbzEz47jsFDwXIzoZPPgE/v1o0awZr\n1zp2pzh4EKKjrx86nDx5ku7duxMeHs7TTz8NgGVZvPzyyzRv3pzHH3+cjz76CID58+czYcKE69ay\nYMECmjRpQqtWrejRowcXL14EoGvXrrRo0YKmTZsyf/78fGO+//57SpUqxdatW2/9QxARERERESlm\ntOLB3ezfD5cuKXi4zNmz8MEHMHUqHDgAXl6OnwcMAH//ws0RFRXF2LFjadCggfPYt99+y7fffktc\nXBxnzpyhUaNG9OzZs1DzhYSE0KtXLzw9PRk1ahTR0dFERkby1ltvUbt2bS5cuEDDhg3p0aMHvr6+\nALzxxhuEhYXd9PsXEREREREpzhQ8uBs1lnQ6cgRmzoRZs3I4fbov/v6H+d3vHmPfPrjrrvk8//wy\nAJKTk5k+fTqhoaH079+fUqVKceTIEVJTU1m+fDkVKlRgz549vPPOOxw4cIDu3bszZMgQKleujLe3\nN5mZmZw5c4by5cs7r719+3Y6dOiQb+7L1ahRw/mzj48PXl6OP2q1a9cGwNvbG09PT4wxzvkqVaqE\np6dnkX5mIiIiIiIit5tutXA3djt4ekK9eq6uxGV274Z+/aB6dXj7bahT5z906lSG9PRY/vzn58jK\nygIgMzOTFStW8OmnnzJixAjn+AYNGvDZZ5/RsWNHPv74Y3766SfsdjvDhw9n3bp1LFq0iO+++47A\nwEBq167Ngw8+SKNGjXjttdecc1xr7ivt3buX1atX071793zHJ06cSI8ePfDx8QHgzTffZPTo0b/h\npyQiIiIiIlI8aMWDu7Hb4cEHIfcL653CsmDcOPjnPx3bYZYpAy++CMOHw9Kl3xMU1ASApk2bOlcR\nPP744wBUr16dtLQ051yNGzcGoFq1ahw4cIDAwEAqV67MI488AoDNZsNut3P48GFSUlJITEwkLS2N\n0NBQ2rZtW+DcGRkZtG/fHoAJEyYQEhJCcnIy/fr1Y/Hixc7bKcDR/yE+Pt7ZM+Kzzz4jODiYChUq\nFNXHJyIiIiIi4jIKHtyN3Q65X3rvJH//O/z1r46fS5WCuXPh+ecdz2vXrs26deuIjIzkq6++wrIs\nAHbu3AnAoUOHCAgIcM6VF0yAo4Gkr68vNWrU4PDhw9x3333s3LmTzp07c+LECQIDA/H09MTf359L\nly6RnZ1d4Nxly5YlJibGOe/Jkyfp0qULs2fPpmbNms7j//nPf1i0aBHLly/HI3c/z127dhETE8O2\nbduw2+3s3buXf//739x///2/7YcoIiIiIiLiArrVwp2cOePYouEO7O+QlPTLz5mZsG3bL887depE\nWloaYWFhfPrpp85+CqVLl6Zdu3Z06tSJd95557rzT5s2jd69e9OiRQsaN27MY489xpNPPklOTg4h\nISG0aNGCoUOHUrp06ULNPW7cOFJSUhgxYgQ2m433338fgF69enHy5EmefvppbDYbKSkpjBkzho0b\nN7J69Wqeeuop/v73vyt0EBERERGREsPk/evwnSI4ONjasWOHq8u4Nf/9LzRvDv/3f9Cpk6urua2W\nL4eePeHcOShdGj76CDp2vPb58+fPJzk5OV9fBhERERERKfmMMTstywp2dR3yC91q4U7u4B0tOnZ0\nhA1r18LTT18/dBAREREREZHiQ8GDO7HbHV0Vq1d3dSUu0bFj4QOH/v37F2ktIiIiIiIiUjjq8eBO\n7HZo2BA89MsmIiIiIiIi7kHfYN2FZTmChzvwNgsRERERERFxXwoe3MWxY5CaquBBRERERERE3IqC\nB3dxBzeWFBEREREREfel4MFdKHgQERERERERN6TgwV3Y7VCpEgQFuboSERERERERkUJT8OAu1FhS\nRERERERE3JCCB3eQnQ0JCQoeRERERERExO0oeHAHiYlw4YKCBxEREREREXE7Ch7cgRpLioiIiIiI\niJtS8OAO7Hbw8ID69V1diYiIiIiIiMhNUfDgDux2qFUL/PxcXYmIiIiIiIjITVHw4A60o4WIiIiI\niIi4KQUPxd3Zs3DggIIHERERERERcUsKHoq7hASwLAUPIiIiIiIi4pYUPBR3eTtaPPywa+sQERER\nERERuQUKHoo7ux1Kl4YaNVxdiYiIiIiIiMhNU/BQ3Nnt0KCBYztNERERERERETejb7PFnXa0EBER\nERERETem4KE4++knx38KHkRERERERMRNKXgozvIaSyp4EBERERERETel4KE4i493PCp4EBERERER\nETel4KE4s9vhnnsc/4mIiIiIiIi4IQUPxZkaS4qIiIiIiIibU/BQXGVnw7ffKngQERERERERt6bg\nobj64Qc4f17Bg4iIiIiIiLg1BQ/FlXa0EBERERERkRJAwUNxZbeDMdCggasrEREREREREbllCh6K\nK7sdataE0qVdXYmIiIiIiIjILVPwUFxpRwsREREREREpARQ8FEfnz0NiooIHERERERERcXsKHoqj\nhATIyVHwICIiIiIiIm5PwUNxpB0tREREREREpIRQ8FAc2e3g6wu1arm6EhEREREREZFfRcFDcWS3\nQ/364Onp6kpEREREREREfhUFD8WRdrQQERERERGREkLBQ3Fz8iQcO6bgQUREREREREoEBQ/FjRpL\nioiIiIiISAlyS8GDMaaSMSbOGLPJGONjjFlmjIkxxjQxxiy8zri2xpg+t3C9RsaYVrdY63XHxsTE\nMHDgwKuOX7hwgV69ehEaGkqvXr24cOHCVedMmjSJpk2b0rJlS4YOHYplWZw/f56nnnqKkJAQmjVr\nxueff55vzKZNmzDGkJycXHBBCh5ERERERESkBLnVFQ+tgbWWZbUGygNBlmXZLMv60rKsXtcaZFnW\nasuy/nUL12sE3FLwcKtj58+fT926ddmyZQt16tRh/vz5V50TERHB9u3b+eKLLzh+/DgbN27Ey8uL\nf/zjH2zdupWVK1cSFRXlPN+yLCZPnkxwcPC1L2y3Q4UKUKnSzZYsIiIiIiIiUuwUKngwxkw0xsTm\nrnLoB7wO9DXGzAPmAg/nrngoa4xJzB0TmLsSIjZ3ZUQlY0x/Y8xrua+H5b4WY4yZbRyqG2N2GmOi\njTFfG2PyvrWPBCJzz62S+zjVGLPWGLPBGOOTO+dQY8yW3DoHFjS2oPd34MABIiIiaNSoEUuWLAEg\nNjaW9u3bA9ChQwdiY2OvGle7dm3nzz4+Pnh5eVGqVCmqV68OgJ+fHx4ev3zES5YsoU2bNpQpU+ba\nH3ZeY0ljrvMrIiIiIiIiIuIevG50gjGmLRBoWVaYMaY0EAf8DahiWdYEY0x1YJ5lWU/mnp839E84\nVkXMyT3ucdmcBpgK2CzLSjPGTAHaAXuAe4FQIAf4Lve8yUBVy7ImXHaNGMuyoowxc4GnjDEHgLY4\nVjd4AFuMMZ9eObag1QYnTpxg3bp1nDt3juDgYLp06UJqaiqBgYEAlCtXjp9//vman1FsbCxHjx6l\nVav8CytGjBjBqFGjAMjMzGTevHmsXLmSpUuXFjyRvz9kZOR9SI7n6enXvK6IiIiIiIhIcXfD4AF4\nCAgzxsTkPvcBKhRiXEPgH3lPLMvKuSyUCAKqA//JPVYW2IcjePjOsqxzAMaY7OvMvzP38VBuPX5A\nfWBT7vEA4L5C1Mmjjz6Kl5cXAQEB3HPPPZw4cYLy5ctz+vRpANLS0ihfvjyJiYnOfhDz5s2jVq1a\nxMfHM3r0aFasWHF56MIbb7xBQEAA//M//wPA3Llz6d27N97e3tcuJC90yHPmTGHKFxERERERESm2\nChM8fItj5cJwAGOMN/A8UPUG4/YANmB/7rjLb+s4CfwAtLcsKyP39VJAFcAqYK5LBdR6+XkGx+qI\nb4AulmVZxphSlmVlGmPqFzA2n127dpGVlcX58+c5fvw4d999N2FhYaxatYpGjRqxatUqwsLCqFWr\nFjExMc5xiYmJDBgwgGXLlhEUFOQ8PnPmTPbv38+HH374y4exZw8HDhxg0aJFxMfH06dPHz7//HN8\nfX2vV5qIiIiIiIiIW7th8GBZ1ipjTIvcFQ8WkAxsKMTcE4F/GmN6A9k4woq8OS1jzEhgee5tFznA\nCOBa9xV8AbxsjGkIvHyNOvcYY9YDsbkrJc4bYzpeObZx48ZXja1cuTJdu3bl4MGDTJgwAQ8PD/r3\n78+AAQMIDQ2latWqfPDBB1eNi4qK4vTp0/Tr1w+AV199lccff5zhw4fTvHlzWrduDcCGDRuYNWuW\nc5zNZuNf//qXQgcREREREREp8YxlFbTAoOQKDg62duzY4eoyChYQkP/2CvV4EBERERERuSnGmJ2W\nZV1nK0G53Qpzq0WJYYyZVLZsWWw2GwDe3t6sXbvWtUVdTiGDiIiIiIiIlDBa8SAiIiIiIiIlhlY8\nFD8eNz5FREREREREROTWKHgQERERERERkSKj4EFEREREREREioyCBxEREREREREpMgoeRERERERE\nRKTIKHgQERERERERkSKj4EFEREREREREioyCBxEREREREREpMgoeRERERERERKTIKHgQERERERER\nkSKj4EFEREREREREioyCBxEREREREREpMgoeRERERERERKTIKHgQERERERERkSKj4EFERERERERE\nioyCBxEREREREREpMgoeRERERERERKTIKHgQERERERERkSKj4EFEREREREREioyCBxEREREREREp\nMsayLFfXcFsZY04A/9/encfKWZVxHP/+pJiyKAhowyaLgAiVtRAIaBAwIhJBg0ICCW4xGEwBNQjE\nYDFoIICIMaJIMajEooCAQIjKEvEPKmUtUBWCrIKlkbYie/v4x3tapqWXckPnzr29388/855l3nnm\nvTk5M889551HBx2H1AcbAfMGHYQ0YI4DyXEggeNgvNuiqt496CD0mnGXeJBWV0lmVdWUQcchDZLj\nQHIcSOA4kEYbt1pIkiRJkqS+MfEgSZIkSZL6xsSDtPq4cNABSKOA40ByHEjgOJBGFe/xIEmSJEmS\n+sYVD5IkSZIkqW9MPEiSJEmSpL4x8SCNQUk2T3JzkgeS3J/k+Fa/QZI/JnmwPb5r0LFK/ZRkjSR3\nJbm2lbdKMjPJQ0kuS/L2Qcco9VuS9ZNcnuRvSeYk2dv5QONNkhPbZ6L7kvw6yUTnBGn0MPEgjU2v\nAl+vqh2AvYDjkuwAnAzcWFXbAje2srQ6Ox6Y01M+CzivqrYBngW+OJCopJF1PnBDVW0P7Ew3JpwP\nNG4k2RSYCkypqsnAGsCROCdIo4aJB2kMqqqnqurOdvxfug+ZmwKHApe0bpcAhw0mQqn/kmwGfAK4\nqJUD7A9c3ro4BrTaS7Ie8GFgOkBVvVxV83E+0PgzAVgryQRgbeApnBOkUcPEgzTGJdkS2BWYCUyq\nqqda09PApAGFJY2EHwAnAYtbeUNgflW92spP0CXkpNXZVsAzwM/btqOLkqyD84HGkap6EjgHeIwu\n4bAAuAPnBGnUMPEgjWFJ1gWuAE6oqoW9bdX9Vq6/l6vVUpJDgLlVdcegY5EGbAKwG3BBVe0K/I/l\ntlU4H2h11+5hcihdIm4TYB3goIEGJWkZJh6kMSrJmnRJh0ur6spW/e8kG7f2jYG5g4pP6rN9gE8m\neQSYQbec9nxg/bbMFmAz4MnBhCeNmCeAJ6pqZitfTpeIcD7QeHIg8M+qeqaqXgGupJsnnBOkUcLE\ngzQGtb3s04E5VfX9nqZrgGPa8THA1SMdmzQSquqUqtqsqraku4HYTVV1FHAzcHjr5hjQaq+qngYe\nT/L+VnUA8ADOBxpfHgP2SrJ2+4y0ZBw4J0ijRLrVd5LGkiT7ArcCs3ltf/updPd5+A3wXuBR4LNV\n9Z+BBCmNkCT7Ad+oqkOSbE23AmID4C7g6Kp6aZDxSf2WZBe6m6y+HXgY+DzdP5ecDzRuJDkdOILu\nl7/uAr5Ed08H5wRpFDDxIEmSJEmS+satFpIkSZIkqW9MPEiSJEmSpL4x8SBJkiRJkvrGxIMkSZIk\nSeobEw+SJEmSJKlvTDxIktQjyaIkdye5L8lvk6w96JgAkpzax3MfluS0djwtSSXZpqf9hFY3pZWv\nT7J+O36uPW6Z5L52PCXJD1dxjDOSbLsqzylJkkaGiQdJkpb1QlXtUlWTgZeBY9/sE5Os0b+wGHbi\nYRjxnAT8uKc8Gziyp/wZ4P4lhao6uKrmD3WyqppVVVOHE+ubcEGLU5IkjTEmHiRJGtqtwDYASa5K\nckeS+5N8eUmHJM8lOTfJPcDeSU5LcntbMXFhkrR+tyQ5L8msJHOS7JHkyiQPJjmj53xHJ/lrW3Xx\n0yRrJDkTWKvVXTpUvyHiOTPJA0nuTXLO8m8wyXbAS1U1r6f6KuDQ1v4+YAEwr+c5jyTZaKiLlmS/\nJNe24w3atbs3yW1Jdmr105Jc3K7Lw0mmtvp1klyX5J52DY/o+VscmGTCm/rLSZKkUcPEgyRJK9C+\n4H6c7r//AF+oqt2BKcDUJBu2+nWAmVW1c1X9BfhRVe3RVkysBRzSc9qXq2oK8BPgauA4YDLwuSQb\nJvkAcASwT1XtAiwCjqqqk3ltJcZRQ/VbPh5gDvApYMeq2gk4g9fbB7hzubqFwONJJtOtfLhsWBdv\nWacDd7XXPxX4RU/b9sDHgD2BbydZEzgI+Fe7npOBGwCqajHwELDzW4hFkiQNgIkHSZKWtVaSu4FZ\nwGPA9FY/ta0iuA3YHFhyv4FFwBU9z/9IkplJZgP7Azv2tF3THmcD91fVU1X1EvBwO+cBwO7A7S2G\nA4CtVxDjG/XrjWcB8CIwPcmngedXcK6NgWdWUD+DLulwGPC7FbS/WfsCvwSoqpuADZO8s7VdV1VL\nVlvMBSbRXZuPJjkryYeqakHPueYCm7yFWCRJ0gC4XFGSpGW90FYRLJVkP+BAYO+qej7JLcDE1vxi\nVS1q/SbS3SthSlU9nmRaTz+Al9rj4p7jJeUJQIBLquqUlcT4Rv2WxlNVrybZky4xcTjwVbpkyDLv\nF1hvBee5FjgbmFVVC9uOkVWt9xosAiZU1T+S7AYcDJyR5Maq+k7rM7HFK0mSxhBXPEiStHLrAc+2\npMP2wF5D9FuSZJiXZF26L/vDcSNweJL3wNL7I2zR2l5pWxFW1m+pFsN6VXU9cCIr3qYwh3Yfi15V\n9TzwTeC7w3wPy7uVtg2kJXDmVdXCoTon2QR4vqp+RZf42K2neTvgvrcYjyRJGmGueJAkaeVuAI5N\nMgf4O912i9epqvlJfkb35fhp4PbhvEhVPZDkW8AfkrwNeIXuPhCPAhcC9ya5s93nYah+vd4BXN1W\nYgT42gpe9s/AuUlSVbVcPDOGE/8QpgEXJ7mXbqvHMSvp/0Hg7CSL6d7XVwCSTKJbjfL0KohJkiSN\noCz3GUOSJI0zSc4Hfl9Vfxp0LENJciKwsKqmr7SzJEkaVdxqIUmSvgesPeggVmI+cMmgg5AkScPn\nigdJkiRJktQ3rniQJEmSJEl9Y+JBkiRJkiT1jYkHSZIkSZLUNyYeJEmSJElS35h4kCRJkiRJffN/\nTw6efx6gQyAAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1VpRimNp5tuW", + "colab_type": "text" + }, + "source": [ + "# Image Throughput\n", + "\n", + "One of the first thing I noticed running batches through my first ported EfficientNet weights -- the image throughput does not scale with FLOP or parameter counts. Much larger ResNet, DPN, etc. models can match the throughput of EfficientNet models with far fewer parameters and FLOPS. I've trained on many of these models and training throughputs do -- in relative terms -- mirror the validation numbers here.\n", + "\n", + "This was surprising to me given the FLOP ratios. I'd like to see an in depth comparison with Tensorflow, XLA enabled, targeted for both GPU and TPU." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "iapzkrt2gBwR", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 357 + }, + "outputId": "f47bfaa1-d78a-4d2f-a325-8fe247acc46a" + }, + "source": [ + "print('Results by image rate:')\n", + "results_by_rate = list(sorted(results.keys(), key=lambda x: results[x]['rate'], reverse=True))\n", + "for m in results_by_rate:\n", + " print(' {:32} Rate: {:>6.2f}, Top-1 {:.2f}, Top-5: {:.2f}'.format(\n", + " m, results[m]['rate'], results[m]['top1'], results[m]['top5']))\n", + "print()\n" + ], + "execution_count": 44, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Results by image rate:\n", + " efficientnet_b0-224 Rate: 165.73, Top-1 64.58, Top-5: 85.89\n", + " resnet50-224 Rate: 159.51, Top-1 66.81, Top-5: 87.00\n", + " dpn68b-224 Rate: 155.15, Top-1 65.60, Top-5: 85.94\n", + " resnet50-240-ttp Rate: 154.35, Top-1 67.02, Top-5: 87.04\n", + " efficientnet_b1-240 Rate: 151.63, Top-1 67.55, Top-5: 87.29\n", + " gluon_seresnext50_32x4d-224 Rate: 150.43, Top-1 68.67, Top-5: 88.32\n", + " efficientnet_b2-260 Rate: 144.20, Top-1 67.80, Top-5: 88.20\n", + " tf_efficientnet_b2-260 Rate: 142.73, Top-1 67.40, Top-5: 87.58\n", + " resnet50-260-ttp Rate: 135.92, Top-1 67.63, Top-5: 87.63\n", + " gluon_seresnext101_32x4d-224 Rate: 131.57, Top-1 70.01, Top-5: 88.91\n", + " gluon_seresnext50_32x4d-260-ttp Rate: 126.52, Top-1 69.67, Top-5: 88.62\n", + " tf_efficientnet_b3-300 Rate: 119.13, Top-1 68.52, Top-5: 88.70\n", + " gluon_seresnext50_32x4d-300-ttp Rate: 104.69, Top-1 70.47, Top-5: 89.18\n", + " gluon_seresnext101_32x4d-260-ttp Rate: 95.84, Top-1 71.14, Top-5: 89.47\n", + " ig_resnext101_32x8d-224 Rate: 83.35, Top-1 73.83, Top-5: 92.28\n", + " gluon_seresnext101_32x4d-300-ttp Rate: 74.87, Top-1 71.99, Top-5: 90.10\n", + " tf_efficientnet_b4-380 Rate: 69.10, Top-1 71.34, Top-5: 90.11\n", + " ig_resnext101_32x8d-300-ttp Rate: 43.62, Top-1 75.17, Top-5: 92.66\n", + "\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "Y2bawRNtfFmH", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 621 + }, + "outputId": "ba888805-c714-4fdf-9ea8-b84d6016b296" + }, + "source": [ + "rate_effnet = np.array([results[m]['rate'] for m in names_effnet])\n", + "rate_effnet_tf = np.array([results[m]['rate'] for m in names_effnet_tf])\n", + "rate_resnet = np.array([results[m]['rate'] for m in names_resnet])\n", + "rate_resnet_ttp = np.array([results[m]['rate'] for m in names_resnet_ttp])\n", + "\n", + "fig = plt.figure()\n", + "ax1 = fig.add_subplot(111)\n", + "ax1.scatter(rate_effnet, acc_effnet, s=10, c='r', marker=\"s\", label='EfficientNet')\n", + "ax1.plot(rate_effnet, acc_effnet, c='r')\n", + "annotate(ax1, rate_effnet, acc_effnet, names_effnet, xo=.5, align='left')\n", + "\n", + "ax1.scatter(rate_effnet_tf, acc_effnet_tf, s=10, c='#8C001A', marker=\"v\", label='TF-EfficientNet')\n", + "ax1.plot(rate_effnet_tf, acc_effnet_tf, c='#8C001A')\n", + "annotate(ax1, rate_effnet_tf, acc_effnet_tf, names_effnet_tf, xo=-.5, yo=-.2, align='right')\n", + "\n", + "ax1.scatter(rate_resnet, acc_resnet, s=10, c='b', marker=\"o\", label='ResNet')\n", + "ax1.plot(rate_resnet, acc_resnet, c='b')\n", + "annotate(ax1, rate_resnet, acc_resnet, names_resnet, xo=.3, align='left')\n", + "\n", + "ax1.scatter(rate_resnet_ttp, acc_resnet_ttp, s=10, c='#43C6DB', marker=\"x\", label='ResNet TPP')\n", + "ax1.plot(rate_resnet_ttp, acc_resnet_ttp, c='#43C6DB')\n", + "annotate(ax1, rate_resnet_ttp, acc_resnet_ttp, names_resnet_ttp, xo=0., yo=0., align='center')\n", + "\n", + "ax1.set_title('Top-1 vs Rate')\n", + "ax1.set_ylabel('Top-1 Accuracy (%)')\n", + "ax1.set_xlabel('Rate (Images / sec)')\n", + "ax1.legend()\n", + "plt.show()" + ], + "execution_count": 48, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA+AAAAJcCAYAAAB5WM7HAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzs3Xt8j/X/x/HHe59tttmaw4bNLMs6\nOM0wc5hYcqjE91s6x5eKiBIyEYqikvNEIVJRpBO/ioavfVHJqTlrJsPmMKc5m5nr98dn+9hyWprN\ntuf9dnOz6/S+Xtdnbrd6fd7X9byMZVmIiIiIiIiIyI3lVNAFiIiIiIiIiBQHasBFRERERERE8oEa\ncBEREREREZF8oAZcREREREREJB+oARcRERERERHJB2rARURERERERPKBGnARERERERGRfKAGXERE\nihVjzMlsfy4YY85kW346j89V0hjzjTFmlzHGMsY0yMvxr3DObsaY85nXc9wYs84Y0+pvHD/bGDPo\nRtYoIiJSXKkBFxGRYsWyLM+sP8BuoE22dbPy+nRALPAkcDSPx76a2MzrKwV8Asw1xpTMx/OLiIjI\nZagBFxERycYY426MmWiM2WeMSTLGjDTGuGRuu88Yk2CMGWqMOWKM2WmMefRKY1mWddqyrGjLsn4B\nLlzjvB2NMSv+sm6AMebLzJ//ZYzZZow5YYzZY4zpea1rsSzrAvAZ4AXcljmOszHma2PMAWNMqjFm\nqTHmzsxtPYF2wODMGfS5mesrGWPmGWMOGWP+NMZ0u9a5RURE5FJqwEVERHIaCoQANYG6QCTQL9v2\nyoArUAHoAnxijAnKg/N+C9QxxgRmW/cU8Hnmz9OB/1iW5QWEAsuvNaAxxhl4BjgLJGXbNA+ogv0a\ntmGfJceyrGjga+CtzDsCHjXG2IAfgV8Af+A+4DVjTNPrvVAREZHiSg24iIhITk8Db1iWdciyrAPA\nMKBDtu3ngaGWZZ2zLGsxsBh45J+e1LKs49gb3ScAjDE1gYDMdQAZQHVjjJdlWYcty/r9KsM1Ncak\nAmeAN4EnLcs6mnme85ZlfWpZ1knLss5i/8Ih3BjjdoWxGgNulmWNyLzmeODjrDpFREQk99SAi4iI\nZDLGGOyzwruyrd4FVMy2fDCzcc2+3d8Yc0e2MLdD11nC59ifFwf77PdXlmWdy1z+F/bbw3cbY/5r\njKl3lXH+Z1lWKaAsEANEZG3IvAV9dOat5Mexz4CbzH0v51agcubt6qmZjX0f7J+TiIiI/A1qwEVE\nRDJZlmUB+7E3nVkCgeRsyz5/mS0OBPZalhWfLczN5zpL+BEIMsZUxT7DnHX7OZZl/WpZ1oNAeexN\n9eeXHyLH9RwHugHdjDHVMlc/A7QA7gG8gbsy15usw/4yzB5gm2VZpbL98bIs66HrukIREZFiTA24\niIhITl8AbxhjyhpjygEDgZnZtrtgDylzNcY0w97Mfn2lwYwxJbI17K5XudWbzJn1b4HozPP8L3OM\nksaYJ4wxtwDpwAmuEeqWbcwD2J/xHpy5ygv7M+GHgZLYb7HP7gCZgW2ZVmTW0MsY45Y5gx5ijKmT\nm/OLiIjIRWrARUREcnod2AJsBuKAn4H3sm1PxP4c+H7swWjPWJb151XG24X9Weyy2BvqM8aYq92+\n/TnQHJiTmWKe5dnMsY4B/8n8k1tjgHaZaefTgIOZ9W8ks8HOZgpQL/N289mWZaUDDwCNMs9/EPgA\n8Pwb5xcRERHA2O+2ExERkWsxxtwHvG9ZVnBB1yIiIiKFj2bARURERERERPKBGnARERERERGRfKBb\n0EVERERERETygWbARURERERERPKBc0EXkBs+Pj5W5cqVC7oMERERERERuQHWrl17yLIs34Ku40Yr\nFA145cqVWbNmTUGXISIiIiIiIjeAMWZXQdeQH3QLuoiIiIiIiEg+UAMuIiIiIiIikg/UgIuIiIiI\niIjkg0LxDLiIiIiIiMhfpaenk5SUxNmzZwu6FMklNzc3AgICcHFxKehSCoQacBERERERKZSSkpLw\n8vKicuXKGGMKuhy5BsuyOHz4MElJSQQFBRV0OQVCt6CLiIiIiEihdPbsWcqWLavmu5AwxlC2bNli\nfceCGnARERERESm01HwXLsX996UGXERERERERCQfqAEXERERERG5TjabjdDQUMefd999F4Dly5dT\nvXp1QkNDOXPmDFFRUVSvXp2oqCg+/PBDPv300yuOuXfvXh555JHrrmncuHGcPn3asVy5cmXatWvn\nWP7qq6/o1KnTVceIi4vjxx9/vO4a5PIUwiYiIiIiInKd3N3diYuLu2T9rFmzGDBgAO3btwdgypQp\nHDlyBJvNds0x/f39+eqrr667pnHjxtG+fXs8PDwc69auXcuWLVuoVq1arsaIi4tjzZo1PPDAA9dd\nh1xKM+AiIiIiIiJ56KOPPuLLL79k8ODBPP3007Rt25aTJ09St25d5syZw5AhQxg1ahQACQkJNG/e\nnFq1alGnTh127NhBYmIiNWrUACAjI4OoqCjq1atHSEgIkydPBiA2NpbIyEgeeeQR7rrrLp5++mks\nyyI6Opq9e/dyzz33cM899zhqeuWVVxg+fPgltZ46dYpnn32W8PBwateuzbx58zh37hyvv/46c+bM\nITQ0lDlz5uTDp1Y8aAZcRERERESKh1tugRMnLi57ecHx4/9oyDNnzhAaGupYHjBgAJ07d2bFihU8\n+OCDjlvJPT09HTPlQ4YMcez/9NNP079/fx566CHOnj3LhQsXSElJcWyfNm0a3t7erF69mrS0NCIi\nImjZsiUAv//+O5s3b8bf35+IiAh+/vlnevbsyZgxY1i6dCk+Pj6OcR577DEmTZpEQkJCjvqHDx9O\ns2bNmD59OqmpqYSHh9O8eXPefPNN1qxZw/vvv/+PPh/JSQ24iIiIiIgUD9mb78stX4cr3YKeu3JO\nkJyczEMPPQSAm5vbJfvExMSwYcMGxy3px44dY/v27bi6uhIeHk5AQAAAoaGhJCYm0rhx48uey2az\nERUVxTvvvMP999+fY/z58+c7ZuTPnj3L7t27r+t65NrUgIuIiIiIiNykLMtiwoQJtGrVKsf62NhY\nSpQo4Vi22WycP3/+qmN16NCBd955x3F7e9b4X3/9NXfeeWeOfX/77bc8qF7+Ss+Ai4iIiIiIFAAv\nLy8CAgL47rvvAEhLS8uRXg7QqlUrPvjgA9LT0wGIj4/n1KlT1xz3xGVm911cXOjduzdjx47NMf6E\nCROwLAuw39Z+tTHkn1EDLiIiIiIixYOX19WXr0PWM+BZf/r37/+3jv/ss8+Ijo4mJCSERo0asX//\n/hzbO3fuTLVq1ahTpw41atSga9eu15zpfv7557nvvvtyhLBlee6553IcP3jwYNLT0wkJCaF69eoM\nHjwYgHvuuYctW7YohC2PmaxvOm5mYWFh1po1awq6DBERERERuYls3bqVqlWrFnQZ8jdd7vdmjFlr\nWVZYAZWUbzQDLiIiIiIiIpIP8qQBN8ZUMMaMzouxbrTY2Fg2bNjgWB48eDC33norzZs3z7HfjBkz\naNSoEREREaxbtw6AHTt2ULduXTw9PVmxYsUVz3H8+HEaNWpEZGQk4eHhLFmyBIBPP/2U8PBwmjRp\nwhNPPEFaWtoVxzh69CgtW7akadOmRERE5Kg5y7Bhw5gxY8Yl68eMGUOTJk2IiIjgP//5j+N5kXXr\n1hEREUGjRo1yHHe5a80uNTWVTz/91LH8189QREREREREri1PGnDLsvZblvXK9RxrjLHlRQ259dfm\nsXv37ixdujTHPkePHiU6OprY2FhmzpxJz549AfDz82PRokWOd/ldiaenJ8uWLSM2NpbZs2c7ngNp\n3Lgxv/76K8uWLSMwMJCZM2decYxZs2YRERHB//73P4YPH87w4cNzfY0vvvgiy5Yt4+effwbsrxYA\neOmll5g5cyaxsbFER0dz9OjRK15rdmrARURERERE/rm8mgGvbIxZbIypboxZZYz5wRjzqTFmyBX2\njzTG/GSMmQsMN8ZUyjzmv5l/+xpjPIwxC4wx//vjjz+Ij48nNjaWe++9l8cee4yaNWsyd+5cAPbs\n2UPr1q1p1qwZrVu35uDBgxw5coR69eqRkpLCli1baNKkCSkpKcyYMYPhw4cTGRlJRkYGfn5+ODnl\n/BhWrVrF3XffjaurK0FBQZw4cYK0tDQ8PDwoU6bMNT8PJycnnJ3tb3g7fvw4ISEhANx2223YbPbv\nG0qUKIGzszNpaWk0btyYbdu2sX//fsLDwzl69ChVq1bl+PHjgP0LgXLlygGwbNkyateuTZs2ba74\nagBXV1fA/kqBCxcuEBwcTFpaGqdOnSIoKAhXV1fuvvtuVq1adcVrzW7MmDGsXbuWyMhIZs2adcln\nGBwcTO/evWnatCnt27fnwoUL1/yMREREREREipu8fg/4O0BPy7JWGmOmXmNff+BBy7LSjTGzgbcy\nj/sX8CrwOXDUsqz7w8LCrODgYPbu3UtqaioxMTEcOHCAtm3b8uijjxIVFcXgwYNp0KAB8+bNY8SI\nEYwaNYrRo0fTsWNHjh8/zieffEK5cuXo1KkTwcHBtG/f/oqFHT58mNKlSzuWS5UqxZEjR/Dz88v1\nB5GcnMzjjz9OfHw806dPz7Ft27ZtLFy4kOXLl1OiRAmmTZvGM888g7e3N+PGjaN06dLUrVuX119/\nnRo1apCamuq45b1Pnz7MmzePSpUqXfIuwOyGDx/OjBkzuP3226lUqRKHDx+mVKlSl1yTZVnXvNY+\nffqwZcsWFi9eDMD27dtzfIbnz5/nscceY+zYsXTp0oX58+fz73//O9eflYiIiIiISHGQ1yFswcDq\nzJ+v9eb2NZZlpWf+XBN41xgTC0QBPsDvwFpjzMw9e/Y4ZoNDQ0Ox2Wz4+/uTmpoKwMaNG+nfvz+R\nkZGMHDmSQ4cOAdCkSRPHDHRwcHCuL6JMmTKOsQGOHTt21ZnvDMti0cHjWJl/Z1gWFStWZMWKFaxa\ntYoXX3zRsW9SUhIdO3Zk9uzZuLm5AXDnnXcSFBQEQKNGjQB47733aNeuHZs2bWLu3Ln06NEDsM+o\nBwYGYowhPDwcgBUrVhAZGUlkZCQnT54EYODAgcTHxxMUFMSMGTOueE1XWt+5c2ciIyN5//33r/l5\nZa+lfv36/PHHH9c8RkREREREpLjJ6wZ8B5AVHV/vGvtmZPt5M9DbsqxIy7IaA88DJYAxlmW1d3Z2\n5rPPPgPszd5fVa9enbFjxxIbG8uKFSuYMmUKANOmTSM8PJyEhASyXmPm6up6zffm1a9fnxUrVpCe\nns7u3bvx9PSkRIkSV9z/v4dOMCIhhRYrdzAiIYWFyYcc22655Ra8Mt8veOjQIdq1a8eHH35IlSpV\nHPssWrSI9PR0fHx8mD9/PmC/fdzHxweAcuXKceTIEQC8vLxISkoCYPVq+3cdjRs3JjY2ltjYWDw9\nPTl79qzjs/L29sbDwwM3NzdKlizJ7t27SU9PZ8WKFYSHh1/xWj/66CNiY2N58cUXL/nM/rpsWZbj\n8129ejV33HHHVT9fEREREZGi4PDhw473f1eoUIGKFSs6lo0xOd4PnpiYeMnxnTp1IigoyLFP1mRc\nWloazZs3d7yDe/ny5VSvXp3Q0FCSk5OvmUnVuXNntmzZcl3XFBsbyy+//OJYHjJkCB4eHqSkpDjW\neXp6XnOct99++7rOX9Tl9S3orwHTjTGHgGPArlwe9wow0RiT9ZucDmwBoo0x5z09PXnwwQfZtevy\nw40ePZoePXo4Zn+fffZZwsLCmDFjBkuWLCElJYV27dqxePFiWrRoQa9evfj+++/58ssvmTRpErNn\nz2br1q00b96cyZMnU6VKFbp3707Tpk0xxjB+/HjAPvv88MMPs2XLFjZv3swDDzzA0KFDqeftkaOe\ncvsSafJUO2w2G+fPn2fcuHGA/R9vcnIyvXv3BqBDhw60adOGgQMH8tNPP+Hs7Ezz5s2pU6cOL730\nEh06dGD69OmcOXOGESNGOK61TZs2+Pv7Oxr7Sz7MV15h8+bNjue/hw4dCsD48eN58sknsSyL7t27\nO249v9y1ZlehQgXc3d1p164d3bt3v+QzdHZ25uuvv6Zfv35UrFiRtm3b5uZ3LiIiIiJSqJUtW5a4\nuDjA/v/6np6e9O3bF7A3qVnbrmbkyJGXNNS///47gOP4bt26MWDAAMcjoF999dVVx/zoo4/+3oVk\nkzWpl/VlAICPjw+jR4929CS58fbbb/Paa69ddx1FlbEsK+8GM8Yl67byzGfAf7Is6+r/OnIhLCzM\nypphvRktOnicEQkXvxEKLunKu1UrUsolXwPeC0xwcDAJCQkFXYaIiIiIFDNbt26latWqBV0GcPkG\nPGuC8Eo6derEgw8+mKMBT0lJoVGjRhw8eJCgoCBeeOEFBgwYgLe3N40aNWL48OE8+OCDbNq0iYyM\nDF599VUWLlyIk5MTXbp04aWXXiIyMpJRo0YRFhZGTEwMb7zxBmlpaVSpUoWPP/4YT09PKleuTMeO\nHfm///s/0tPTmTt3Lm5ubjRo0ACbzYavry8TJkxwvFJ5xowZrFu3jjJlyuS4tpkzZxIdHc25c+eo\nX78+kyZNYuDAgYwcOZKaNWtSvXp1Zs2aleO6L/d7M8astSwrjCIur2fAaxpjxmeOmwh8Z4x5DwjP\nts85y7Ja5vF5C8yYMWOYN38+qekZlHKxsefMOaw3o3n+XAb9gssTVsrj2oOIiIiIiMgNt3vpz2z5\n7GvHcrUO7Qi8J+KGnOvMmTOEhoYCEBQUxLfffnvZ/aKiohg2bBiAo1n96KOPGDVqFN9//z0Av/76\nq6NRz34r+5QpU0hMTCQuLg5nZ2fHY6tZDh06xLBhw1i8eDElS5ZkxIgRjBkzhtdffx2wz2yvW7eO\nSZMmMWrUKD766CO6deuW44uEJUuW4OnpybPPPsv48eMdd9eCvZGeM2cOP//8My4uLnTv3p1Zs2bx\n7rvv8v777+fqDoDiJk8bcMuy1gF3/2V1v7w8x82mT58+9OnTJ8e6HafSeHv7Afpv3Us7P2+eC/TB\n1enSZ9eLCs1+i4iIiEhhcPrAIbZ8+hVWRgbGZqNyy6Y37Fzu7u7XfQt6bi1evJhu3bo5XoH81+Do\nlStXsmXLFiIi7F8ynDt3joYNGzq2P/zwwwDUrVuXb7755qrn6tmzJ6GhoY7GHOzN+dq1a6lXzx7/\ndebMGcfrk+Xy8noGXIAqJUswqWYAU3Yd5ut9x/j92BkG3l6BWz1cC7o0EREREZFi645HH+R/UW9x\nMmkfJf3KccejD+br+Z955hl+//13/P39+fHHH2/4+SzLokWLFnzxxReX3Z4VNJ2VXXU1pUqV4qmn\nnmLixIk5xu/YsSPvvPNO3hVdxOV1CrpkKmFz4qXbfBl2lx9HzmXwwoY9zNt/jLx85l5ERERERHLP\nyWajyXuDAGjy3iCcbPmb2fTxxx8TFxeXZ813ixYtmDx5sqN5/ust6A0aNODnn3923LF66tQp4uPj\nrzqml5cXJ06cuOy2Pn365Djfvffey1dffeVISD9y5IgjONvFxYX09PTLjlOcqQG/wRqULsmUWpWo\n5e3OhJ0HGfzHPo6mX/3bJRERERERuTHufKwN90S/xZ2PtSnoUgD7M+DZX1d27ty5XB/buXNnAgMD\nCQkJoVatWnz++ec5tvv6+jJjxgyefPJJQkJCaNiwIdu2bbvqmG3atOHbb78lNDSU5cuX59jm4+PD\nQw89RFpaGgDVqlVj2LBhtGzZkpCQEFq0aMG+ffsAeP755wkJCeHpp5/O9fUUB3magn6j3Owp6Llx\nwbKYt/8YU3YdxtPZiagq5QgvXbKgyxIRERERKbRuphR0yb3inIKuGfB84mQMD/mVYmLNALydbby2\nbR+Tdh7k3IULBV2aiIiIiIiI5AM14PnstpIlmFgzgH9X8Oab/cfosTGJnafTCrosERERERERucHU\ngBeAEjYnXgzyZfhdfhxNz6DHhiS+25eqgDYREREREZEiTA14AapfuiRTQyoR6u3O+4mHGLhNAW0i\nIiIiIiJFlRrwAlba1Znhd/nRo7IPvx87Q5f1e1h19FRBlyUiIiIiIiJ5TA34TcBkBrRNCgmgVGZA\n2/sKaBMRERERESlS1IDfRII8SjApJICHKnjz3f5jdN+QxJ+nFNAmIiIiInKzstlshIaGUqNGDdq0\naUNqaup1jRMZGUlY2MW3cK1Zs4bIyMirHpOYmHjJu7/l5qYG/Cbj6uREjyBf3r7Lj2PnM+ixMYlv\nFdAmIiIiInJTcnd3Jy4ujk2bNlGmTBkmTpx43WOlpKSwYMGCXO+vBrzwUQN+kwovXZIptSpRx9ud\niYmHeG3bPo6eU0CbiIiIiMjNqmHDhiQnJzuWR44cSb169QgJCeGNN94A4NSpU7Ru3ZpatWpRo0YN\n5syZ49g/KiqK4cOHXzJuRkYGUVFRjrEmT54MQP/+/Vm+fDmhoaGMHTv2Bl+d5AXngi5Arqy0izPD\n7vJj/oFjTE48TJf1e+gbXI4GpUsWdGkiIiIiIoXS/PkQEwMtW0Lbtnk3bkZGBkuWLOG5554DICYm\nhu3bt7Nq1Sosy6Jt27YsW7aMgwcP4u/vzw8//ADAsWPHHGM0bNiQb7/9lqVLl+Ll5eVYP23aNLy9\nvVm9ejVpaWlERETQsmVL3n33XUaNGsX333+fdxciN5RmwG9yxhj+VcEe0FbG1cagbfuY8OdB0jIU\n0CYiIiIi8nfMnw9PPgkTJ9r/nj//n4955swZQkNDqVChAgcOHKBFixaAvQGPiYmhdu3a1KlTh23b\ntrF9+3Zq1qzJokWLePXVV1m+fDne3t45xhs0aBDDhg3LsS4mJoZPP/2U0NBQ6tevz+HDh9m+ffs/\nL17ynRrwQqKyRwnerxlAOz9v5h04Ro+NCmgTEREREfk7YmLg9Gn7z6dP25f/qaxnwHft2oVlWY5n\nwC3LYsCAAcTFxREXF0dCQgLPPfccd9xxB+vWraNmzZoMGjSIN998M8d4zZo148yZM6xcudKxzrIs\nJkyY4Bhr586dtGzZ8p8XL/lODXgh4urkxAuVfXmn6sWAtm/2pXJBAW0iIiIiItfUsiV4eNh/9vCw\nL+cVDw8PoqOjGT16NOfPn6dVq1ZMnz6dkydPApCcnExKSgp79+7Fw8OD9u3bExUVxbp16y4Za9Cg\nQbz33nuO5VatWvHBBx+Qnp4OQHx8PKdOncLLy4sTJ07k3UXIDadnwAuheqVKMrVWIKN2HGBS4iFW\nHT1Nv+BylHHVr1NERERE5EratoUvvrgxz4AD1K5dm5CQEL744gs6dOjA1q1badiwIQCenp7MnDmT\nhIQEoqKicHJywsXFhQ8++OCScR544AF8fX0dy507dyYxMZE6depgWRa+vr589913hISEYLPZqFWr\nFp06daJ37955e0GS50xheL1VWFiYtWbNmoIu46ZjWRb/d+A4HyYewt1miAour4A2ERERESk2tm7d\nStWqVQu6DPmbLvd7M8astSwr7AqHFBm6Bb0QM8bQtoI3H4RUwsfVmUHb9hH950HOKqBNRERERETk\npqMGvAi41cOVCTUr8YhfKeYfOEb3jXvYoYA2ERERERGRm4oa8CLC1cnQrbIP71b15+T5C7y4cQ9f\n7VVAm4iIiIiIyM1CDXgRE1bKg6m1AgkrVZIPdx1iwNa9HD53vqDLEhERERERKfbUgBdB3i423ryz\nAr1u82XTibN0Wb+bX46cKuiyREREREREijU14EWUMYYHy3szqWYlfF2def2PfYz7M0UBbSIiIiIi\nIgVEDXgRlxXQ9qhfKb4/cJzuG/eQoIA2EREREZE8YbPZCA0NpUaNGrRp04bU1NTrGicyMpKwsItv\n4VqzZg2RkZFXPSYxMZHPP//8kvUbN24kNDSU0NBQypQpQ1BQEKGhoTRv3pzExETc3d0JDQ2lWrVq\ndOvWjQsXLlxxveQtNeDFgKuToWtlH0ZU9edUZkDb3L1HFdAmIiIiIvIPubu7ExcXx6ZNmyhTpgwT\nJ0687rFSUlJYsGBBrve/UgNes2ZN4uLiiIuLo23btowcOZK4uDgWL14MQJUqVYiLi2PDhg1s2bKF\n77777qrrJe+oAS9G6pbyYEqtQMJLlWTyrsP037qXQwpoExERERHJEw0bNiQ5OdmxPHLkSOrVq0dI\nSAhvvPEGAKdOnaJ169bUqlWLGjVqMGfOHMf+UVFRDB8+/JJxMzIyiIqKcow1efJkAPr378/y5csJ\nDQ1l7Nixf7teZ2dnGjVqREJCQq7Wyz+nBryY8XaxMTQzoG3zibM8v343Px85WdBliYiIiIjccBmW\nxaKDx7Ey/87IwztCMzIyWLJkCW3btgUgJiaG7du3s2rVKuLi4li7di3Lli1j4cKF+Pv7s379ejZt\n2sR9993nGKNhw4a4urqydOnSHGNPmzYNb29vVq9ezerVq5k6dSo7d+7k3Xff5e677yYuLo7evXv/\n7ZpPnz7NkiVLqFmzZq7Wyz+nBrwYygpo+yCkEuVLuPDGH/sZuyOFMwpoExEREZEi7L+HTjAiIYUW\nK3cwIiGF/x468Y/HPHPmDKGhoVSoUIEDBw7QokULwN6Ax8TEULt2berUqcO2bdvYvn07NWvWZNGi\nRbz66qssX74cb2/vHOMNGjSIYcOG5VgXExPDp59+SmhoKPXr1+fw4cNs3779umvesWMHoaGhRERE\n0Lp1a+6///6rrpe841zQBUjBCXR3JbpGADP2HObLvalsOH6G124vz+2ebgVdmoiIiIhInmvu48WI\nhJQcy/9U1jPgp0+fplWrVkycOJGePXtiWRYDBgyga9eulxyzbt06fvzxRwYNGsS9997L66+/7tjW\nrFkzBg0axMqVKx3rLMtiwoQJtGrVKsc4sbGx11Vz1rPeuV0veUcz4MWci5Ohy60+vFfNn9MZF3hp\nUxJzkhXQJiIiIiJFz+K/zHj/dfmf8PDwIDo6mtGjR3P+/HlatWrF9OnTOXnS/rhncnIyKSkp7N27\nFw8PD9q3b09UVBTr1q27ZKxBgwbx3nvvOZZbtWrFBx98QHp6OgDx8fGcOnUKLy8vTpzIu2uQG08z\n4AJAbW97QNvYP1OYuvswa44ai3bWAAAgAElEQVSd5tUq5fEpoX8iIiIiIlI0NMuc8W7u48XiQycc\ny3mldu3ahISE8MUXX9ChQwe2bt1Kw4YNAfD09GTmzJkkJCQQFRWFk5MTLi4ufPDBB5eM88ADD+Dr\n6+tY7ty5M4mJidSpUwfLsvD19eW7774jJCQEm81GrVq16NSp03U9By75y1iFYKYzLCzMWrNmTUGX\nUSxYlsWClONMSjyEi5PhldvK0bisZ0GXJSIiIiJyia1bt1K1atWCLkP+psv93owxay3LCrvCIUWG\nbkGXHIwxPFDemw9DKuFXwoUh8fsZo4A2ERERERGRf0wNuFxWgLsr42sE8Lh/KRakHOeFDXuIP3m2\noMsSEREREREptNSAyxVlBbSNrObP2QsX6KmANhERERG5yRSGR2rlouL++1IDLtcU6u3BlJBAGpYu\nydTdh+m3ZS8H084XdFkiIiIiUsy5ublx+PDhYt/UFRaWZXH48GHc3Irva48Vwia5ZlkWCw+eYOLO\ng7gYQ+8q5WiigDYRERERKSDp6ekkJSVx9qwelSws3NzcCAgIwMXFJcf64hLCpndMSa4ZY7i/3C3U\n9HLj7e0HeDN+P/eXu4XulX1wt+lmChERERHJXy4uLgQFBRV0GSK5pq5J/rYAd1eiawTwZMXSLEw5\nTrcNe9imgDYREREREZGrUgMu18XZyfBcYFlGVavIuQsWL29K4vPkI2QUgkcaRERERERECoIacPlH\nanm7M6VWJSLKeDJ99xGitiSTkpZe0GWJiIiIiIjcdNSAyz/m5Wxj8O3liapSjviTaXRdv4dlh08W\ndFkiIiIiIiI3FTXgkieMMbQqdwuTQypR0d2FN+P3MzLhAGcyLhR0aSIiIiIiIjcFNeCSpyq6uzKu\negBPVSxNzMET9oC2EwpoExERERERUQMuec7ZyfBsYFlGV88MaNucxOdJCmgTEREREZHiTQ243DAh\nt9gD2hqX8WT6niNEbU7mgALaRERERESkmLphDbgx5k5jTFy2P8eNMb2ybX/FGGMZY3xuVA1S8Lyc\nbQy6vTz9qpRj+yl7QFvsoRMFXZaIiIiIiEi+u2ENuGVZf1iWFWpZVihQFzgNfAtgjKkEtAR236jz\ny83DGEPLcrcwuVYgAe4uDNt+gPcSDnBaAW0iIiIiIlKM5Nct6PcCOyzL2pW5PBboB+ih4GLE382F\ncdUDaF+xNIsPnqDr+t1sVUCbiIiIiIgUE/nVgD8BfAFgjPkXkGxZ1vqrHWCMed4Ys8YYs+bgwYP5\nUaPkA2cnQ6fMgLYMC17elMRMBbSJiIiIiEgxYKwb3PgYY1yBvUB14ASwFGhpWdYxY0wiEGZZ1qGr\njREWFmatWbPmhtYp+e/k+QzG/3mQpYdPUtPLjf63l6d8CZeCLktERERERPKZMWatZVlhBV3HjZYf\nM+D3A+ssyzoAVAGCgPWZzXcAsM4YUyEf6pCbjKezjdduL0//4PLsOJ3G8+v3sFQBbVLE7d+/n1de\neaWgy8iV2NhYNmzY4FgePHgwt956K82bN8+x34wZM2jUqBERERGsW7cOgB07dlC3bl08PT1ZsWLF\nFc9x/PhxGjVqRGRkJOHh4SxZsgSATz/9lPDwcJo0acITTzxBWlraFcc4evQoLVu2pGnTpkREROSo\nOcuwYcOYMWPGJet79epFgwYNaNCgAe+++y4ASUlJNG3alLvvvpuIiAj++gXwxx9/jIuLviwUERGR\nvy8/GvAnybz93LKsjZZllbMsq7JlWZWBJKCOZVn786EOuQkZY2ju68WHIYEEurswfPsBRiQc4NR5\nBbRJ0VShQgVGjx59XcdmZGTkcTVX99cGvHv37ixdujTHPkePHiU6OprY2FhmzpxJz549AfDz82PR\nokU88sgjVz2Hp6cny5YtIzY2ltmzZ9O/f38AGjduzK+//sqyZcsIDAxk5syZVxxj1qxZRERE8L//\n/Y/hw4czfPjwXF9jjx49WLlyJb/88gvz5s1jx44deHl5MXfuXJYvX87UqVPp3bu3Y/+zZ8/y9ddf\nExgYmOtziIiIiGS5oQ24MaYk0AL45kaeRwo/fzcXxtUIoENAaZYcPEG3DbvZooA2KYISExNp3rw5\nmzdvJjw8nNatW/Of//yHIUOGXHb/2NhYWrVqxaOPPsrAgQPZs2cPrVu3plmzZrRu3ZqDBw9y+vRp\n7r//fpo2bUpkZCTx8fHExsZy77338thjj1GzZk3mzp0LcNnjjxw5Qr169UhJSWHLli00adKElJQU\nZsyYwfDhw4mMjCQjIwM/Pz+cnHL+Z2PVqlXcfffduLq6EhQUxIkTJ0hLS8PDw4MyZcpc8/NwcnLC\n2dkZsM+Gh4SEAHDbbbdhs9kAKFGiBM7OzqSlpdG4cWO2bdvG/v37CQ8P5+jRo1StWpXjx48D9i8E\nypUrB8CyZcuoXbs2bdq04bfffrvs+W+//fYcddhsNry9vR1jZJ07S3R0NN26dcMYc81rExEREfkr\n52vvcv0syzoFlL3K9so38vxSuNiMoWOlstT19uCdhAP02pREh4AyPBVQGpv+Z1eKmAEDBhAdHU2D\nBg3o0qXLVffdu3cv33//PS4uLjzxxBMMHjyYBg0aMG/ePEaMGMFTTz1F6dKlWbBgAQAXLlxg7969\npKamEhMTw4EDB2jbti2PPvooUVFRlxw/atQoRo8eTceOHTl+/DiffPIJ5cqVo1OnTgQHB9O+ffsr\n1nb48GFKly7tWC5VqhRHjhzBz88v159FcnIyjz/+OPHx8UyfPj3Htm3btrFw4UKWL19OiRIlmDZt\nGs888wze3t6MGzeO0qVLU7duXV5//XVq1KhBamqq45b3Pn36MG/ePCpVqkSrVq2uWsOsWbO47bbb\nqFy5smNdRkYGPXv2ZODAgYC9uV+2bBn9+vWjV69eub4+ERERkSz5lYIukms1bnFnckglIn08+STp\nCH02J7P/bHpBlyVyXebNgyeegPnzc65PSEigXr16ANSvX/+qY4SFhTmeOd64cSP9+/cnMjKSkSNH\ncujQIWrXrk3dunVp3749L7/8smM2ODQ0FJvNhr+/P6mpqVc8HqBJkyaOGejg4OBcX1+ZMmUcYwMc\nO3YsVzPf2VWsWJEVK1awatUqXnzxRcf6pKQkOnbsyOzZs3FzcwPgzjvvJCgoCIBGjRoB8N5779Gu\nXTs2bdrE3Llz6dGjB2CfUQ8MDMQYQ3h4OAArVqwgMjKSyMhITp48CcDixYv5+OOP+fDDD3PU1bVr\nV+6//37HM+/vvPMO/fr1+1vXJiIiIpKdGnC5KdkD2iowILg8O0+n8fyGPSw5qIA2KVzmz4fHHoM5\nc+Dhh2Hq1IvbqlSp4gj3Wr169VXHyboVG6B69eqMHTuW2NhYVqxYwZQpU0hLS6NPnz7MnDkTX19f\nPvvsM4DL3iZ9ueMBpk2bRnh4OAkJCY66XF1dOX/+/FVrq1+/PitWrCA9PZ3du3fj6elJiRIlrv3h\nZMoernbLLbfg5eUFwKFDh2jXrh0ffvghVapUceyzaNEi0tPT8fHxYX7mtxqWZeHj4wNAuXLlOHLk\nCABeXl4kJSUBFz/jxo0bExsbS2xsLJ6envz2228MHjyYr776Cnd3d8d5+vbti5+fX44vBOLj43n7\n7be577772LdvH48//niur1NEREQEbvAt6CL/1L2+XlTzcuPdhAO8k3CA1amneTHIB09n27UPFilg\nMTFw7pz954wMeOEF6NwZzp+Ht99+m2effRYfHx+8vb259dZbczXm6NGj6dGjh2P29tlnn6VatWr0\n7NkTZ2dnLly4wCeffMKuXbtyfXxYWBgzZsxgyZIlpKSk0K5dOxYvXkyLFi3o1asX33//PV9++SWT\nJk1i9uzZbN26lebNmzN58mSqVKlC9+7dadq0KcYYxo8fD9hnnx9++GG2bNnC5s2beeCBBxg6dOgl\n9WzatInevXtjs9k4f/4848aNA2DIkCEkJyc7AtA6dOhAmzZtGDhwID/99BPOzs40b96cOnXq8NJL\nL9GhQwemT5/OmTNnGDFihONa27Rpg7+/v6Ox/6vnnnsOgH//+9+OYyzLYvz48URERBAZGYmvry9z\n587lu+++cxwXHBzMnDlzcvU7ExEREclyw98Dnhf0HnDJsCxmJR1lZtIRypVwZsDt5anu5X7tA0UK\n0Pz58OSTcPo0uLlBw4YQGwtlysCgQen06OGCiwt06dKFVq1aXTMxXERERKSoKi7vAVcDLoXK5hNn\neGf7AVLSztM+oAxPK6BNbnLz59tnwlu2hLZtYd066NsXli5dh5vbywQGnqdOncp89tlnvPbaa6xa\ntcpxrKurKzExMQVYfd4aM2aM47bxLN98883ffmZcREREih414DcRNeCS3anzF5iw8yCLD52gupcb\n/YPL4+fmUtBlieSaZcGPP0JUFGzdCnffDaNHQ2Ymm4iIiEixU1wacIWwSaFT0tmJ/reX57Xby7Pz\n9Dm6KqBNChljoHVr2LABPvwQ/vgDwsPhqacgMbGgqxMRERGRG0UNuBRazXy8mBJSiSAPV95JOMDb\n2/dz8nxGQZclkmvOztC1K2zfDgMHwrffwl13wauvQrY3e4mIiIhIEaEGXAq1Cm4ujKlekU6VyhB7\n6CRdN+xh0/EzBV2WyN9yyy0wbJi9EX/iCRg5EoKDYcIESE8v6OpEREREJK+oAZdCz2YM7QPKMK5G\nRQzQZ3Myn+w5TEYhyDcQyS4gAGbMgLVroVYt6NkTqle3z4zrn7OIiIhI4acGXIqMal7uTA4J5F5f\nLz5LOkqvTUnsPavpQyl8ateGxYvh++/tt6k//DA0bQrZAtJFREREpBBSAy5FSklnJ14NLs/A28uz\n+0w63TbsZtHB4xSGtH+R7C4X1Fa/voLaRERERAozNeBSJN3j48WUWpWo4lGCEQkpvL39gALapFDK\nCmpLSIBBg+C77+DOO6FfPwW1iYiIiBQ2asClyCpfwoVR1SvyTKUy/O+wPaBtowLapJDy8oK33oL4\neHjySRg16mJQ27lzBV2diIiIiOSGGnAp0mzG8HRAGcbXCMCG4ZXNyXy8+zDnL+iWdCmcLhfUVqOG\ngtpERERECgM14FIsVPVy48NalWjh68Ws5KP02qyANincsoLafvgBXFzsQW1NmiioTURERORmpgZc\nig0PmxNRweUZdHt59pxJp+v63cSkKKBNCi9j4IEHYP16mDzZfnt6/fr2W9QV1CYiIiJy81EDLsVO\nZGZA2+0lS/DejhSGbz/ACQW0SSHm7AzPP38xqG3ePAW1iYiIiNyM1IBLsVS+hAsjq1fk2UplWH7k\nJM+v38MGBbRJIZc9qO2pp+xBbVWqQHS0gtpEREREbgZqwKXYshnDUwFlGF89AFcne0DbdAW0SREQ\nEAAffwzr1tmfFX/5ZaheHb75RkFtIiIiIgVJDbgUe3d5ufFhSCVa+XrxefJRXt6URPIZTRdK4Rca\nCosWwY8/gqsrtGtnD2r77beCrkxERESkeFIDLgK425zoG1yewXdUIPlsOl037GGhAtqkCDAG7r//\nYlDb9u3QoIE9qG3nzoKuTkRERKR4UQMukk3Tsp5MqVWJOz3dGLUjhbcU0CZFRFZQ2/btMHiwPajt\nrrsgKgqOHi3o6kRERESKBzXgIn9RroQL71Xz57nAsvycGdC2/pgC2qRo8PKCN9+0N+JPPw2jR0Nw\nMIwfr6A2ERERkRtNDbjIZdiM4cmKpYmuYQ9o67slmWkKaJMipGJFmD79YlBbr14KahMRERG50dSA\ni1zFnZ72gLb7yt3CF8lH6bkpiSQFtEkRcrmgtrvvVlCbiIiIyI2gBlzkGtxtTrxSpRyv31GBfWfT\n6bZhDwsU0CZFSPagtilTICHBHtT2xBMKahMRERHJS2rARXKpSVlPptQK5C5PN0bvSOHN+P0cT1dA\nmxQdzs7QpcvFoLb58xXUJiIiIpKX1ICL/A2+JZwZUc2fLoFl+eXoKZ7fsJu4Y6cLuiyRPKWgNhER\nEZEbQw24yN9kM4bHK5ZmQo0A3JyciNqyl6m7DpGugDYpYrKC2n7/HerUsQe1VasGX3+toDYRERGR\n66EGXOQ63eHpxgchlbi/3C3M2ZvKy5uS2KOANimCatWCmBhYsADc3OCRRxTUJiIiInI91ICL/APu\nNif6VCnHkDsqsC8tnRc27OHHA8cU0CZFjjFw330QF6egNhEREZHrpQZcJA80LuvJ1JBAqnq5MebP\ngwyN388xBbRJEZQV1JaQAK+/fjGorW9fBbWJiIiIXIsacJE84lPCmRFV/Xn+1rKsPHqKrht287sC\n2qSI8vSEoUPtQW3t28OYMVClCowbp6A2ERERkStRAy6Sh5yM4TF/e0Cbu5MT/bbsZYoC2qQIq1gR\npk2zB7WFhUHv3gpqExEREbkSNeAiN8DtmQFtrcvfwpd7U+m5KYndCmiTIqxWLfjpp5xBbY0bw8qV\nBV2ZiIiIyM1DDbjIDeJmc6LXbeUYemcFDmQGtH2vgDYpwrIHtU2dCn/+CQ0bwuOP238WERERKe7U\ngIvcYBFlPJlSK5DqXm6M+/MgQ/5QQJsUbc7O0Lmz/fnwN96A77+3B7W98oqC2kRERKR4UwMukg98\nXJ15t6o/XW8ty2+pp3h+/W7WpiqgTYo2T08YMgTi46FDBxg7VkFtIiIiUrypARfJJ07G8Kh/ad6v\nWYmSzk68unUvkxMPcU4BbVLEZQW1xcXlDGr76isFtYmIiEjxogZcJJ8FlyzBpJqVaFP+FubuS+Wl\njXsU0CbFQkgIxMTAwoXg7g6PPgoREfDrrwVdmYiIiEj+UAMuUgDcbE68fFs53rzTj0PnziugTYqV\nVq0uBrXt3AmNGimoTURERIoHNeAiBahRmZJMqRVIjcyAtjcU0CbFhM125aC2I0cKujoRERGRG0MN\nuEgBK+vqzDtV/Xmhsg+rU0/RRQFtUoxkBbVt3w7/+Y89qC042P53WlpBVyciIiKSt9SAi9wEnIyh\nnV8p3q9ZCS9nG69u3cuHCmiTYsTfHz76yH5rer160KePgtpERESk6FEDLnITqVKyBJNqBtC2vDdf\nZQa07TqtgDYpPkJC4Kef7EFtHh4KahMREZGiRQ24yE2mhM2Jnrf5MuyuiwFt8/YroE2Kl6ygto8+\nuhjU9thjsGNHQVcmIiIicv3UgIvcpBqULsnUWoGE3OLOhJ0Hef2PfaQqoE2KEZsNnnvO/nz4kCHw\nww9Qtar99nQFtYmIiEhhpAZc5CZWxtWZt6v60b2yD2tST9Nl/W5Wp54q6LJE8pWnpz0pPSuobdw4\nBbWJiIhI4aQGXOQm52QMD/uVYmLNSng72xiwdR+TEg9y7sKFgi5NJF9lD2oLD78Y1DZ3roLaRERE\npHBQAy5SSNxWsgQTawbwrwrefLPvGC9uTCLxtKb/pPgJCbGHtC1cCCVL2p8Nj4iAX34p6MpERERE\nrk4NuEghUsLmxEtBvgy/y48j6Rl035DEvP2pCmiTYqlVK/j994tBbRER9tR0BbWJiIjIzUoNuEgh\nVL90SaaGVCLU250JOw8xaNs+jqafL+iyRPLdX4PafvxRQW0iIiJy81IDLlJIlXZ1ZvhdfvSo7MO6\nY2d4fv0eVh1VQJsUT5cLaqtSBcaMUVCbiIiI3DzUgIsUYsYYHvIrxcSaAXg723ht2z4m7lRAmxRf\n2YPa6teHV16xz4h/+aWC2kRERKTgqQEXKQKyAtoequDNt/uP0X1DEjsV0CbFWPagNk9PePxxaNRI\nQW0iIiJSsNSAixQRJWxO9Ajy5e27/Eg9bw9o+3afAtqkeMsKaps2DXbtUlCbiIiIFCw14CJFTHjp\nkkytVYna3u5MTDzEwG37OHpOAW1SfNls8OyzEB+voDYREREpWGrARYqg0i72gLaXgnyIO3aGLuv3\nsFIBbVLMZQW1JSRAx44wfryC2kRERCR/qQEXKaKMMfyrQikmhQRQ2tXGoG37mLDzIGkZCmiT4s3P\nD6ZOtQe1NWigoDYRERHJP2rARYq4yh72gLaH/byZt/8YPTYm8ecpTfeJ1KwJCxbATz8pqE1ERETy\nhxpwkWLA1cmJ7pXtAW3HzmfQY2MS3yigTQSAli0V1CYiIiL5Qw24SDFiD2gLpG4pdyYlHuK1bfs4\nooA2EUdQ2/btMHSofWa8alXo3VtBbSIiIpJ31ICLFDOlXGy8dacfPYN8WX/sDM8roE3EoWRJeP11\neyPesSNERyuoTURERPKOGnCRYsgYQ9sK3nwQUomyWQFtfyqgTSSLgtpERETkRlADLlKM3erhyoSa\nlWjn5828A8fovjGJHQpoE3HIHtTm5XUxqO3nnwu6MhERESmM1ICLFHOuToYXKvvyblV/TpzP4MWN\ne/hqbyoXNM0n4tCyJaxbB9On24PaGjeGRx6xv1NcREREJLfUgIsIAGGlPJhSK5CwUiX5cNchBmzd\ny2EFtIk42GzwzDMXg9oWLoRq1exBbYcPF3R1IiIiUhioARcRh1IuNt68swIvB/my6cRZnl+/m1+P\nKKBNJLvsQW2dOtmD2oKDYfRoBbWJiIjI1akBF5EcjDG0qeDNpJqV8HF1ZvAf+xj/ZwpnFdAmkoOf\nH0yZAuvXQ8OG0LevPahtzhwFtYmIiMjlqQEXkcvKCmh71K8U/3fgON037iFBAW0il6hRA378EWJi\n7EFtTzxhb8gV1CYiIiJ/pQZcRK7I1cnQtbIPI6r6c+r8BV7auIev9h5VQJvIZbRocTGobc8eBbWJ\niIjIpdSAi8g11c0MaKtXqiQf7jrMgK17OaSANpFLZAW1xcfDm29eDGrr1UtBbSIiIqIGXERyydvF\nxtA7K9DrtosBbT8fOVnQZYnclEqWhMGD7bPfzzwDEyZAlSowapSC2kRERIozNeAikmvGGB4s780H\nIZUoV8KFN/7YzzgFtIlcUYUKMHmyPaitUSOIioK77lJQm4iISHGlBlxE/rZAd1eiawTwmH8pvj9w\nnBc27GG7AtpErih7UNstt1wMaluxoqArExERkfykBlxErourk+H5W314r5o/pzPsAW1fKqBN5Kqy\ngto+/tge1Hb33dCunf2d4iIiIlL0qQEXkX+kjrc9oK1B6ZJM2XWYV7fu5VCaAtpErsRmg06dLga1\n/fSTgtpERESKCzXgIvKPebvYeOOOCvS5zZetJ87SZYMC2kSuJXtQ27PP5gxqO3u2oKsTERGRG0EN\nuIjkCWMMD2QGtPllBrSN3ZHCGQW0iVxVVlDbhg0QEWEPaqtaFWbPVlCbiIhIUaMGXETyVCV3V8bX\nCOBx/1L8mHKc7hv2sP2kpvNErqV6dfjhB1i0CLy94cknoUEDBbWJiIgUJWrARW6QGTNmMGzYsIIu\no0C4OBm63OpD75LpJCyN4aVNScxJPsrX33xD1apVcXNzy7H/unXriIiIoFGjRsyYMcOxvlWrVvj6\n+l7zc+zQoQORkZGEhYUxduxYAH7//XciIiJo0qQJzZo1488//7xm3fHx8bi4uLDiMh3PzJkzGTJk\nyCXrZ8+eTePGjWnSpAkPPvggx48fByAxMZFmzZoRERHB22+/7dh/4cKFNGzYkIYNG/LTTz9dto7o\n6GjHz3FxcSxbtuyatUvR0bw5rF1rD2pLSlJQm4iISFGiBlykGMnIyMjX83kcOUDQppU0LF2SqbsP\ns7T8HcSsXE1AQECO/V566SVmzpxJbGws0dHRHD16FIBp06YxcuTIa55n2rRpxMbGsnLlSiZNmsSJ\nEyfw8/Nj4cKFLFu2jL59+/LGG29cc5y33nqLpk2b/q1rfPjhh1mxYgXLli2jTp06fPbZZwD079+f\noUOH8vPPP/Pf//6Xbdu2kZGRQb9+/ViwYAELFiygX79+l/2dqAGXrKC27dvhrbcuBrW9/DIcOlTQ\n1YmIiMj1UgMukgcyMjJ46qmnaNq0Kf379yc4ODjH9uzLnTt3JjY2FoChQ4fSsGFD6tevzw8//ADA\nkCFDePrpp2nbti2hoaFs27btsueMjY0lPDyce+65h2eeeQaAjRs30rx5c5o1a8Zjjz3GmTNnALj1\n1lvp3r07//rXv0hPT6dz587cc889NG7cmFWrVgHQt29fGjZsyD333MOcOXMACAwMpGvXrjRo0IC+\nffsCXPZ4y7Jo27YtsbGxnD59moYNG7Jz507GjBnDogU/srTrk7Q9kUyiszs9/0jh7IWLD7ampaXx\n/+zdeXhMd/vH8ffJvpCEoAhRGpSIpSKWiEwQqmqpvdYQoY9WS6lqeRCUVktptQ/VoAS1tlXhV1rG\n2tpDLLXUrraQheyZ+f7+mGQqJGhLMpH7dV2uy0zOOXPPTKrzmft77pOUlETlypWxs7MjICDAXNO9\nQT0vdnZ2AKSmpuLp6YmTkxNly5alePHiANjb22NjYwPAkCFDWLRoEUajkdatW7N7924Adu/eTdmy\nZXM85rFjx/Dz86Nt27asXbv2gY8NkJSUhLe3N2AKzgEBAQC0bduWrVu3cvr0aSpXroybmxtubm48\n++yznD59OsfxZsyYweXLl9HpdERERDBjxgwiIiLQ6XTm+4cMGULLli15+eWXuXNHht09zZycYOxY\n06C20FCYPRu8vODjj2VQmxBCCFEYPbEArmladU3Tou/6k6hp2jBN0z7WNO13TdMOa5r2naZpbk+q\nBiHyyw8//ICLiwtbt26lXbt2ZGY+/DJc0dHRbN++nV27dvHTTz8xfPhwjEbTwLLSpUuzdu1aRo0a\nxddff53r/mvWrGHy5Mls2bKFiIgIAF5//XXmz5/P5s2b8ff3N99/5coVRo8ezbp164iIiMDLy4st\nW7awevVqhg8fDsCGDRvYvn07W7ZsoWvXrgBcv36d8PBwfv31V9atW0diYmKu+2uaRkREBO+88w6h\noaEMHz6cypUr8/bbb9O2bVv0ej1vtgpkTu2KlHOw5VZ6JtOzBrTdvHkTN7e//hlwc3Pj1q1bf/s9\n6Nq1K1WqVKFp06ZYW2IsWb0AACAASURBVFub709KSmLs2LG88847gCngzpkzh//85z+0aNGChg0b\nAvDBBx8wevToHMd87733mDVrFlFRUbi6uub52BEREfj4+LB9+3ZzAM9+L+9+Tjdv3qREiRIPfK5v\nv/02Hh4e6PV6QkNDefvttwkNDUWv1+Ph4QFAQEAAP//8M40bN87z90M8XcqWhTlz/hrUNmqUDGoT\nQgghCqMnFsCVUieUUnWVUnWB+kAy8B2wCaillKoNnATee1I1CJFfTp06RYMGDQBo2LAhmqblua3K\n+rR84sQJGjVqhKZpuLm5UaZMGWKz1pbWr18fMHWgb+ZxYeB33nmHtWvX0qtXLxYsWADA0aNH6du3\nLzqdjmXLlnH16lUAPDw88PT0BExd8uXLl6PT6ejevTsJCQkAfPjhhwwYMICQkBCOHz9u3q9s2bJo\nmkaFChWIi4vLc//SpUvTqlUrDh06RLdu3XKtuULWgLZiNlb83/VEXjt8kfVJivj4eDbdSMSgFAkJ\nCZQsWfIRX/m/rFy5knPnzhEVFcWxY8cAU7e+e/fuvPvuu9SsWRMABwcH+vfvz4oVK3jzzTcBiIqK\nwtfXF3d39xzHPHXqFH5+fgDmoH769Gl0Oh06nc7cvQ4NDSUmJoYuXbqYl8xbWf31z2v2cypZsiTx\n8fH33T927Fh0Oh1jx459pOd6d00nTpz4ey+UKNTyGtS2fXtBVyaEEEKIR5FfS9BbAH8opc4rpTYq\npbLbg78Bj7bGVAgL5uXlxb59+wDYu3evOWRnc3V15erVqxgMBqKjowGoVq0av/32G0qZAuj169cp\nVaoUQI4Af++xsrm7uzN79mwiIyP58MMPSUxMpFatWixbtsx8PvS4ceMAcnSEvb296du3L3q9Hr1e\nz4EDB1BK0bJlSxYtWsTAgQPN+937RYJSKtf9AY4cOcKuXbto3769+RxmOzu7+1YD2FppuNhY83HN\n8iRmGvg2NoVzRmvCd0Tz05832bFjhzlgPgqlFOnp6YApXDs6OuLo6IjRaKR379507NiRjh07mre/\ncuUKERER/Pe//+X9998HTKsR9Ho9L774Ips2bWLkyJGcP3/+vvcVTO919nP38vIi9a51wG5ubjg5\nOQFQp04ddu3aBZhWFzRr1oyqVaty9uxZEhMTSUxM5OzZs3h5eTF58mT0er152Nzd4T231/DumqpV\nq/bIr5V4emQPalu4EC5fhmbNoFMnOHmyoCsTQgghxIPY5NPj9ACW5XL/AGB5bjtomjYIGASYO3dC\nWKqOHTuycuVKAgMDadCgAfb29jl+PmrUKIKDg/H29qZMmTIA1KtXjyZNmtC4cWOMRiPTp0/PEbwe\nZsaMGWzcuBGj0UhwcDAuLi588cUXhISEkJGRAZiWUAcHB+fYLywsjKFDhxIUFASAr68vU6ZMoU2b\nNoDpPOrsAJ6b3PafOHEigwYNIjIyEk9PT1q1akVAQAA+Pj788ccfdOnShfHjxxMfH094eDh//vkn\nIzu3p9+gwXxTsR7Vho3lyPjh9NWgxat9iDHa8kKmgeH/eY1du3aRlpbGvn37+P777++rJzMzk1at\nWgGQnp5Ot27dqFy5MqtWrSIqKopr164RGRmJj48Ps2bNon///sycOZNGjRrRo0cP1q9fz5gxYxgz\nZgwAISEhDBw4kEqVKjFlyhQGDBiAu7u7+cuRe3388cf88ssvAJQsWZL58+cDMHXqVEJDQ0lPT6dN\nmzbUqFHDfH/r1q3Nf7/7y5FsjRs35pVXXqF79+74+/sze/Zsjhw5wuzZswH49ddf+eqrr7Czs2PF\nihV5vlfi6WZtDf36Qdeu8Omn8OGH8OOP8J//wLhxkMevrBBCCCEKkJZXd+2xPYCm2QF/At5KqWt3\n3T8G8AU6qYcU4evrq7I7PkJYqoyMDGxtbdm5cydTp05l3bp1BV2Sxdt0I5GPTl83367ubM+l1AyS\nDEasAO/iDjQo4UQDN2e8nOweuLS/qNDpdERGRj7ygDpRdFy9ChMmwLx5ULw4jBkDQ4fCPVf9E0II\nISySpmn7lVK+BV3Hk5YfHfA2wIF7wncI8DLQ4mHhW4jCokePHsTGxpKWlsbcuXMf67FHjRplngwO\npmXJGzdufKyPURCalzJNKW9Zqjg/x9423z52O5W98cnsiU9m/oVbzL9wi5K21pQ4foDdc2ZS3MYK\n66wwPm7cOJo3b15gz0EIS5E9qO3NN01D2kaNgi++gKlToXt3+BsLbIQQQgjxhORHB/xb4Cel1IKs\n2y8CM4BApdSNRzmGdMCFKLpupWeyLyuM749P5nZWd7xGcQcauDnh5+aEl7M9VtIdFyKHX36BkSMh\nOhoaNIDp0yHrynhCCCGExSkqHfAnGsA1TXMGLgBVlFIJWfedBuyB7NHOvymlXnvQcSSACyEADEpx\n4k4qe+JMgfxkUhoAbrbW+Lo64VfCifquTrja3n9etRBFkdEIixeblqNfvgyvvGI6V1xm9wkhhLA0\nEsAtiARwIURu4jIy2R+fwp74JPbFJ5OYaUQDqhezx8/NGT83J6oWszcvVxeiqEpO/mtQW2qqDGoT\nQghheSSAWxAJ4EKIhzEoxck7aVnnjidx4k4aCnC1saK+mxN+bs74ujnhJt1xUYRdu2Ya1PbVV1Cs\nmKkz/uabMqhNCCFEwZMAbkEkgAsh/q6EDAP74pPZG5/Mvvhk4jMNaEA1Z3vTueMlnKhezEG646JI\nOnbMNKQtKgoqVZJBbUIIIQqeBHALIgFcCPFvGJXiVJKpO743Ppnjt1MxAsVtrPB1daKBm+lPCbv8\nuDCEEJbj3kFtn3wCzZoVdFVCCCGKIgngFkQCuBDicUrMMLA/IdkcyOMyDABUzeqON3BzomZx6Y6L\nosFohMhIeP9906C2jh3ho49kUJsQQoj8JQHcgkgAF0I8KUal+CM5nb1xSeyNT+ZoVne8mLUVL2Rd\n5szXzYlS0h0XT7nkZJg507QcPTUVXnsNxo+XQW1CCCHyhwRwCyIBXAiRX+5kGjiQkMKerEB+M6s7\n/pyTXVZ33Bnv4g7YWEl3XDydsge1zZsHzs4yqE0IIUT+kABuQSSACyEKglKKs8np7MmarH70dioG\nBU7WVrzg6ohfViAvbS/dcfH0OXYM3n0X1q0DT09TZ7xHDxnUJoQQ4smQAG5BJIALISxBUqaRgwnJ\n7Mk6d/xGeiYAzzra4VfCtFzdu7gjttIdF0+RzZtNg9oOHgRfX5g+XQa1CSGEePwkgFsQCeBCCEuj\nlOJcSrppkFtcMjG3U8hU4GilUc/VdJmzBm5OPGNvW9ClCvGv3TuorUMHmDZNBrUJIYR4fCSAWxAJ\n4EIIS5dsMHXHsyerX0szdcc9HW3xc3OmgZsTPi6O2El3XBRiuQ1qGzcOSpcu6MqEEEIUdhLALYgE\ncCFEYaKU4kJKBnvjTYPcDiemkKHAwUqjnqsjvm7O+Lk5Uc5BuuOicLp3UNv778Nbb8mgNiGEEP+c\nBHALIgFcCFGYpRiMHEpMYU+caZjb1azueEUHW9Nk9RJO1HFxxE6mW4lC5t5BbVOmwKuvyqA2IYQQ\nf58EcAsiAVwI8bRQSnE5NcM0WT0umUOJKWQohb2VRh0XRxpkXXvcw9GuoEsV4pHdO6jtk08gMLCg\nqxJCCFGYSAC3IBLAhRBPq1SDkcOJKebJ6pdTMwDwyO6Ou5m64w7W0lIUls1ohCVLTMvRL10yDWr7\n6COoXr2gKxNCCFEYSAC3IBLAhRBFxeWsyep74k3d8TSjwk7TqO3iaJ6sXsHBFk2TYW7CMt09qC0l\nxTSobfx4GdQmhBDiwSSAWxAJ4EKIoijdaORwYip74pPYG5fMxazueFl7G/zcnPHLOnfcUbrjwgJd\nuwbh4fDVV38NanvzTXB0LOjKhBBCWCIJ4BZEArgQQsCV1Iysy5wlcTAhhVSjwlYDHxdH/NycaODm\njKejdMeFZTl+3DSo7ccfZVCbEEKIvEkAtyASwIUQIqd0oyImMYV9WcvVz6ekA/CMvY353PF6rk44\nSXdcWAgZ1CaEEOJBJIBbEAngQgjxYNfSsrrjcckcSEgmxaiw0aBW8azJ6iWceNbRTrrjokDJoDYh\nhBB5kQBuQSSACyHEo8swKo7e/muy+tlkU3e8tJ0NvlmXOXvB1QlnG+mOi4KRkvLXoLbkZBnUJoQQ\nQgK4RZEALoQQ/9yNtEz2xiexJz6ZAwkpJBuMWGvgXdyBBm7O+Lk5UcVJuuMi/12/DhMmyKA2IYQQ\nEsAtigRwIYR4PDKNimN3UtkTl8Te+GT+yOqOu9tam84dL+FMfVdHitlYF3CloiiRQW1CCCEkgFsQ\nCeBC/DsXtuzk2OLV5ts1+3TGM8i/ACsSliI2PdM8yO1AfDJ3DEasgJrFHbImqzvxnLM9VtIdF/lg\nyxbToLYDB6B+fZg+XQa1CSFEUVFUArhNQRcghHjykq/FcvSblWA0ollb82wr+UQrTErZ2fBiGRde\nLOOCQSmO3041nzs+/+It5l+8RYns7ribE/VdnXCxle64eDKCgmDvXli61LQcXaeD9u1h2jQZ1CaE\nEOLpIAFciCLAq1MbNEABVrY2WNvbYczMxMpG/gkQf7HWNGq5OFLLxZEBnu7cSs9kX4JpsvqvcUls\nvHEbK+D5Yg74lXDC182JatIdF4+ZlRX07g2dO/81qM3bWwa1CSGEeDrIEnQhioj9M79GP3w89m6u\npMUn4FzuGWoN6I5P6Ku4VvYs6PKEhTMoxYk7qeyNT2ZPXDInk9JQgJuNNb5Z3XFfNydcpTsuHrPr\n1yE8HObOBScnU2f8rbdkUJsQQjxtisoSdAngQhQRRoOB6C+/ofagXpz7Pz0x85ZwdsMWlNFIpeBm\n+IT1xKtDa6zt7Aq6VFEIxGcY2BefzN74JPbFJ5OQaUQDqhezN1133M2ZasXssZbuuHhMfv/dNKht\n7VqoWNE0qK1nTxnUJoQQTwsJ4BZEArgQT0bixcscXbCCmIhl3L5wGcfS7nj364pPWE9KVnuuoMsT\nhYRBKU7dScs6dzyJ3++YuuMuNlbUd3UyL1cvYSunPIh/T6+HESP+GtT2ySemc8WFEEIUbkUlgMv3\nxkIUYS4VPWg8bjgDz/xKp/WL8Wjqx4GZX7OgejOWB3bm+JI1ZKamFnSZT72FCxcyefLkgi7jH7PW\nNJ4v7kDfiiX53Kciq3wr837VZ/Bzc+ZgYgofnb5O133nGHL4Igsu3OTo7RQM93z5e+7cOdauXWu+\n/d1331GjRg0cHBxybHfgwAH8/f1p0qQJCxcuNN/funVrSpcu/dDXsU+fPuh0Onx9ffn0008BOHjw\nIP7+/jRr1ozmzZtz5syZhz7nkydPYmtry44dO+77WWRkJBMmTLjv/mnTptGwYUP8/f0ZOnQo2V+A\nx8bG0r17d5o3b06rVq3M2y9cuJAmTZrg7+/PgQMH7jtefHw8ixYtMt/W6/UcPnz4obUXdjqdaVDb\n4sWm5elBQdChg6lDLoQQQlg6CeDisbv7Q+HVq1dp3LgxQUFBpKenP/Ix3njjDZo1a8batWuJjIzE\nz8+PiRMn8uGHHxITE5Pnfr169fpHNX/22Wf/aL9H2dfLyyvPnz3oQ/zJkydp0qQJOp0Of39/Dh06\nBMCZM2do1qwZOp2OoKAgLl26BJgCTPPmzfH392fKlCl/6zlYWVtTuU1zOqz5mkEX99J06nvcuXyV\n9b2HMrd8fTa/NY7YI/LptigxGAz/eF9XW2ualyrO6KrPsKL+s3zpU4GQiiWxtdJYdjmOt45cpsve\ns0w6eZWfridyKz3zvgDerFkzDh48SIUKFXIce+jQoURGRqLX6/nss8+Ii4sDICIigo8//vihtUVE\nRKDX6/ntt9/48ssvuX37NuXKleP//u//2LZtGyNHjmT8+PEPPc6kSZMI/JvXx3rllVfYvXs3O3fu\n5Nq1a2zevBmAYcOGMW7cODZv3szGjRsBiIuL47PPPkOv1xMZGcmbb7553/GKagCHvwa1nThhGtK2\nZQvUqgWvv24K5UIIIYSlkgAuHru7PxRu2bKFVq1asWXLFuz+xrnFGzduZNu2bbRv357FixezfPly\nxo0bx+jRo/Hx8clzvyVLlvyjmp9kAH+QB32Ir1KlCjt37kSv1zNp0iRzZ+/LL78kNDQUvV5Pv379\n+PzzzwEYPXo04eHh7Ny5k82bN/P7P2wHOZctQ8PRbzDg5Ha6/rKcSq0DOTxnMd/4tGBpk/YcWbCc\njKTkf/aEBQaDgZ49exIYGMjo0aPv+4Lm7tsDBw5Er9cDEB4eTuPGjWnYsCFRUVEATJgwgV69etG+\nfXvq1q2b53uu1+vx8/MjKCiI/v37AxATE0PLli1p3rw53bp1IyUlBYBKlSoxZMgQOnToQEZGBgMH\nDiQoKIimTZuyZ88eAEaOHGn+Ym358uUAeHp6MnjwYBo1asTIkSMByMjIYFBYGIPbtWFOj/b0Sv6T\nlfWfJSX8TZ45Fc3B67fo1jyQdj9uJ2TCB6xa+yMNApqxZ98+3N3d7+t+p6WlkZSUROXKlbGzsyMg\nIMBc071BPS/Z/w6lpqbi6emJk5MTZcuWpXjx4gDY29tjk3V1gCFDhrBo0SKMRiOtW7dm9+7dAOze\nvZuyZcvmeMxjx47h5+dH27Ztc3yRcLeqVaua/579OAaDgSNHjjB9+nQCAwP58ssvAdizZw8BAQHY\n2dlRuXJlbt++TVpaWo7jzZgxg/3796PT6ViyZAkLFy7kgw8+QKfTYTAY8PLyYvjw4QQGBtK7d2+M\nRuMjvUaFiaMjjB4Np0+bpqTPnQteXqZQnvUrLYQQQlgUCeDiscv+UFi1alXGjRvHokWLGDhwYK7b\nbt26lcDAQHQ6Ha+99hpKKYYOHcrFixfR6XTMnTuX3bt307NnT1atWkVISIi5Wzxr1iwaNmxIUFAQ\n33zzDfBXeElISKBbt260aNGC5s2bc/r0aQB0Oh3Dhg2jVatWtGjRgrS0NGbMmMHly5fR6XRERESw\ncOFCOnbsSKdOnahVqxbbt28Hcg8s9+6bl9w+BOf2If5uNjY2aFkDrBITE6lduzYA3t7exMfHA6Yu\nWZkyZQCIjo4mICAAgLZt27J169ZHebvypFlZ4dm8KS8v+5JBl/cTOH0caXEJ/DTgbeaUf4Gf/zOa\nawfyXo0gcvfDDz/g4uLC1q1badeuHZmZmQ/dJzo6mu3bt7Nr1y5++uknhg8fbv49Kl26NGvXrmXU\nqFF8/fXXue6/Zs0aJk+ezJYtW8y/p6+//jrz589n8+bN+Pv7m++/cuUKo0ePZt26dURERODl5cWW\nLVtYvXo1w4cPB2DDhg1s376dLVu20LVrVwCuX79OeHg4v/76K+vWrSMxMTHX/V3tbPh+8TfsnzkV\nuy8mMWnUSF5vVAfffoNwbhSI67T5fGAsycQTV9hwLRHDXSvVb968iZubm/m2m5sbt27d+tvvQdeu\nXalSpQpNmzbF2vqvqe1JSUmMHTuWd955BzD9WzZnzhz+85//0KJFCxo2bAjABx98wOjRo3Mc8733\n3mPWrFlERUXh6ur6wMffunUrV65coVmzZly/fp2YmBjeeustNm3axNKlSzl+/Dg3b96kRIkSD3yu\nb7/9NvXr10ev19OrVy9CQkIYM2YMer0ea2trMjMz6datG1u3bsXR0THPLwaeBmXKwOzZcOSIaUn6\n+++brhseGQlP4fcOQgghCjEJ4OKxy/5QeOrUKcaMGUNoaGiuwUApxbBhw1i7di16vR5HR0eioqL4\n/PPP8fDwQK/XM3jwYOrWrcvKlSvp0qWLed8jR46wZs0adu7cyZYtW+jdu3eOY0+dOpVOnTrxyy+/\n8Omnn+b4sKzT6di4cSPPPfccmzZt4u233zY/XmhoqHm7NWvW8NVXXzFr1iwg98CS1753y+tDcG4f\n4u+1f/9+GjduzOuvv24+N7Rly5bMnTuX2rVrM2fOHPOXG3d3t/5pMMmLU6mS+L49mJBjerpv/w6v\njq05unAlkfVfZHH9Fzk0ZxFpibcf2+M9zU6dOkWDBg0AaNiwoflLltxknyN84sQJGjVqhKZpuLm5\nUaZMGWJjYwGoX78+YOpA37x5M9fjvPPOO6xdu5ZevXqxYMECAI4ePUrfvn3R6XQsW7aMq1evAuDh\n4YGnp+mydDExMSxfvhydTkf37t1JSEgA4MMPP2TAgAGEhIRw/Phx835ly5ZF0zQqVKhAXFxcnvuX\nLl2aVq1acfjQId7s24ueHiV5o3JpWpYqzrhqZWnmXoyjt1OZfuY619IyGHToAl+dj2XtbSNx8fFs\nupGIQSkSEhIoWbLk334PVq5cyblz54iKiuLYsWOAqVvfvXt33n33XWrWrAmAg4MD/fv3Z8WKFeYl\n4FFRUfj6+uLu7p7jmKdOncLPzw/AHNRPnz6NTqdDp9OZvwQ8fPgwo0eP5ttvv0XTNEqUKEH58uWp\nU6cOdnZ26HQ6YmJiKFmypPmLNsD8XAcOHIhOp2P27NkPfZ6apuWo6cSJE3/7tSpsnn8efvjBtCS9\nTBno0wcaNDANbhNCCCEsgYykFQUmNjaWc+fO0aFDBwDu3LlD9erVH2nfY8eO0bRpU/NS0bu7WGAK\nDlu3bmXOnDkA5u3g0QJLbttkBxYwLV9t2bLlI9Wa24fgvD7Ev/zyy9y5c4c33niDLl26UL9+fX79\n9Vf27NnDG2+8wZ49e3j33XeZPHkynTp1YtmyZbz//vt88cUXWN11LZ5/Gkwe5blUaOpHhaZ+BM2a\nyPEl3xHzVSQ//+c99CMm8nyPDviE9aRcwxceGCyLMi8vL37++WdCQ0PZu3cv916JwtXVlatXr1K6\ndGmio6Pp06cP1apVY968eais0Hn9+nVKlSoFkON1zuuqFu7u7syePRulFNWqVaNr167UqlWLZcuW\nUa5cOQDzjIa7/1vy9vY2L2PO3kYpRcuWLWnXrh07duxg3LhxrF69+r73WymV6/5g+gJt165dtG/f\nns8++4w333wTOzs7NKOBZu7FaOZeDKUUZ5PTaWJjTXEba1b9GY8ROG+0JnxHNCkNarJjx45HOl/7\n7poyMjKws7PDwcEBR0dHHB0dMRqN9O7dm44dO9KxY0fz9leuXCEiIoL//ve/vP/++8yYMYPo6Gj0\nej27du0iJiaG33//neXLl+Pl5cW+ffto2LAhe/fupVy5cnh5eZlPIQBTIB8wYACrV682v38ODg5U\nqVKFixcvUrFiRfbv30+nTp2oXLkyY8eOJSMjgytXrlCsWDHs7e1zfJn5559/5lhBYWdnl+O2UipH\nTS+++OIjv1aFnU4He/bAsmXw3numrni7djBtmimkCyGEEAVFArh47O79EJiXUqVKUaVKFdatW0ex\nYsUAUxfqUXh7e/O///0Pg8GAtbU1RqMxRwD19vamcePGvPLKKwA5BsDlFlju3jevbfIKLPfue6/c\nPgTn9SF+3bp15v1SU1PN58C6ubnh5ORkPl72h/cyZcqYO9116tRh165dNGnShA0bNjBz5swHv4j/\nkoObK/VeD6HukH5c3RtNzLyl/L7se47M/5ZStZ7HJ6wnNXp3wrFkiYcfrAjp2LEjK1euJDAwkAYN\nGmBvb5/j56NGjSI4OBhvb2/z6QX16tWjSZMmNG7cGKPRyPTp0x/6e3e3GTNmsHHjRoxGI8HBwbi4\nuPDFF18QEhJi/m/uvffeIzg4OMd+YWFhDB06lKCgIAB8fX2ZMmUKbdq0AUy/o+PGjcvzcXPbf+LE\niQwaNIjIyEg8PT1p1aoVAQEB+Pj48Mcff9ClSxfGjx9PfHw84eHhJF6/yqG3+tF/0GssqFiXasPG\ncmT8cAYAvfr3p7irm/mxdu3aRVpaGvv27eP777+/r57MzEzzSpL09HS6detG5cqVWbVqFVFRUVy7\ndo3IyEh8fHyYNWsW/fv3Z+bMmTRq1IgePXqwfv16xowZw5gxYwAICQlh4MCBVKpUiSlTpjBgwADc\n3d3N/33ea9iwYcTHx9OvXz/AtDKhbdu2zJo1i969e5ORkUHz5s154YUXANM56IGBgWiaZl6Jc7ey\nZcvi6OhI586dGTJkCMHBwQwbNox169axYsUKbGxsWL16NaNGjcLDw4P27dvn+V49jaysoFcv6NQJ\nZs0yXTe8Vi0YPBjGjzd1yIUQQoj8JtcBF4+d0Wikbdu2ODk58dJLL3HlyhXGjh2b67Zbt24lPDwc\npRRWVlZ8+umn1K5dGy8vrxznbUdGRlKhQgXzB96mTZsyc+ZMli1bhrOzM/369aNfv37m/RISEnjt\ntde4du0aSinatm3LyJEjcxxr8uTJ5mP269ePxMREunfvTmpqKpcuXWLs2LFcunSJ3r17o9frOXLk\nCCNGjLgvsNy9b48ePe57jl5eXnTq1Indu3fj4eHB4sWLc3QZ735Od4uKiuKjjz4yb/vpp59St25d\njh49yuDBg7GxsSEjI4O5c+dSq1Ytzpw5Q2hoKOnp6bRp0ybP1/xJSr99h9+//YGYeUu5ujcaa3t7\nqnVpi09YTyo0ayRd8SwZGRnY2tqyc+dOpk6dmuOLF5G7TTcS+ej0X+OtPRxsuZyagYeDLSEVSxLo\nXgwr+f3K4e5/RwXcuAHh4TBnDjg5mTrjw4aZBrkJIYQoeEXlOuASwIUQT8T16CMcnreU45FrSE+8\nTYnqz+EzsCfe/briVNr94Qd4inXu3JnY2FjS0tKYO3cuderUeWzHHjVqlHkyOJhWpGRf2qowMyjF\n5tjbtCxVnJ9jbxPkXoy98SlEXLjJuZR0vJztCfUsia+rE5qmsXnzZiZOnJjjGOPGjaN58+YF9Azy\nnwTw3J04Ae++azpXvGJF+OADU6f8bywqEUII8QRIALcgEsALv2PHjjFkyJAc9w0aNIiePXsWUEWP\nn3zgz11GcgonV/7I4XlL+XPnXqxsbfHq2BqfsF5UatEUTT71in/BoBRbYu+w8OJNrqZlUsfFkVBP\nd2oWd3j4zqJIo4FIWAAAIABJREFU27oVRoyA/fvhhRfgk09M54oLIYQoGBLALYgEcCGeDjePneTw\nvCUcW7SK1FvxuFb2pFZoD2r1706x8mULujxRiKUbFeuvJRB5OY74DANNSjgzwLMkzzrZP3xnUWQZ\njaZBbe+/DxcumAa1ffQR1KhR0JUJIUTRIwHcgkgAF+Lpkpmayqnv/o+YeUu4uGUXmrU1VV5uSe2w\nnjz7YhBW90y1F+JRpRiMrL4Sz4o/40kxGAkuXZx+FUvyjL1tQZcmLFhKCnz2mWlQW1ISDBoEEybI\noDYhhMhPEsAtiARwIZ5ecafOEBPxLUcXLCf5eizFKpSj1oAe+IS+iounR0GXJwqphAwD316O4/ur\nCYCi3TOuvFqhBCVs5eIfIm83bsDEifC//5kGtY0eDcOHy6A2IYTIDxLALYgEcCGefob0dP74cRMx\n85ZybuNWACq/GIRPWE+qvNwSa1vpYIq/73paBosvxfHT9UTsrTS6lHejS7kSONvI7AGRt7sHtVWo\nYOqMy6A2IYR4siSAWxAJ4EIULQnnLnJk/rccmf8tdy5fxblsGbxDuuEz8FXcnnu2oMsThdCFlHQW\nXrjJtltJuNhY0dOjJO3LumAniUo8wNatMHIk7NsH9erB9OkyqE0IIZ4UCeAWRAK4EEWTMTOTs/+3\nhZh5SzkT9QvKYMCzuT8+g3rh1fFFbOxlwJb4e07cSWX+hZvsT0ihtJ0N/SqWJLh0cazlGuIiD0Yj\nfPut6brhFy7Ayy/DtGkyqE0IIR43CeAWRAK4EOL25SscXbCcmIhvSTx3EQf3Enj364pPWC/cn/cq\n6PJEIXMgIZmI8zc5kZSGp6MtAyq641/SGU2CuMhDaqppUNsHH5gGtYWFQXi4DGoTQojHRQK4BZEA\nLoTIpoxGzv+8nZh5Szj9/U8YMzPxaOqHT1hPqnV9GVuZliQekVKKHbeSWHDxJhdSMni+mD2hnu7U\nc3Uq6NKEBcse1DZnjmk42+jRMGyYaWibEEKIf04COKBpmh3wEhAAlAdSgCNAlFLqRL5UiARwIUTu\nkq/HcvSblcTMW0LcqbPYu7pQo09naof1pHTtmgVdnigkDEqx8cZtFl28xY30TOq7OhLq6U61Yg4F\nXZqwYCdOmML399+bBrV98AH07i2D2oQQ4p8q8gFc07T/Ap2AbcB+4DrgAFQDggANGKmUOvKki5QA\nLoR4EKUUl7b+yuF5Szm1ej2GtDTK+tXDJ6wnz/fogF0x54IuURQC6UYja68msvTyLRIzjTRzL0b/\niiWp6GhX0KUJC7ZtG4wY8degtk8+gebNC7oqIYQofCSAa1oHpdQPee6oaeWAikqpPU+quGwSwIUQ\njyrl5i2OR67h8Lyl3Dx6Attizjz/akdqh/XkGd86co6veKikTCOrrsSx8s940o2K1mVc6FuhJKXt\n5RriIncyqE0IIf69Ih/Ac93YtCTdRimV/ORKup8EcCHE36WU4s9f9xEzbyknlq8lMyWV0nVqUntQ\nL2r06oS9q0tBlygsXFxGJksvxfHjtQQ0NDqWdaWHRwlcba0LujRhoXIb1DZhAjzzTEFXJoQQlk8C\n+L0balp/oCdgDexSSo19koXdTQK4EOLfSEtI5PjS74iZt5TrB49g4+hA9W7t8AnrRfkmvtIVFw90\nNTWDRZdusenGbRytrehe3o1O5dxwtJaTfUXuYmNNg9r+9z9wcDCdKz58uAxqE0KIBynyAVzTtJeU\nUuvvuv2tUqpH1t8PKaXq5FONEsCFEI/Ntf2HOTxvCceXfEfGnSTca1bDZ+Cr1OzbBUf3kgVdnrBg\nZ5PTWHDhFrvikihha00vjxK0fcYVWyv5Akfk7uRJePddGdQmhBCPQgK4po0H6gL/VUodyRrKVgEw\nAiWVUt3zq0gJ4EKIxy39ThInlq8lZt4Sruw+iLWdHVU7v4RPWE8q6ppIV1zk6djtFL6+cJPDiamU\ntbchpKI7QaWKYS2/MyIP27bByJGwdy/UrQvTp8ugNiGEuFeRD+AAmqaVByYBGcA4oCTgpJQ6kD/l\nmUgAF0I8STdijhMzbynHFq8mLT4BN69n8RnYE++Qbjg/U7qgyxMWSCnFvoRkIi7c4nRSGpWd7Aj1\ndKehm5N8eSNyZTTC8uWmQW3nz0PbtqZBbTXliolCCAFIADf9UNMcMXW8vYGJwC5gulIqLX/KM5EA\nLoTIDxkpKZxavZ7DXy3h8vbdWNnY8FyHVtQO60Wl4GZosm5U3MOoFFtv3mHhxVtcTs3Au7gDAz3d\n8XFxLOjShIVKTYXPPzctR7992zSoLTxcBrUJIUSRD+CapoUDTQEbYJVS6nNN0zoBrwMRSqml+VWk\nBHAhRH67+ftpYr5eyrFvVpISewuXShWoFdqDWgN6UNyjXEGXJyxMplHxfzcSWXzxFjczDPi5ORHq\n6c5zzvYFXZqwUDKoTQghcpIArmnRSqm6mmkt3X6l1AtZ99sCbyqlpudXkRLAhRAFJTMtjT9++InD\n85Zy4eftaFZWVH6pOT5hvajyUnOsbOTa0OIvqQYjP1xNYNnlOO4YjDQvVYyQiu6Ud7At6NKEhTp5\n0hS+v/sOPDxMnfE+fWRQmxCi6JEArmnLgDjACUhUSr2Zn4XdTQK4EMISxJ85z5GIZRyZv5ykq9cp\nVr4stQZ0p1boq7g+W7GgyxMW5HamgRV/xrPmSjyZSvFSGRd6VyiJu518YSNyt307jBjx16C2Tz6B\nFi0KuiohhMg/RT6AA2iaVg/IUEodyb+S7icBXAhhSQwZGZyJ+oWYeUs4u2ELAJWCm1E7rCfPtW+F\ntZ1dAVcoLMXN9EwiL91i/fVEbDSNTuXc6F7ejWI21gVdmrBA9w5qe+kl+PhjGdQmhCgaikoAz3OB\nk6ZpjZRSB/MK35qmFdM0Tf6XIIQocqxtbana8UU6RS0m7NxuGo8bzq3jp/ix62C+qtiAraMmc+vk\nHwVdpsVbuHAhkydPLugynih3OxveqlKG+XU8aVLCmWWX4+hz4DzLL8eRajBy7tw51q5da95+woQJ\n1KhRA51Oh06nw2AwAHDgwAH8/f1p0qQJCxcuzPPxZsyYQbNmzfD396dv375kZGSQkpJCcHAwTZs2\npVGjRmzYsOGhdWdkZFC1atVc359Lly6h0+nuu//kyZM0adIEnU6Hv78/hw4dAiA1NZVevXoREBBA\nr169SE1NBeDcuXM0b94cf39/pkyZkmsdCxcuJDExEYD4+HgWLVr00NoLMysrePVV+P1304T0nTvB\nxwdeew2uXSvo6oQQQjwODzrDqKemads1TXtf07TWmqa9oGlaE03T+mqatgDYABTPpzqFEMIiuXh6\n0GTCCAae/Y1XohZRvokv+2d8xYLqzVgR1IXjS78jMytwCMuXHXgfNw9HO8ZUK8uc2hWpUdyBeRdu\n0u/geZYdOML3P/yQY9sxY8ag1+vR6/VYW5s65UOHDiUyMhK9Xs9nn31GXFxcro/zxhtvsG3bNnbu\n3AnAxo0bsbGxYd68eezYsYN169YxbNiwh9Y7d+5cnn/++b/1HKtUqcLOnTvR6/VMmjTJHN4XLlzI\n888/z/bt26levbr5C4TRo0cTHh7Ozp072bx5M7///vt9xyxqATybgwO88w6cPg2vvw4REeDlBZMn\nQ3JyQVcnhBDi38gzgGed8/0KpvPA+wAfA+8DPsA3SqkApdTufKlSCCEsnJW1NVVeakGH7yIYdHEv\nTaeMJvHCn6zv9QZzPeqzZdg4Yo+eKOgyC4zBYKBnz54EBgYyevRovLy8cvz87tsDBw5Er9cDEB4e\nTuPGjWnYsCFRUVGAqUvcq1cv2rdvT926dXMNbgB6vR4/Pz+CgoLo378/ADExMbRs2ZLmzZvTrVs3\nUlJSAKhUqRJDhgyhQ4cOZGRkMHDgQIKCgmjatCl79uwBYOTIkTRu3JigoCCWL18OgKenJ4MHD6ZR\no0aMHDkSINf9lVK0b9+eS3t/ZWwlN2681QvH2CvM+PRTvl37I3X9A9ibdarVtGnTaNq0KZ999hkA\naWlpJCUlUblyZezs7AgICDDXdC+7rNMflFIYjUa8vLywtbXl2WefBcDR0RGrrOleK1asIDQ0FIDx\n48czY8YMAO7cucOGDRvo3Lmz+bh37tyhbdu2tGzZMs9utY2Njfka6ImJidSuXRuArVu38vLLLwPQ\nrl07tm7dCkB0dDQBAQEAtG3b1nx/ts2bNxMdHU3Xrl0ZOnQoM2bMYP/+/eh0OqKiopgwYQLdunWj\nbdu2NGzYkGPHjuVaV2FWqhR89hkcPQrBwfDf/0K1arBwITyh74qEEEI8aUopi/9Tv359JYQQhY3R\nYFDnft6m1nYbrGbYVlKfUF4tadxOxcz/VqXfSSro8vLV6tWr1eDBg5VSSu3YsUNVqlRJLViwQE2a\nNEkppdRzzz1n3jY0NFRt2bJFHTx4ULVo0UIZjUYVFxenqlatqgwGgxo/frx66623lFJKLVmyRI0Y\nMSLXxxw6dKj66aeflFJKGQwGpZRSAQEB6vz580oppWbOnKk+//xzpZRStra25vv/97//qalTpyql\nlLp69apq0qSJUkqpmjVrqoyMjBzHs7e3V1euXFFGo1FVr15dJSQk5Ln/9evXla+vr+rRo4davny5\nMhqNavYP61W1jt1Vi12n1OBDF9TGP84rg8GgkpOTVYsWLdS2bdvU5cuXVWBgoPl5jRs3Ti1dujTP\n13ry5MnKy8tLtWnTRiUl5fw9CwsLU/PnzzffHjhwoHrrrbdUu3btlNFoNB9/48aNOd6fGTNmqClT\npiillIqMjMxRz9327dunGjVqpMqXL69+++03pZRSwcHB6uzZs0oppc6cOaNatWqllFKqatWq5v3m\nz59vPv7dAgMD1cWLF5VSSp09e1a1aNHC/LPx48erQYMGKaVMv1MdOnTI8zV5WmzbplSDBkqBUnXr\nKvXzzwVdkRBCPD7APmUB2fNJ/5GLXAghxBOiWVlRqUUA7ZbPYfDl/QR+8l9Sb8Xz04C3mVP+BX4e\n8h7XDhbojMt8c+rUKRo0aABAw4YNzZ3S3Kis4aAnTpygUaNGaJqGm5sbZcqUITY2FoD69esDpg70\nzZs3cz3OO++8w9q1a+nVqxcLFiwA4OjRo/Tt2xedTseyZcu4evUqAB4eHnh6egKmLvny5cvR6XR0\n796dhIQEAD788EMGDBhASEgIx48fN+9XtmxZNE2jQoUKxMXF5bl/6dKladWqFYcOHaJbt25omoa3\niyNNSzoz2usZ7mQa+OhaOiOP/8ny2GReeeUVluh34FqiBPHx8ebnlZCQQMmSJfN8/caMGcPJkyep\nXLlyjvPFJ02ahIuLi3k1AMCoUaOYNWsWY8aMQdM0rl27xsGDBwkODs5xzJMnT+Ln52d+/7K9/PLL\n6HQ6Vq1aZX5ffv31V7777juGDh0KQMmSJc3131271V3X2cq+f9WqVeh0OnPH/GHurunkyZOPtE9h\nFhAAv/0Gy5ZBXBy0bAlt25o65EIIIQoHCeBCCJEPnEq74zviNfof30r3bWvw6tCKowtWEPlCayJ9\n23Bo7mLSEm8XdJlPjJeXF9lXs9i7d685ZGdzdXXl6tWrGAwGoqOjAahWrRq//fYbSini4+O5fv06\npUqVAsgR4O89VjZ3d3dmz55NZGQkH374IYmJidSqVYtly5ah1+v57bffGDduHID5XGsAb29v+vbt\naz4P+8CBAyilaNmyJYsWLWLgwIHm/e79IkEplev+AEeOHGHXrl20b9/evLzczs4Og8FAy9LFWVC3\nEv1L2HI6KY3FF28xcU0UvzmVZlVsMk7Ozly4cIGMjAx27NhhDp73yh5wpmkarq6uODk5ATB79mxO\nnTrFxx9/bN7WaDTy+uuvs2DBAt59910yMjKIiYnhxo0bvPjii0yfPp1Fixbx448/UrVq1RzvX7Z1\n69ah1+vp0qWL+bEB3NzczI8dGBjI+vXrAVi/fj2BgYEA1KlTh127dgGwYcMGmjVrRpcuXdDr9axb\nt878+mRmZt7392x311S1atVcX5OnjZUV9OiRc1Bb7doweDBkfZ8khBDCgskFSYUQIh9pmkaFgIZU\nCGhI0KyJHItcQ8y8pfz82mi2jphI9R4dqB3Wk7J+9R7YJS5sOnbsyMqVKwkMDKRBgwbY29vn+Pmo\nUaMIDg7G29ubMmXKAFCvXj2aNGlC48aNMRqNTJ8+PUfX9GFmzJjBxo0bMRqNBAcH4+LiwhdffEFI\nSAgZGRkAvPfee/d1e8PCwhg6dChBQUEA+Pr6MmXKFNq0aQOYQm52AM9NbvtPnDiRQYMGERkZiaen\nJ61atSIgIAAfHx/++OMPunTpwvjx49k0fTrXfv+d3++kUqJeQ0o10bH4Uhw2g97Bv2NnnKw0+oeG\n4ermlutjjxgxgqNHj5rP/w4PD+f69eu89dZb5vPXAX755Rc++OADWrVqRUhICCkpKYwZM4Zp06bR\nsmVLwDQA7dKlS7Rr147bt2/TrVs3Nm3aRK1atXJ97F9++YWPPvrI/GXGzJkzAQgJCWHAgAEEBARQ\noUIF82qEqVOnEhoaSnp6Om3atKFGjRr3HbNTp06EhobSpEkTwsPDcXR0pHPnzgwZMgQwnZvepk0b\nYmNjHzgd/mmUPaitf3+YNAm+/BKWLoV334W334as7z+EEEJYmAdeBxxA07TdwHxgmVIqMV+quodc\nB1wI8TRTSnF1z0EOz1vKiW9/ICMpmVI+NfAJ60nN3p1wKJF72CpsMjIysLW1ZefOnUydOtXc5RQ5\nbbqRyEenr5tvt3/GBSNwMCGFy6mmLw5cbKyo4+JIXVcn6rk6UtHB9qn6wuZRTJgwAS8vL3r37l3Q\npViEU6dg9GhYswbKl4cPPoA+fcBaLjkvhCgkisp1wB8lgD8P9Ae6AruABUqpX/KhNjMJ4EKIoiIt\n8TYnvv2Bw/OWcm3fIWwcHKja5SVqD+qNR1O/Qh2yOnfuTGxsLGlpacydO5c6deo8tmOPGjUqx2Rw\nOzs7Nm7c+NiOn58MSrE59jYtSxXn59jbNC9VHOus9/1GWibRicnsOv8nXw4OIT3r/+G2moZvqxcZ\nOuxt6ro6Us7BtiCfQr6QAJ677dthxAjYuxfq1IFPPjGdKy6EEJZOAvi9G2qaNdAemA2kY+qKf66U\nin/gjo+BBHAhRFF07eARYuYt4fiS70hPvE3J573wGfgqNft2xam0e0GXJwqYUooraZlEJyRzMCGF\n6MQU4jJM16Yqa29DHRdH6rk6UdfFkVL2csZZUWI0wooVpo74+fPQpg18/DF4exd0ZUIIkTcJ4Hdv\npGk1MXXB2wGbgSVAU6C7UuqFJ1ohEsCFEEVbRlIyJ1b+SMy8pfy5ax9WtrZ4vfIitcN64tm8Kdrf\nOC9aPL2UUlxIyeBgQjLRiSkcSkjhtsEIQEUHW+q6mpas13FxxM1W1iUXBampMHs2TJ4Mt2/DwIEQ\nHg5lyxZ0ZUIIcT8J4NkbaNoeIBlTx3ulUirlrp+tVUq1f7IlSgAXQohssUdPEPP1Uo59s4rUuHhc\nq1TCZ+CreId0o1i5Zwq6PGFBjErxR3I60QnJRCekcDgxhRSj6f/5VZzsqOvqSD0XJ3xcHChmI4H8\naXbzJkycaBrU5uAgg9qEEJZJAnj2BppWTSlVoBfXlAAuhBA5ZaamcmrNBmLmLeGi/lc0a2ueaxeM\nT1hPnm2tw0omL4l7ZBoVJ5PSTIE8MYUjiamkK4UVUNXZ3hTIXZ3wLu6Ao7WsqngayaA2IYQlkwCe\nvYGmTQKmZ5/rrWlaCWCYUmp8PtQHSAAXQogHiTt1hpivl3FkwXJSbtykeMXy1BrQg1oDeuDi6VHQ\n5QkLlW5UHL+dSnSi6Rzy3++kkqnARoPnizlQz9WRui5O1CjugJ1V4R3+J+63Y4dpUNuePTKoTQhh\nOSSAZ2+gaQeVUvXuue9Afpz7nU0CuBBCPJwhPZ0/1m7k8LylnN+0DYDKbYLwCetFlbYtsLZ9+idj\ni38uxWDk6O1U0znkCSmcSkrDCNhpGt4uDtTLuuxZ9WL25qnsovBS6q9BbefOyaA2IUTBkwCevYGm\nHQZ8lVLpWbcdgH1KqVr5UB8gAVwIIf6uhHMXORKxjCPzl3Pnz6s4ly2Dd/9u+AzsiVuVSgVdnigE\n7mQaiElM5WCiKZCfSU4HwMlao1ZxR1OH3NWJ55zssJJAXmilpcHnn8ugNiFEwZMAnr2Bpr0PtMY0\nhA1gAPB/SqmpT7g2MwngQgjxzxgzMzm7YTOHv1rC2fWbUUYjni2a4hPWE6+OL2Jjb1/QJYpCIj7D\nwOHEFHOH/GJqBgDFbayo4+JI3azLnnk62hbq69UXVTdvwqRJ8MUXYG//16A2Z+eCrkwIUVRIAL97\nI01rB7TIurlJKRX1RKu6hwRwIYT4925f+pMjC5ZzJOJbEs9fwrFUSWr260rtsJ6UrO5V0OWJQiY2\nLZPoxBTTdcgTU7iWlglACVtr6ro4moe6lbO3kUBeiJw6Be+9B6tXmwa1TZ4MffvKoDYhxJMnAdyC\nSAAXQojHx2gwcOHn7Ryet4Q/ftiIMTMTj4CG1A7rSdUubbF1dCzoEkUhdCU1g+iEFKKzlqzfzDAA\nUMbOJmu5ummoW2l7mwKuVDyKnTtNg9p275ZBbUKI/CEBPHsDTWsAfA7UAOwBDUhTSrk8+fJMJIAL\nIcSTkXTtBke/WUnMvCXEnz6HvZsrNft0xiesJ6V9ahR0eaKQUkpxMTWDgwmmDvmhxBQSM40AeDjY\nZk1Yd6SOqyMlbCWQW6rcBrVNmwa18m0KkBCiKJEAnr2Bpu0FegPfAn5ACFBJKTX2iVeXRQK4EEI8\nWUopLup3ETNvKadWr8eQnk65hvXwCetF9e7tsSsmJ4KKf86oFGeT002BPDGZw4kpJBtMnz+edbQz\nd8jruDhSzEbWOluatDSYPdu0HD0xEUJDYeJEGdQmhHi8ikoAt3qUbZRSJwAbpVSGUmoe0PYJ1yWE\nECIfaZqGZ5A/bZd+weA/96P7dALpt5PYOHAkc8u/wKbBo7i2/3BBlykKKStN4zlne7qUd2Py8+X5\nrkEVZteqQKinO+521qy/nsj4E1fptPcsQw5f5KvzseyJSyLFYCzo0gWmoWwjRsDp0/Dmm7BwIXh5\nmUJ4UtK/O/bChQuZPHnyY6mzsDp37hxr16413/7uu++oUaMGDg4OObY7cOAA/v7+NGnShIULF5rv\nb926NaVLl37o69inTx90Oh2+vr58+umnABw8eBB/f3+aNWtG8+bNOXPmzEPrPXnyJLa2tuzYseO+\nn0VGRjJhwoT77p82bRoNGzbE39+foUOHopQiJSWF4OBgmjZtSqNGjdiwYUOOfbZs2YKmaVy6dOmh\nNQlRmDxKB3wb0BLTFPQLwBUgTClV+8mXZyIdcCGEyH9KKf78dR8xXy3hxIofyUxJpUy9WviE9aRG\nz1ewd823M5HEUy7dqDhxJ9W8ZP3YnVQyFVhr8Hwxh6wJ647ULO6AndWj9A7Ek3T6tGlZ+uMY1LZw\n4UIuXbrE2LH5trDyoQwGA9b5OHVOr9cTGRnJ119/DcDNmzdxdnamVq1anD592rydv78/kZGReHh4\n0KhRI3755RdKlCjBpUuX+Pnnnx/6Oqanp2NnZ0dmZiY1atTgwIEDJCUl4ezsTPHixVm/fj3Lli1j\n8eLFD6y3T58+XLlyhQkTJtC0adMcP4uMjOT06dP3hfBTp05RtWpVALp168bgwYNp1qwZly9f5tln\n/5+9+46rsu7/OP662DgQUFwoLsQUcOQCF0NAc+BGxTRUsLIsLSW7+aVpOepOUtOycA/IlWmas8Q0\ny9QcuPceqCxR9vn+/jh4ksTRnXBQPs/Hg0ee61zf6/qcc9R8n++qzs2bN2nZsiXHjx8H9P//CQwM\n5Nq1a6xatYoqVao88fspnl3SA/6XkNzz3gRygNpAzwKsSQghRBGgaRqOLZrSfv5UXr3yJ21nTkAp\nxU9D/8Osyi+yYeAILu/czbOwmKco2ixMNNxtrBlQ1Z5Ityp837Qmn9StTFBlO3KUIuZyIiOPXKHL\nH2cZdfgySy4lcPh2Gtk6+b1nDM7OsGIF7NgBVavCoEHw4ouwefOj2+Xk5BAcHIyXlxejR4/G2Tnv\n7gv3Pw4NDSU2NhaAcePG4enpSfPmzVm3Tr8Rz4cffki/fv0IDAykYcOGHDt2LN97xsbG0qxZM3x8\nfBg4cCAAcXFx+Pn54evrS1BQEGlpaQBUq1aNoUOH0qVLF7KysggNDcXHx4dWrVrxxx9/ADBy5Eg8\nPT3x8fFh6dKlADg5OfHqq6/i4eHByJEjAfJtfy9UxsbGcvfuXTw9PTl79iyRkZGsW7cOb29v9u7d\nS9myZR/o/c7IyODOnTvUqFEDCwsLWrdubajpScOphYUFAOnp6Tg5OVGiRAkqVqxI6dKlAbC0tMTM\nTL8mw9ChQ1m4cCE6nY527dqxa9cuAHbt2kXFihXz3PPIkSM0a9aMjh075unJv9+98H3/fczNzale\nvToA1tbWmNz35dry5ctp164dJWUfPPE8Uko99AcwBRY+6pzC+GncuLESQghhfDqdTl3dvV9tDBul\nppWqrT6jsprn6qP2fP6NunvzlrHLE8+p21nZ6reEVPXV2RtqyP7zqu3Ok6rtzpOq4++n1PtHLqul\nlxPU8dtpKlunM3apxY5Op9TSpUrVqKEUKNW+vVJxcfmfu3LlSvXqq68qpZTasWOHqlatmpo3b576\n6KOPlFJK1apVy3Du4MGD1datW9W+fftU27ZtlU6nU4mJiap27doqJydHjR07Vr399ttKKaWWLFmi\n3n333XzvOWzYMLVx40allFI5OTlKKaVat26tzp8/r5RSaurUqeqLL75QSillbm5uOP7VV1+pSZMm\nKaWUunbtmmrRooVSSql69eqprKysPNeztLRUV69eVTqdTtWpU0clJyc/tH18fLxq0qSJ6tOnj1q6\ndKlSSqnbUSX/AAAgAElEQVStW7eqwYMHP1D7/e/H5cuXlZeXl+HxmDFjVHR0tOHx/e/jo/Ts2VM5\nODioMWPG5DmempqqPDw81OHDh5VSSqWlpSlPT081ZMgQ9cknnxjO69y5s7p586Z65ZVX1Pbt25VS\nSgUGBqqdO3cqpZQKDQ1VY8eOfej9Y2NjDZ/n/cLCwtTcuXOVUkplZmYqf39/lZGRoby8vNTFixcf\n+7rE8wHYo4ycOwvj55FLjyqlcjRNq6lpmrlSKuufBHtN0+oAS+87VBMYAyzMPV4dOAcEKaUS/8m1\nhRBCGIemaVRs0oCKTRrgHTmWY9+uJi4qmtgRH7J99CRq9+hA/bBgqnh5yt7P4qkpZWaKh11JPOz0\nvWHJWTkcSEnjQHIa+1Lu8s35u/rzTE1okLsHecMy1lS3tpDfhwVM0yAoCLp0+WuhtgYN9Au1tWgB\ne/ZAQAAEBuqHITdt2hSA5s2bP/KzUbkja44fP46HhweapmFra0v58uW5efMmAI0bNwb0PdCbH9L9\nPmrUKD755BMWLFiAr68vgwcP5vDhwwwYMADQ9wb75e6v5ujoiJOTE6DvJd+5cycbNmwAIDk5GYDJ\nkyczaNAgTExMGDVqFK6urjg6OlIxd0W6KlWqkJiY+ND2Dg4OBAQEsGrVKmJiYp74fba3tycpKcnw\nODk5GXt7+yduf8/y5cu5e/cubdq0oXfv3tSrV4+srCx69+7Ne++9R7169QCwsrJi4MCBhIeHc/Xq\nVQDWrVtHkyZNKFu2bJ5rnjx5kmbNmgH6z/XSpUucOnWK0NBQAGbPno2zszMHDx5k9OjR/PDDD3k+\n+48++ggbGxvDCIVvvvmGl19+2dBjL8Tz5kn2/jgNbNc0bTVgWGpDKTX9UY2UfuG2hgCappkCl4FV\nwGjgJ6XUZE3TRuc+fu9/K18IIYSxWJQqSf3QYOqHBnPj4BEORkVzdNFKjkWvwq52DdxCg3ELCaJE\n+XLGLlU8Z8qYm9KmbCnalC0FwK3M7Nw9yPVzyH9N1P9zxdbclIa5gbyRjTWVrcwlkBeQewu1hYTo\nQ/iMGRAVpX9u7lz49lv9EPMtW7YwePBgdu9+cPpKmTJluHbtGg4ODuzfv5/+/fvj4uJCVFQUSimS\nk5OJj4+nXDn93yn3f5Z/v9Y9ZcuWZcaMGSilcHFxoVevXri5uRETE0OlSpUA/dxoIM+8b1dXV5yd\nnRkxYoThHKUUfn5+dO7cmR07djBmzBhWrlz5wO8ppVS+7QEOHTrEzp07CQwMZPr06bz11luGedmP\nYmVlRcmSJblw4QKVKlVix44djB079pFt/l5TVlYWFhYWWFlZYW1tjbW1NTqdjpdffpmuXbvStWtX\nw/lXr15lzpw5fPDBB/znP/8hMjKS/fv3Exsby86dO4mLi+PYsWMsXboUZ2dn9uzZQ/Pmzdm9ezeV\nKlXC2dnZMIUA4NSpUwwaNIiVK1caPj+AGTNmcPLkSRYsWGA4dujQIU6fPk10dDQHDx6kf//+rF+/\n/oFh+UI8q54kgF/I/SmR+/O/aAucVkqd1zStC+Cde3wBEIsEcCGEeKY51K9H2y8+ps0nEZxYsZa4\nqGi2vzeBXyM+oVaXAOoPeZlqfq3RZAEtUQDKWpjR1qE0bR30c1mvpWflhvE09iXfJfZWKgAOFmaG\nMN6gjDUVLM2NWfZzqWxZ+PxzuHEDlizRH0tLg4kTYf36rixfvhwvLy+aNm2KpaVlnrbh4eH4+/vj\n6upK+fLlAWjUqBEtWrTA09MTnU7HlClT8swVfpzIyEg2bdqETqfD398fGxsbZs6cSUhICFlZ+sGd\n77//Pv7+/nnahYWFMWzYMHx8fABo0qQJEydO5KWXXgL0Pedjxox56H3zaz9+/HiGDBnC4sWLcXJy\nIiAggNatW+Pu7s7p06fp2bMnY8eOJSkpiXHjxnHlyhX8/PwYOnQo3bt3Z9q0afTt2xelFEOHDsXO\nzs5wr507d5KRkcGePXv4/vvvH6gnOzubgIAAQP9lQFBQEDVq1GDFihWsW7eO69evs3jxYtzd3Zk2\nbRoDBw5k6tSpeHh40KdPH3788UciIiKIiIgAICQkhNDQUKpVq8bEiRMZNGgQZcuWzROu7zd8+HCS\nkpJ45ZVXAP3IhKZNm/L2228b5tQD/PTTT3z11VeGdt7e3ixatEjCt3iuPHYV9KdyE02bC/yplJqh\naVqSUso297gGJN57/Lc2Q4AhAE5OTo3Pnz9f4HUKIYR4em4dPUnc7GgOL1hO+q1EbKpXxX1wH1wH\n9qa0YyVjlyeKCaUUl9Oz2Jecxr6UNA4k3yU5W7+9WWVLc30gL2NNQxtr7CyepF9CPIk1a6BvX7h7\nF0xMQKcDOzt4++0sRowwJy7uVyZNmsTatWuNXaoQoogoLqugP8k2ZJuBB05SSgU80Q00zQK4Argq\npa7fH8Bzn09UStk96hqyDZkQQjy7sjMyOPX9BuKiornw0w40ExNqdGxL/SH9qNHeBxMzCT2i8OiU\n4tzdTPanpLEvOY0DKWnczd1vvLq1BQ1ye8jr21hjY154W1E9j9asgU2b9HPAK1fW7xv+ww89MDO7\nScWKGcTEfE2rVg2e2v3Cw8MNK4ODftXvTZs2PbXrPyt+/vlnxo8fn+fYmDFj8PX1NVJFQjwZCeD3\nTtC05vc9tAJ6ABlKqVFPdAP9kPM37gV2TdOOA95KqauaplUCYpVSdR51DQngQgjxfEg6fY642TEc\nmreUu9dvUMqxIm6D+uA+uC821WSfV1H4cpTi5J0M/YJuyWkcup1Guk6hAc4lLQ1zyN1trClhKlMo\n/q29e/VBfM0asLWFESPg7behTBljVyaEMDYJ4I9qpGm7lFLNH38maJr2LbBRKTUv9/F/gVv3LcJm\nr5QKf9Q1JIALIcTzJScrizNrtxAXFc3ZDVsBqB7ghXtYMLUCAzA1l7m5wjiydIpjqekcyO0hP3I7\njSwFJsALpawMK6y7lrLCUgL5/+zPP/VBfPVqfRAfPlwfxG0fmJQohCguJIDfO0HTbO57aAI0Br5S\nSrk89uKaVhL9Am41lVLJucfKAssAJ+A8+m3IEh51HQngQgjx/Eq5cJlDc78lbk4MqZeuUqJ8OVwH\n9sY9tC92zjWMXZ4o5jJydBxOTdevsp6cxrHUdHSAuQb1Sv81f7xOKSvMTWSF9X9q3z59EP/+e30v\n+PDh+h8J4kIUPxLA752gaRfRzwHXgGzgLDBOKbWt4MvTkwAuhBDPP11ODuc2bOVgVDRn1m5B5eRQ\n1acF7mH9qN2tPWayCq4oAu7m6IjLXWF9f0oap+5koAArEw23e4G8jDXOJS0xlS3Pntj+/fogvmoV\n2Nj8FcTtHrlKkBDieSIBvAiRAC6EEMVL6pVrHJ6/jLjZMSSfvYCVvS31BvSkflg/ytZ77AAsIQpN\nSlYOB1PurbCexrk0/X7PJU1NqG/zVw959RIWmEggf6wDB/RB/Lvv9EH87bf1Qdze3tiVCSEKmgTw\neydo2mvAt0qppNzHdkAvpdQ3hVAfIAFcCCGKK6XTcf6nHcRFLeHU9xvRZWVRuWVT6ocF49KrM+Yl\nrI1dohB5JGRmG+aP709O40qGfq/pMmYmNChTgka5odzRyhxNAvlDHTyoD+IrV0Lp0vogPmKEBHEh\nnmcSwO+doGn7lVIN/3Zsn1KqUYFWdh8J4EIIIe7G3+TwwhXERS0h8cQZLMvY8EK/btQPC6Z8Qzdj\nlydEvq5nZOlXWM8dtn4jMxuAchamNLQpYdiHvIKlLDyYn4MH4aOPYMUKfRB/6y19EC9b1tiVCSGe\ntuISwJ9k+c48m2BqmmYCyP8lhBBCFKoS5cvRdORrDDz2C723raRmZz8OzfmWRY3asbhpBw5+s5jM\n26nGLlMUovnz5/Pxxx8bu4xHqmBpTkB5G95zrkD0i9WY39CJ4TUdcCttzZ6ku3x2Op5+f56n/5/n\nmHI6np9u3OZWbkh/EufOnWPNmjWGxx9++CF169bF29sbb29vcnJyAPjzzz9p2bIlLVq0YP78+Q+9\nXmRkJG3atKFly5YMGDCArKws0tLS8Pf3p1WrVnh4eLB+/frH1pWVlUXt2rXz/XwuXbqEt7f3A8c3\nbtyIh4cHXl5edOjQgVu3blG/Pnz7bQ4DBozE2tqPCRO8cXI6QkQE/Pzz41/T9OnTH/peCSGEMTxJ\nAN+saVqMpmlemqZ5AUuALQVclxBCCJEvTdOo0saDDou+4NUre/GZNp6c9Aw2v/oesyo1YlPYKK7+\nsY9nYY0T8fy5F3jzo2kaVawt6FShDP/nUpHlTaoT1aAqb1QvR80Slmy/lcqkU9fpvfccg/af54sz\nN/jlVirJWQ+/Zn6hMiIigtjYWGJjYzE11fejDBs2jMWLFxMbG8v06dNJTEzM93pvvvkmv/zyC7/+\n+isAmzZtwszMjKioKHbs2MHatWsZPnz4Y9+Hr7/+mhdeeOGx592vbt26bNu2jW3bttGpUyemTp0K\nwDfffEPLli5cv76FuLhYOnWqx6RJ4O8/jAYNFrNixcNfkwRwIURR8yQBfBTwKzAi92cHMLIgixJC\nCCGehLW9HS++NZgBB7fQ97c11OkdyNHoVUQ378Sihv7smzGP9KRkY5cpnoKcnByCg4Px8vJi9OjR\nODs753n+/sehoaHExsYCMG7cODw9PWnevDnr1q0D9L3E/fr1IzAwkIYNG3Ls2LF87xkbG0uzZs3w\n8fFh4MCBAMTFxeHn54evry9BQUGkpaUBUK1aNYYOHUqXLl3IysoiNDQUHx8fWrVqxR9//AHAyJEj\n8fT0xMfHh6VLl6JpGl4v1GbDh++xMaQbNZfO5Ev3KgysZMOfH7/Ph706E+jrjf/C7xmy/zz1/drz\n5Q/ruZGSiqenJ2fPniUyMpJ169bh7e3N3r17Afj0009p1aqVIXxmZGRw584datSogYWFBa1btzbU\n9HcWFhYAKKXQ6XQ4Oztjbm5O9erVAbC2tsbERP/Px2XLljF48GAAxo4dS2RkJACpqamsX7+eHj16\nGK6bmppKx44d8fPzY+LEifne28nJCUtLSwAsLS0xMzMDYPny5Zw/fx4fHx9mzXqTRYsy2bs3Axub\nO8yaVQMXFwtMTFqzeXPe1xQdHc3ly5fx9vZmwoQJD7xXISEhhISE0L59e7y8vLh69Wq+dQkhxNP0\nJAHcHPhSKdVVKdUV+AowK9iyhBBCiCenaRqVPRrTbs4UXru6D79Zk9HMzPh52P/xdaUXWT/gLS5t\n3yW94s+w1atXY2Njw7Zt2+jcuTPZ2Y8fpr1//362b9/Ozp072bhxIyNGjECn0wHg4ODAmjVrCA8P\nZ/bs2fm2/+677/j444/ZunUrc+bMAeCNN95g7ty5/Pzzz7Rs2dJw/OrVq4wePZq1a9cyZ84cnJ2d\n2bp1KytXrmTEiBEArF+/nu3bt7N161Z69eoFQHx8POPGjeO3337jx3XrqKjL5PaG7+jTpD7Xd//G\n6u9WkDTrU2zMzXB4dzyjw9/DvUcfbHr052dTGzoOGUr7Dh2IjY2lcePGDBs2jAMHDrB582bWrFnD\n9u3buXXrFrb3baxta2tLQkLCQ9+3CRMm4OLiQkJCAlWrVs3z3IgRIwgPDwcgKCgIExMThg8fzr59\n+wyv87///e8DveRRUVG0atWKLVu20LJly0d+btevX2fGjBm8/vrrAFy+fJlKlSqxdetWrKysmDt3\nLhUq3KJBA1sOHYLOnWHvXlsGDEhg9Gi4cUN/neDgYBwdHYmNjSUiIoJ33nmHjh07Gt4rgDp16rBh\nwwaGDBnCJ5988si6hBDiaXiSAL4VKHnf45LAzwVTjhBCCPHvWNqUpsGr/em/dwMv792Aa0gvTn2/\nkaVtujO/njd7Ir/m7s2Hhw9RNJ08eZKmTZsC0Lx580euIH7vi5bjx4/j4eGBpmnY2tpSvnx5bt68\nCWAIYE5OTty6dSvf64waNYo1a9bQr18/5s2bB8Dhw4cZMGAA3t7exMTEcO3aNQAcHR1xcnIC9L3k\nS5cuxdvbm969e5OcrB+FMXnyZAYNGkRISAhHjx41tKtYsaJ+eHqVKiQmJhra+/n6MHZwCBZpd/jM\n1ZH17ZoQ1LE9JudO8kL7QJZdSeSb87fYcuM27xy6xMKLCVwxL0GGTrEjNYtu3bqxJHYHZezsSEpK\nMryu5ORk7B+xnHhERAQnTpygRo0aeeZWf/TRR9jY2BhGAwCEh4czbdo0IiIi0DSN69evs2/fPvz9\n/fNc88SJEzRr1szw+d3TqVMnvL29WbFiBQApKSn07NmTWbNmUb58eQDs7e1p3749AO3bt+fgwYPY\n29uTlJREvXoQEwP9+yfj6WnPJ5/MoGJFb+rXDzUE8Ue5v6bjx48/voEQQvxLT9KTba2Uun3vgVLq\ntqZpJQqwJiGEEOKpqPCiOxW+mozXZ2M4vuwHDkYtYdu749nx/mScu7XHPSwYJ5+WaCZP8n20MCZn\nZ2e2bNnC4MGD2b179wOjGcqUKcO1a9dwcHBg//799O/fHxcXF6KiolBKkZycTHx8POXKlQPIE+Af\nNjKibNmyzJgxA6UULi4u9OrVCzc3N2JiYqhUqRIAmZn6fb/vzbUGcHV1xdnZ2dAjnJmZiVIKPz8/\nOnfuzI4dOxgzZgwrV6584IsEpVS+7QFOHDnC6b27GdCjG5V//o7Jb7zJkqTzzLUwIV2nWHQpgTlH\nU7C2sSFLpzj03ToqdejOztQsSpYsyYULF6hUqRI7duxg7Nix+b7m9PR0rKys0DSNMmXKUKKE/p98\nM2bM4OTJkyxYsMBwrk6n44033mDevHm89957bN68mbi4OG7cuEH79u25fPkyGRkZNGjQgNq1a7Nn\nzx7atm3L7t27DddYu3at4ddpaWl069aNiIiIPCHd29ubPXv24OzsbPivlZVVntd05MgONm8ey7Vr\n7fj44zf59luoXh2srMy4dk1HxYomWFhYPDBy4v6aXFxc8n1PhBDiaXqSAH5X07QGSqkDAJqmNQTS\nC7YsIYQQ4ukxL1kCt4G9cRvYm5uHjnEwKpqji1ZyfOkabGtVxy20L24hQZSsWN7YpYqH6Nq1K8uX\nL8fLy4umTZsa5grfEx4ejr+/P66uroae00aNGtGiRQs8PT3R6XRMmTLFMH/5SURGRrJp0yZ0Oh3+\n/v7Y2Ngwc+ZMQkJCyMrS7+/9/vvvP9DbGxYWxrBhw/Dx8QGgSZMmTJw4kZdeegnQh9wxY8Y89L75\ntR8/fjxDhgxh8eLFODk5ERAQQOvWrendshkLJnxI/PgRfBTxf4yfPoWjx4+TkJmNXaPmlGvhjV+5\n0thNm0bfvn1RSjF06FDs7Ozyvfe7777L4cOHDfO/x40bR3x8PG+//bZh/jrATz/9xIQJEwgICCAk\nJIS0tDQiIiL49NNP8fPzA/Sr1F+6dInOnTtz+/ZtgoKC2Lx5M25u+W8bOHPmTA4cOMDkyZOZPHky\n/v7+REREEB4ezsCBA5k1axb29vYsWrQIgGn5vCY7O1iyBD74AD7+GJYs6YmjY0e8vV8iKmogp0+f\npmfPnoYvIE6fPk27du1IS0sjJibm8b8phBDiX3qSfcCbAzHAeUADqgLBSqldBV+enuwDLoQQ4mnL\nTk/n5MofORgVzaVtv2FiZkbNzv7UDwumWoAXJqamj7+IKFRZWVmYm5vz66+/MmnSpDy9p+Ivm2+k\n8MmpeMPj8FoOBJQvY8SKjOf4cX0Qj44GS0sYOhRGjYIKFSAkJITQ0FBatWpl7DKFEBSffcAf2wOu\nlNqlaVpdoG7uoSPAw/fDEEIIIZ4BZlZW1O3Xnbr9upNw4jRxs2M4PH8Zp1atp7STI26D+uA2qDc2\nVR2NXarI1adPH27evElGRgZff/31U712eHh4npXBLSws2LRp01O9R2HxLVcagOSsHGadv5XvfPmE\nhAS6d++e51hgYCDvvPNOodRYWOrUgUWL/uoR//xz+PJLeP11yF3AXgghCtVje8DznKzfBzwY6KKU\nqlhgVf2N9IALIYQoDDmZmZxavZG4qGjOb/4FzcSEGi/54B7Wj5od22JiJpuAiGdHjlKMOHSZi+mZ\nzG3ghJ2F/P49eVIfxBcvBgsLeO01CA+H3Cn9QggjKi494E8yBL0J+tDdAygHvAWsVkrdLPjy9CSA\nCyGEKGzJZy8QNyeGQ3OXcufqdUpWqoDbwCDcQ4MpU8PJ2OUJ8UQupGXy6oGLeNqVYEwdSZn3nDoF\nEyboe8fNzeHVV+G99ySIC2FMxT6Aa5o2HugNXEM/B3wl8IdSqkbhlacnAVwIIYSx6LKzObPuJ+Ki\nlnB2/VaUToeTX2vqD+mHc5d2mFpYGLtEIR4p5nIicy7cYoxLRdqULWXscoqU06f1QXzhQn0QHzJE\nH8QrVzZ2ZUIUPxLANe0WcBiIBH5USmVqmnZGKVWzMAsECeBCCCGKhtuXrnBo7lLi5sRw+8JlrB3K\n4vpKL9xD+2Jfx9nY5QmRrxylGBZ3ifjMbOY0cKKMuSww+HenT8PEibBgAZiZ/RXEHWUJCCEKjQRw\nTTMH2gF9AS9gM9AecFRK6QqtQiSACyGEKFp0OTmc3/wLcVHRnF6zCV12NlXaeOAeFkztHh0wt7Y2\ndolC5HHmTgZD4y7iVbYU79cutGV8njlnzvwVxE1NISwMRo+WIC5EYSj2ATzPSZpmDQSiD+PNgc1K\nqQEFXJuBBHAhhBBF1Z1r8Ryav4xDs2NIOn0OKztb6r7cHfewYBzc6z7+AkIUkoUXE1h4KYGP6lTC\n076kscsp0s6e1Qfx+fPBxOSvIF6lirErE+L5JQH8YQ00zRborpSaWzAlPUgCuBBCiKJO6XRcjN3J\nwahoTn23npzMTCp5vIh7WD9e6B2IeckSxi5RFHNZOsXQuIukZOUwp6ETpcxkKPrjnDunD+Lz5umD\neGioPohXrWrsyoR4/kgAL0IkgAshhHiW3L2ZwNFFKzgYFU3C0ZNYlC7FC8FdqT/kZSq86G7s8kQx\ndiI1nTfjLhHgUJqRzhWMXc4z49w5mDQJ5s7VB/HBg+H99yWIC/E0SQAvQiSACyGEeBYppbj8627i\nopZwYtlastPTKf+iO/XDgnkhuBuWNqWNXaIohmafv8m3V5KYXLcyTWxlZMY/cf78X0Ec/griTrIz\noRD/mgTwIkQCuBBCiGddelIyR5esIi5qCTcOHMGshDV1egdSPyyYSh6N0TTN2CWKYiJTp+O1gxdJ\nz1HMbuhECVMTY5f0zLlwQR/E58zRPx40SB/Eq1Uzbl1CPMskgD+qkab5KKW2FkA9+ZIALoQQ4nmh\nlOL6ngMcjIrmWPQqsu7cpZzbC7iHBVP35e5Y29sZu0RRDBy5ncbbhy7TuUIZ3qrpYOxynlkXLsDk\nyfogrhQMHAj/+Y8EcSH+FxLAH9VI0y4opQptsI0EcCGEEM+jzNupHPt2NXFR0VzbvR9TS0tcenbE\nPSyYKm08pFdcFKivzt1g5dVkPqtXmYZlZCj6v3Hxoj6Iz56tD+IhIfogXr26sSsT4tlR7AO4pmnf\nPawNEKCUKrT9KySACyGEeN7FHzhMXFQ0Rxd/R0ZyCnYuNXEPDcb1lV6UKF/O2OWJ51B6jo4hBy6i\nUHzTwAlrGYr+r126pA/iUVGg0/0VxGvUMHZlQhR9EsA1LRF4Bbjz96eAJUqpQls6UwK4EEKI4iLr\nbhonlv/Awahorvy6GxNzc5y7tsM9rB/V2rZCM5GQJJ6e/cl3GXnkCj0qleH16jIU/Wm5dAk++UQf\nxHNy4JVXICJCgrgQjyIBXNM2AJ/kN9db07SdSqkWBV3cPRLAhRBCFEe3jpwgbnY0hxeuIP1WImVq\nOOE2uA9uA3tTqnJFY5cnnhPTzsSz9noK09wcqVfa2tjlPFcuX9YH8W++0QfxAQP0QbxmTWNXJkTR\nIwFc0zRVRJZIlwAuhBCiOMvOyODUqg0c/GYxF7fuRDM1pWbHtriHBVPjJV9MTE2NXaJ4ht3N0RG6\n/wJWphqz6lfFQkZZPHVXruiD+NdfQ3b2X0G8Vi1jVyZE0VHsA3i+J2tae6XUhgKsJ18SwIUQQgi9\nxFNniZsdw+H5y7h7/QalqlTCbVAf3Af1waZaFWOXJ55Re5LuMvroFfpUtiW0mqw5UFCuXIFPP9UH\n8aws6N9fH8SdnY1dmRDGJwE8v5M17U+l1IsFWE++JIALIYQQeeVkZXHmh80cjIrm3MZYAKq386Z+\nWDA1O/tjam5u3ALFM+ezU9fZdOM2X7hXoU4pK2OX81y7elUfxGfN0gfxl1+G//s/CeKieJMAnt/J\nmrZPKdWoAOvJlwRwIYQQ4uFSzl8ibk4Mh+Z+S+rla5So4IBrSBDuoX2xc5ZVn8STSc3OYfD+C9iY\nm/Kle1XMTWQbvIJ27dpfQTwj468gXru2sSsTovBJAM/vZE3zVEr9VoD15EsCuBBCCPF4uuxszm7Y\nSlxUNGfW/YTKycHJtyXuYf1w7tYeM0tLY5coirjfE+/wf8euMqCKPQOq2hu7nGLj2jX473/hq6/0\nQbxfP30Qd3ExdmVCFB4J4PdO0DRL4FWgFaCAHcA3SqmMgi9PTwK4EEII8c+kXrnGoXlLiZsdQ8q5\ni1iVtcN1QE/cw/pRtq50r4mHm3TyGrG3UvnKvSo1S8qXNoXp+nV9EP/yS30QDw7WB/E6dYxdmRAF\nTwL4vRM07VsgA1iceygYsFZK9Sng2gwkgAshhBD/G6XTcf6nHcR9s5hTqzehy8rCsVUz3MOCcenZ\nCfMSsu2UyCs5K4fBBy5Q3sKML9yrYKrJUPTCFh8Pn30GM2dCejr07asP4i+8YOzKhCg4EsDvnaBp\nR5RS9R53rCBJABdCCCH+vbvxNzm8YDlxUUtIPHkWyzI21H25O+5hwZRv4Grs8kQR8sutVMafuMZg\np7L0dbQzdjnFVnw8TJkCM2ZAWpo+iH/wgQRx8XwqLgH8STZ6PKBpWtN7DzRNawzsK7iShBBCCFEQ\nSu/uVQ4AACAASURBVJQvR9NRrzPw+HaCYldQs5MfcbNjWNQwgCXNOnJwdjSZqXeMXaYoAtqULUVr\n+5IsvJjA+buZxi6n2CpfXr9/+LlzEB4Oq1dDvXr6oelHjxq7OiHE/+JJesAPAfWAM7mHagBHgSxA\nFca2ZNIDLoQQQhSMtIREji5aycGoaG4dPo55qZK80Lcr9cOCqdCkAZoMPy62EjOzGXTgAlWtLPjc\nzVGGohcBN2/qe8S/+ALu3oXevfU94vUKbVyqEAWnuPSAP0kAr/Wo55VSp59qRfmQAC6EEEIULKUU\nV3/fy8GoaI4vXUP23TQcGtTDPawfdft1w8q2jLFLFEbw043bTDp1ndeqlaNnZVtjlyNy3bwJkZH6\nIH7nDgQF6YO4q8wkEc8wCeD3n6RprkDr3IfblVKHC7Sqv5EALoQQQhSejOQUjkavIi4qmvh9hzCz\ntsKlVyfqD3mZyi2aSK94MaKU4oPjV9mXnMY39aviaG1h7JLEfW7d0gfx6dP1QbxXL30Qd3MzdmVC\n/HMSwO+doGlvAkOB73MPdQFmKqW+LODaDCSACyGEEMZxfe9BDkYt4Vj092TeTsW+bm3qhwVTb0BP\nrMvKPtHFwc2MbAYfuECtkhZ8Vs8RE/kCpsi5dQs+/1wfxG/f1gfxMWMkiItniwTweydo2kGghVIq\nNfdxKWCnUqp+IdQHSAAXQgghjC0z9Q7Hl/1AXNQSrv7+J6YWFjh3f4n6YcFU9W6BZvIk67qKZ9X6\n+BSmnI7nrRoOBFaU6QhFVUKCPohPm6YP4j176oO4u7uxKxPi8SSA3ztB0+KAxkqpzNzHlsAepVSh\n/VGWAC6EEEIUHTfijhIXFc2RRSvJSErGtlZ13MOCcQ0JomQFB2OXJwqAUorRR69w5HY6sxs6UcHS\n3NgliUdISICpU/VBPCUFunfXB/EGDYxdmRAPV+wDuKZpZkqpbE3TwoG+wMrcp7oBMUqpzwqpRgng\nQgghRBGUlZbGyZU/EhcVzaVffsfEzIxagQG4hwVTzb8NJqamxi5RPEXXM7II3X+BeqWtmFy3sqwF\n8AxITNQH8alT9UG8Wzd9EG/Y0NiVCfEgCeCa9ue9LcY0TWsGtMp9artSanch1QdIABdCCCGKuoTj\np4ibHcPh+ctIu5mATbUquA3ug9vA3pSuUtnY5YmnZPW1ZL44e4N3a5XnpfI2xi5HPKHERH1v+NSp\nkJwMXbvC2LESxEXRIgFc0/YppRoVcj35kgAuhBBCPBuyMzI4vXojB6OiubBlO5qJCTU6+OIe1o+a\nHXwxMTMzdoniX9Apxcgjlzl9J5M5DZwoZymf57MkKUkfxD//XB/Eu3TRB/FGReJf/KK4kwCuaZeA\nyIc1VEo99LmnTQK4EEII8exJOnOeQ3NiODRvGXeuXqdU5Yq4DgzCfXBfytRwMnZ54n90OS2TIQcv\n0qiMNR/VqSRD0Z9BSUn6FdM//1z/68BAfRB/8UVjVyaKs+ISwB+1ZKkpUAoo/ZAfIYQQQoiHsq1Z\njVYTRjPkwh90+X4uDg3r8cekGcyu1YIV7YI5sWItOZmZxi5T/EOO1hYMrFqW3xPv8vPNVGOXI/4H\ntrb6ueDnzsH48fDLL9C4sT6I791r7OqEeL490RxwY5MecCGEEOL5kHLxMofmLuXQnBhuX7yCtUNZ\nXEOCcA/ti71LLWOXJ55QjlIMP3SJS+lZzG3ghJ2FDEV/liUnwxdfQGSkfr54p076HvEmz31fpChK\niksPuMwBF0IIIUSh0+XkcH7TNg5+s4TTP2xG5eRQxcuT+mHB1O7RATMrK2OXKB7j/N1MXjt4AU+7\nkoypU8nY5YinICXlryCekAAdO+qDeNOmxq5MFAcSwDXNXimVUMj15EsCuBBCCPH8Sr16ncPzlxE3\nO4bkM+exsrelXv8euIf1o5xrHWOXJx4h+nICcy8kMMalIm3KljJ2OeIpSUmBGTNgyhR9EO/QQR/E\nmzUzdmXieVZcAvhD54AXlfAthBBCiOdbqUoVaP7+MAaf3EHPLd9Szb8N+79cyAI3X6JbBHJo3lKy\n7tw1dpkiH70r2+FS0pLpZ2+QnJVj7HLEU2JjA//5j36O+MSJsGsXNG+uD+K7dv27a8+fP5+PP/74\nqdT5rDp37hxr1qwxPP7www+pW7cu3t7eeHt7k5Oj/7P0559/0rJlS1q0aMH8+fMfer3IyEjatGlD\ny5YtGTBgAFlZWaSlpeHv70+rVq3w8PBg/fr1j60rKyuL2rVr5/v5XLp0CW9v7weOb9y4EQ8PD7y8\nvOjQoQO3bt0CYPjw4Xh4eODh4cHkyZPztElISMDe3p7Fixc/tqbn0aMWYRNCCCGEKDSaiQnV2ram\n07df8erlvXh99gHpCUlsHPQOsyq/yJbXR3P9zzhjlynuY6ppjKxVntvZOXx57oaxyxFPWenS8P77\ncPYsTJoEf/wBHh7w0kvw++/Gru7puRd4C8vfAzhAREQEsbGxxMbGYmpqCsCwYcNYvHgxsbGxTJ8+\nncTExHyv9+abb/LLL7/w66+/ArBp0ybMzMyIiopix44drF27luHDhz+2rq+//poXXnjhH72WunXr\nsm3bNrZt20anTp2YOnUqAG+88Qa///47O3fuZPXq1Zw+fdrQZtKkSbRo0eIf3ed5IgFcCCGEEEVO\nCYeyNHn3NQYe3UbvX77DuUsAh+cvZ3Hj9ixu8hIHvl5ERsptY5cpgJolLQl2tOOnm6n8lnDH2OWI\nAlC6NIwere8R/+QT2LMHPD2hfXv47beHt8vJySE4OBgvLy9Gjx6Ns7NznufvfxwaGkpsbCwA48aN\nw9PTk+bNm7Nu3TpA30vcr18/AgMDadiwIceOHcv3nrGxsTRr1gwfHx8GDhwIQFxcHH5+fvj6+hIU\nFERaWhoA1apVY+jQoXTp0oWsrCxCQ0Px8fGhVatW/PHHHwCMHDkST09PfHx8WLp0KQBOTk68+uqr\neHh4MHLkSIB82yulCAwMJDY2lrt37+Lp6cnZs2eJjIxk3bp1eHt7szd32flPP/2UVq1aMX36dAAy\nMjK4c+cONWrUwMLCgtatWxtq+jsLCwsAlFLodDqcnZ0xNzenevXqAFhbW2Nioo99y5YtY/DgwQCM\nHTuWyEj9ztKpqamsX7+eHj16GK6bmppKx44d8fPzY+LEifne28nJCUtLSwAsLS0xM9MvyFi7dm0A\nTExMMDMzM3ypcOHCBa5evUqTYrzCnwRwIYQQQhRZmqZRpXVzXlo4nVev7MX3i4/RZWWx5bXRzKrU\niI2D3+XK73t52Jo2onAEO9pTo4QFU8/Ek5otQ9GfV6VKQXi4vkf800/1W5a1aAHt2uUfxFevXo2N\njQ3btm2jc+fOZGdnP/Ye+/fvZ/v27ezcuZONGzcyYsQIdDodAA4ODqxZs4bw8HBmz56db/vvvvuO\njz/+mK1btzJnzhxA3xs7d+5cfv75Z1q2bGk4fvXqVUaPHs3atWuZM2cOzs7ObN26lZUrVzJixAgA\n1q9fz/bt29m6dSu9evUCID4+nnHjxvHbb7+xdu1aUlJS8m2vaRpz5sxh1KhRDB48mBEjRlCjRg3e\neecdOnbsSGxsLI0bN2bYsGEcOHCAzZs3s2bNGrZv386tW7ewtbU1vC5bW1sSEh4+Q3jChAm4uLiQ\nkJBA1apV8zw3YsQIwsPDAQgKCsLExIThw4ezb98+w+v873//+0AveVRUFK1atWLLli20bNnykZ/b\n9evXmTFjBq+//nqe40uWLKFmzZqGLwPGjRtHRETEI6/1vJMALoQQQohngpWdLY3eHEj//ZsJ3rWW\nF/p25fjSNcR4BrKwvh9/fjGX9MQkY5dZLJmbaIyqVZ7ErBy+Pn/L2OWIAlaqFIwape8R/+9/Yd8+\nfRAPCND3kL/5JqxZAydPnqRp7hLqzZs3R9O0h17z3pdox48fx8PDA03TsLW1pXz58ty8eROAxo0b\nA/pe13tzjf9u1KhRrFmzhn79+jFv3jwADh8+zIABA/D29iYmJoZr164B4OjoiJOTE6DvJV+6dCne\n3t707t2b5ORkACZPnsygQYMICQnh6NGjhnYVK1bUf0FYpQqJiYkPbe/g4EBAQAAHDhwgKCgo35rL\nli2LpmlYW1vTvXt39uzZg729PUlJf/19lpycjL29/UPfv4iICE6cOEGNGjXyzBf/6KOPsLGxMYwG\nAAgPD2fatGlERESgaRrXr19n3759+Pv757nmiRMnaJa78l7z5s0Nxzt16oS3tzcrVqwAICUlhZ49\nezJr1izKly9vOG/Lli3MmzePWbNmGd5jTdOoW7fuQ19HcSCbNgohhBDimaJpGpWaNaJSs0b4fP4h\nx2K+52BUNFvf+oDt4ROo3bMD9cP64dj60f/gF0+XSykrgirb8u2VJLzKlqKJbQljlyQKWMmSMHIk\nvP46zJoFH30Emzfrn5s3D4YNc+bcuS0MHjyY3bt3PzBSpUyZMly7dg0HBwf2799P//79cXFxISoq\nCqUUycnJxMfHU65cOYA8f54fNuqlbNmyzJgxA6UULi4u9OrVCzc3N2JiYqhUSb9dXmZmJoBhWDSA\nq6srzs7Ohh7hzMxMlFL4+fnRuXNnduzYwZgxY1i5cuUDf68opfJtD3Do0CF27txJYGAg06dP5623\n3sLCwiLPaICkpCRsbW1RShEbG0tISAhWVlaULFmSCxcuUKlSJXbs2MHYsWPzfc3p6elYWVmhaRpl\nypShRAn9n70ZM2Zw8uRJFixYYDhXp9PxxhtvMG/ePN577z02b95MXFwcN27coH379ly+fJmMjAwa\nNGhA7dq12bNnD23btmX37t2Ga6xdu9bw67S0NLp160ZERESekL5r1y4++OAD1q9fj7W1NQB79+7l\n+PHjtG/fnlOnTlGyZElcXFwMIb+4kAAuhBBCiGeWRelS1B/yMvWHvEz8/kMcjIrm6OLvOLr4O+zq\n1KJ+WDD1BvSihENZY5daLAyoas+vCXeIPB3P7IZOlDCVwZbFQcmS8O67cOIEfPON/tjdu5CS0pXE\nxOV4eXnRtGlTw1zhe8LDw/H398fV1dXQc9qoUSNatGiBp6cnOp2OKVOmGOYvP4nIyEg2bdqETqfD\n398fGxsbZs6cSUhICFlZWQC8//77D/T2hoWFMWzYMHx8fABo0qQJEydO5KWXXgL0IXfMmDEPvW9+\n7cePH8+QIUNYvHgxTk5OBAQE0Lp1a9zd3Tl9+jQ9e/Zk7NixTJkyhePHj6OUwtvbmw4dOgAwbdo0\n+vbti1KKoUOHYmdnl++93333XQ4fPmyY/z1u3Dji4+N5++23DfPXAX766ScmTJhAQEAAISEhpKWl\nERERwaeffoqfnx+gX6X+0qVLdO7cmdu3bxMUFMTmzZtxc3PL994zZ87kwIEDTJ48mcmTJ+Pv709E\nRIRhnnnXrl0BmDJlCiEhIYSEhAD6Of3Ozs7FLnzDI/YBL0pkH3AhhBBCPKmsO3c5vvwH4qKiubJz\nDybm5jh3a0/9sGCcfFuh/YN/zIt/7vDtNIYfukznCmV4q6aDscsRhWjNGujTB9LSQNNg+XIIDMzC\n3NycX3/9lUmTJuXpPRXifsVlH3AJ4EIIIYR4bt08fJy42dEcWbiC9IQkytSshvvgPrgO7E2pShWM\nXd5z68tzN/juajJT6jnSoIy1scsRhWjNGv1w9PXrISICjh7twc2bN8nIyODrr7+mQYMGT+1e4eHh\neVYGt7CwYNOmTU/t+kVNQkIC3bt3z3MsMDCQd955x0gVPV0SwIsQCeBCCCGE+Dey09M5+d164qKW\ncDH2NzRTU2p28qP+kH5Ub+eNyX1zQcW/l56jY8iBiwB806AqVjIUvdgZPBjmz4ft2/ULtAnxOBLA\nixAJ4EIIIYR4WhJPniFudgyH5y/jbvxNSletjNugPrgN6oONk6Oxy3tu7E++y8gjV+hRqQyvV5eh\n6MXN7dvQoIF+KPqBA/qV04V4FAngRYgEcCGEEEI8bTmZmZz+YTNxUdGc27QNgIpNG2JeqgSlq1ZG\nMzGhXv8eOPk8ev9b8XDTzsSz9noK09wcqVdahqIXNzt2QJs2EBr61+JsQjxMcQngsgq6EEIIIYol\nUwsLXHp0xKVHR5LPXeTQnBj2zZxPRqJ+/140jZIVHXBs1QxTc3PjFvuMCqtWjl2Jd/nsdDyz6lfF\nQhbAK1ZatYL33oPJk6FTJwgMNHZFQhif9IALIYQQQuTKzsjga8fGpN9K1I+dVQore1ucu71EnV6d\nqOrbUsL4P7Q76Q7vH71KX0c7BjvJdnDFTWYmNG8Oly/DoUOQu9uYEA8oLj3g8jWkEEIIIUQuM0tL\nfL/4GIB28z6ny/dzqfGSLyeW/cDK9v2YVbEhG0NHcm5jLDm5ewqLR2tqW5L2DqVZejmRE6npxi5H\nFDILC1i8GFJSICwMnoG+PyEKlPSACyGEEELcR5eTw/4vF9Bw6CuG1dGz09M5t+kXTiz7gdNrNpF5\nO1XfM961PS69OuHUtpX0jD9CanYOg/dfwMbclC/dq2Juohm7JFHIpk6FESNg9mz9CulC/F1x6QGX\nAC6EEEII8Q8YwvjyHzi9OjeM29ni3E3C+KPsTLjDmONXGVDFngFV7Y1djihkOh34+8OuXfpV0WvV\nMnZFoqgpLgFchqALIYQQRpKUlMTChQsBuHbtGp6envj4+JCZmfnE13jzzTdp06YNa9asYfHixTRr\n1ozx48czefJk4uLiHtquX79+/1PN06dP/5/aPUlbZ2fnB46lpKTQokULvL29adasGT/99NMTn6OU\nYtiwYbRu3ZpOnTqRkJAAQEJCAp06daJ169YMGzaMf9oZYWZlhXNgAB0WfcHr8QfosnoeNTr6cmLF\nOr576WVmVWjIhkHvcHbDVhmmfp8W9iXxLVeKJZcTOHMnw9jliEJmYqLfF9zMDAYMgJwcY1ckhHFI\nD7gQQghhJOfOnSM0NJQtW7YQExPDsWPHGDdu3D+6houLCydOnACgXbt2zJo1ixo1ahREuYA+JJ86\ndapA2ub3vE6nQ6fTYWZmxpkzZ+jduze7d+9+onM2bNjA8uXLmTNnDgsXLuTIkSNMnjyZ0aNH4+rq\nSv/+/Rk0aBBBQUG0b9/+f3pN98vOyOD8pm0cX/YDp9dsJjPlNlZ2ttTq2o4693rGLSz+9X2eZclZ\nOQw+cIHyFmZ84V4FU02Gohc30dHQrx9MmAD/+Y+xqxFFifSACyGEEKJARUZGsnfvXmrXrs2YMWNY\nuHAhoaGh+Z67bds2vLy88Pb25rXXXjP07l68eBFvb2++/vprdu3aRXBwMCtWrCAkJIQdO3YAMG3a\nNJo3b46Pjw8LFiwA/uptTk5OJigoiLZt2+Lr62sIwN7e3gwfPpyAgADatm1LRkYGkZGRXL58GW9v\nb+bMmcP8+fPp2rUr3bt3x83Nje3btwMQFxeHn58fvr6+BAUFkZaW9kDbhxkxYgReXl68/PLL6HQ6\nTExMMDPT75qakpJC/fr1H2jzsHO2bdtGp06dAOjcuTPbtm175PF/y8zSklqd/+oZ77pmHjU7teXk\nyh/5rkN/ZlVsxIaBIzi7/mdy/sEoh+dJGXNT3qrhwIk7GSy7kmTscoQR9O0LvXvD2LHw55/GrkYI\nI1BKFfmfxo0bKyGEEOJ5c/bsWdW2bVullFLz5s1TH330Ub7n6XQ61bBhQ5WUlKSUUmr48OHqhx9+\nUEopVatWLcN5Xl5e6uLFi0oppV555RW1fft2FRcXp9q0aaOysrKUUkplZ2fnaffee++pmJgYpZRS\n+/fvVz169DBca9WqVUoppcLCwvK937x581SXLl2UUkr9+uuvhratW7dW58+fV0opNXXqVPXFF188\n0DY/1apVUzt37lRKKRUaGmq4/6VLl1TLli2Vg4ODoY6/y++csLAwtXXrVsN7WKdOHaWUUi4uLkqn\n0ymllPr555/VkCFDHlnXv5WVnq5O/bBJ/dh/mJpuU0d9RmX1hW1dtT5kuDq9bovKzsgo0PsXRR8e\nu6La/3ZKnbtT/F67UOrWLaUcHZWqW1epu3eNXY0oKoA9qghkz4L+MTP2FwBCCCGEeLSbN29y7tw5\nunTpAkBqaip16tR5orZHjhyhVatWhh5i09xVve+Ji4tj27ZtzJo1C8BwHkDjxo0BcHJy4tatW/le\nP79zDh8+zIABAwBIT0/Hz8/viWrVNI1mzZoB0Lx5c44fPw6Ao6MjO3bs4Ny5c3h7e9OpUydCQ0M5\ndeoUPXv25M0338z3HHt7e5KS9L2sycnJ2NnZAWBnZ0dycjK2trYkJydjb1+wC4KZWVpSq5M/tTr5\n64epb/6FE8vXcvK79RyevwxL2zI4d22HS69OVPNrXSyGqQ+r4cD+lAtMOR3P526OMhS9mLG3h3nz\nICAA3n9fv0K6EMWFBHAhhBDCSCwsLMjOzn7seeXKlaNmzZqsXbuWUqVKAZD1hIt7ubq68tVXX5GT\nk4OpqalhWPf9z3t6etKtWzeAPAvAafeFIpW7Zsz9bR92jpubGzExMVSqVCnPNf/e9u+UUuzZs4fm\nzZuze/du2rdvT0ZGBpaWlgDY2NhQunRpAGbPnm1o97BzvLy8WLVqFV27duXHH3/Ey8vLcPzHH38k\nODiYH3/8ke7duz+yrqfpYWH81KoNf4XxLgG4BHV+rsO4vYUZb1R3YPKp63x/LZkelWyNXZIoZP7+\n8NZbMG0adOoET/g9nRDPPJkDLoQQQhhJxYoVsba2pkePHuQ8YklgTdOIjIwkMDAQHx8f2rZty9Gj\nR5/oHq6urnTp0oUWLVrg6+vLokWL8jwfERHBsmXL8PX1xcfH57Erld8L699+++1Dz5k5cyYhISH4\n+vri6+trmGP9uLZmZmasXLkSLy8vbt++TWBgIIcOHaJNmzb4+PjQpUsXpubTVfawc9q1a4e5uTmt\nW7dmyZIljBo1CoDw8HCWLFlC69atMTc3JyAg4JGvuaDcC+MvLZjGa9f3023tApy7BHDq+42s6jiA\nryo0ZEPIcM6s2/JczhlvW64UHnYlmHvhFlfSZbX44mjyZHjhBQgJgcREY1cjROGQVdCFEEIIIYqQ\n7IwMLmzZru8Z/34jGckpWJaxoVaXAOoEdaaaf5vnpmf8ZkY2gw5cwLmkBZ/Vc8REhqIXO3v3gocH\n9OwJMTHGrkYYU3FZBV0CuBBCCFGEHDlyhKFDh+Y5NmTIEIKDg41U0dP3888/M378+DzHxowZg6+v\nr5EqKrpyMjM5v2U7J5b98EAYd+nViWr+bTDLHX7/rFp/PYUpZ+J5q4YDgRXLGLscYQQTJsD//Z9+\ni7K+fY1djfh/9u49vufy/+P4473NTmzOOdUcNoe1lYkIYzPCN4diLeRYhk5qnfNNSkSpCBX5kWEO\n8fWtHEpR380xUpbDcipjE2JsDpsdr98fHz6RkcM+xva832678fl83tf7ut4f/nnuuq7XVVgUwG8g\nCuAiIiJS3NnD+NmZ8dS0IhHGjTG88usfJJw4zdQgHyq5lSjsIcl1lpMDISGQkACbN8NttxX2iKQw\nKIDfQBTARURERP6Sm5XFvu9Ws+PszPjZMN75XlsYbxtyU4XxQ5nZRMbv43Yvd972r3pecT8pHn77\nDerXty1H//Zb+IeajVIEKYDfQBTARURERPJnD+Nnqqlnpqbh6u1lq6Z+E4XxLw+mMXHPYZ73vYV/\n3eJd2MORQjB1KgwYYDuW7JlnCns0cr0pgN9AFMBFRERE/tm5Yfy3L77h9LFUXL298O18r62A2w0c\nxvOM4YVt+/ktPYtp9X2o4KbTcosbY+D++20z4D//DLffXtgjkutJAfwGogAuIiIicmVys7LY9/0a\ndsxffEEYrxPRiRptW+Li7l7YwzzP/owsBm5OokFpD0bUraKl6MXQoUNwxx1QrRqsXw9FpOC/XAYF\n8BuIAriIiIjI1cvNzmbfd6ttBdw+X2YL416l8O3cljoP3Vhh/D9/HGPy3hSG+FWidUWvwh6OFIIv\nv4QHHoAhQ2DUqMIejVwvCuA3EAVwERERkYKRm51N0pmZ8QvCeERHarQLKdQwnmsMUVuTST6dzaf1\nfSjrqqXoxVFkJEyfDnFxEBxc2KOR60EB/AaiAC4iIiJS8OxhfMESdn/+NaeP3hhhfG96Fo9t3kfT\nsiUZVrfKde9fCt+JExAUZNsX/ssv4KXFEEWeAnhB3NyyygBTgUDAAI8CGcBkwB3IAZ4wxmy41H0U\nwEVEREQcKzc7m6T/rT0zM/5XGK/VyXa0Wc32odc1jM/Zf5RP9x1lWJ3KtCxf6rr1KzeONWugZUt4\n5BFbhXQp2hTAC+LmljUDWGWMmWpZlivgCcwHxhljvrYs6z7gJWNM6KXuowAuIiIicv2cDeM7Fyxh\n13+/4vTRVEqUKmmfGb8eYTwnzzB4azKHs3KYVt+H0iWcHdqf3JhefdW2D/yLL2wV0qXoUgC/1htb\nVmkgHqhlzunEsqxvgE+NMZ9ZltUD6GSMefhS91IAFxERESkc54Xxz7/mdMoxWxjvdK+tgFu7EEp4\neDik799OZfLEliRalffildqVHNKH3NiysuCeeyA5GbZsgUr6b1BkKYBf640tKwiYAiQA9YGfgGcA\nH+AbwAKcgGbGmL35tB8IDATw8fFpuHfvBZeIiIiIyHWUm51NUuw6ds5ffGEYj+hIjfahBR7GZySl\nMCv5GCPrVeGesiUL9N5yc0hIgLvugnvvhUWLQKfTFU0K4Nd6Y8tqBPwANDfGrLcsazxwHCgNxBlj\nFlqW9RAw0BjT5lL30gy4iIiIyI3FHsbPLlM/E8ZrdWxD3Yc6FVgYz84zPL45iRM5uUwL8qGUi5ai\nF0fjx0NUFEyZAgMGFPZoxBEUwK/1xpZVGfjBGFPjzOsWwCtAMFDGGGMsy7KANGOM96XupQAuIiIi\ncuPKy8khKXYtO+bnE8YjOlLjX62uKYzvOHmawVuSaXeLN8/73lKAI5ebRV4etGsH69ZBfDz49Lqn\nXQAAIABJREFU+RX2iKSgFZcA7uSoGxtjDgJJlmXVPfNWa2zL0f8AQs68FwbsctQYRERERMTxnFxc\nqN6mJW2njOHxg/E8uHwu/g8/wL4Vq1gUPoBJFe9kSY8n2PXfr8jOyLji+9ct5U5E1TJ8/edxfkpN\nd8ATyI3Oycl2LniJEtCnD+TkFPaIRK6OwwL4GYOB2ZZlbQaCgFHAAOB9y7J+OfN6oIPHICIiIiLX\nydkwfu8nY3jswCZbGO/Z5fww3v3xKw7jfW8rx23uJRj7+5+k5+Y58AlubgcPHqRp06a0atWKzMxM\nwsPDCQ0NZcOGDfTs2fOi7ZYtW8asWbOuuL/4+HhWrlx5VWP9p7axsbFERkbaX996K0yaBOvWjaFm\nzSY0b96cwYMHk9+K3qioKO655x7uuece3n77bfv7v//+O506dSIsLIw+ffrY3x81ahTNmzcnLCyM\nxMTEq3oekcvh0GPICoqWoIuIiIjc3PJyckiKO7NnfOFXZBw5SomSntTq2MZ2tNm/wijheell6ttO\nZBC1dT+dKpXm6VoVr9PIby5z585l+/btDB8+nAMHDtC9e3fi4uIc1l90dDTJyckMHTq0wNvGxsYS\nExPD1L8dAt6p0y6WLavNunUwZsxDDBo0iNatW593za5du6hduzZ5eXk0b96cmJgYfH19ue+++5g2\nbRpVqlSxX7t9+3aeeuopVqxYwcqVK/n444+ZN2/eFT+PXBstQRcRERERKSBOLi5Ub92Ceye/Y5sZ\nXzEP/15d2ff9GhY/OJBJt9zJ4m6PsXPhUrLT858ZD/DyoEuV0iw6lMYvaVe+lL0oGjJkCCEhITRt\n2pQZM2YwfPhwZs6cSWRkJAMHDmTz5s2EhoZy8uRJ/M5snD527Bjh4eGEhITQqlUrDh48SHR0NCNH\njgQgLi6OkJAQQkNDeeyxxzDGkJiYSMOGDenVqxd33XUXH3zwAQBjx45l2rRphIaGsn//fkJDQ4mK\niqJt27a0bt2azMxMACZOnEiLFi1o2rSpPVD/vW1+fvvtN7p06UJQUBALFiwAYObM2lSqBL17g7Oz\nGy4uLhe0q127NgBOTk64uLjg7OzM3r17SU9P55lnniEkJISFCxfan7dDhw4AtGzZkl9++aVA/m1E\n8nPh/1YREREREQc6G8art25B6w/fInnlD+yYv5hd//2anfMX4+LpYS/gVvO+1ufNjD9yW3nWHT3F\n+7/9yZT6t+HuXAzmk7y94cSJv157ecHx4yxbtoxjx44RFxdHeno6TZs25eWXX2b//v0MHTqUxMRE\nIiMjWbFixXm3Gz16NG3btmXQoEEA5OX9taTfGENUVBSxsbGULl2aZ599lqVLlxIYGMiBAwdYtWoV\nTk5O+Pv7ExUVxXPPPXfBLHZoaCgffPABAwcOZPny5fj6+rJs2TJWrlxJXl4eLVq0oEuXLvm2/bvD\nhw+zfPly0tPTadSoEeHh4ZQt68SMGdCmTRwnTx5gzpyWF20/e/ZsatWqRY0aNVi3bh2bNm0iISEB\nLy8vmjVrRlhYGCkpKVStWtXeJjc397L/aUSulAK4iIiIiBQaJxcXfMKC8QkLtodx29Fm+YdxD08P\nnve9hRcS/iA66SiP1ahQ2I/geOeG73Neb9myhbi4OEJDQwHIzMwkJSXlH2+3detWBpxzlpeT01+/\nxDhy5AiJiYncf//9AJw8eZK6desSGBiIv78/np6eADg7X/w4uIYNGwLg4+NDSkoKGRkZJCQk0KpV\nKwCOHz9OUlLSP44ToEGDBri4uODt7c0tt9zC4cOHqVSpEhUrbqZy5VdITl7M8uUWnp6r7UF+yZIl\nlCpVihUrVjB9+nQWL14MQLly5bjjjjuoVq0aAEFBQezatYty5cqRmppq7/NSzyZyrRTARUREROSG\ncG4YDzsbxv8+M96hNXUiOtKhfkMWHkilZfmS3O517eeN33Rq1yagRg3aVqvG+I8/htq1ycrOZs6c\nOSQnJ1+yaWBgILGxsfZl2ufOgFeoUIFatWrZQyxAdnY2+/fvx3aC8PlcXV3J+VtJ8nOvM8bg7+9P\ngwYNWLhwIZZlkZ2dTYkSJUhISLig7d/Fx8eTk5NDRkYGhw4domLFiuzevZtHH32U2NiFdO1agUce\ngS1bgomNjbW3W79+Pa+99hpff/01HmeOwPPz8yM9PZ0TJ07g4eFBQkIC1atXx8vLi6ioKKKioli7\ndi3169e/5JhEroUCuIiIiIjccJycnfFp1RyfVs3/CuNnCrjtXLAEypej1Oxo3tqUyZQGNSjpVbKw\nh3x9BQRw3+rVrE1JIbRuXawSJbi1cmVaBwdD6dKXPKdryJAhPProo8TExODs7MycOXPsn1mWxdix\nY+ncuTPGGJycnBg3bhze3t753qt58+Z8+OGHbN26lQ8//DDfawIDA2nTpg0hISE4Ozvj4eHBokWL\nLmhbuXLlC9pWrVqViIgI9uzZw8iRI3FyciIqKorU1FQGDeqLhwfs2PEijz/egXnz4Gz279+/PwAP\nPPAAAO+//z4NGzZkzJgx/Otf/yI7O5sBAwZQqVIlKlWqRHBwMM2bN8fV1ZVp06Zd1j+ByNVQFXQR\nERERuWFFR0fTtWtXewD08PAgqG490g+nUL3MraRPnYnv7LnsXzaPgyWgUg0fZs2eTbly5c67z8yZ\nM/nwww9xd3enatWqzJgxAzc3N9LT03n66afZs2cPubm5fP7555QtW5Zly5YxfPhwAN544w3atWt3\nwdgmTJjA008/Ddhmao8fP07Llhffj3zVLrIHnLw82LEDVq2C1attP3v22K4pWRKaNoXgYGjRApo0\nsb1XBI0eDf/+N8TEwCVOWpMbXHGpgq4ALiIiIiLXLDc31yF7Z0NDQ4mJieHWW28FbMuId+/eDUBe\nbi5vrkvgy/XrcX5nJD0OZ/NziWwya1XlrTffpFaHNpQoaduz/Pvvv1O9enWcnZ156aWXqFu3Lv37\n9+fll1+mdevWtG3b9rxnadCggf2M6pCQEH7++ecLnu/csVzLcVwFKjkZ1qz5K5Rv3gzGgIsL3HWX\nLZCf/al44xzl9tJLL7Fhwwb7a1dXV7799tvLapubCy1bwrZttsf18XHUKMWRiksA1xJ0EREREbkq\niYmJREREUK9ePVxcXDh16hQpKSkYY5gyZQq+vr707NmTpKQkXFxcGD58OD4+PoSHh+Pv709CQgJ9\n+vQhKiqKtLQ0BgwYcF77ffv2ER8fT0REBI0aNWLixIkcPHiQkJAQypcvz9ixY3n+ntv5bOrH3PrK\nv+l6VyC1Zi3g+VlTWNLtcVw83O17xmt1aGMP0G5ufx1dtWLFCrKzsxk1ahQhISEMHz6c3bt3U7Nm\nTcqUKQNAjRo12L17N3Xr1rU/+9ixY+3HbvXu3Zvx48dz4sQJVqxYwezZs+nZsye33347O3fuxN3d\nnXnz5tn3VDvUrbdCt262H4C0NFi71hbGV62Cjz6CsWNtn9Wt+9cMeXAw1Kr11xru62zMmDFX3dbZ\nGWbNgvr1oV8/WLECnIpBcXy5SRljbvifhg0bGhERERG5sezZs8dUqFDBpKWlmZdfftnMnTvXGGNM\nfHy8CQ8PN0eOHDHNmjUzeXl5xhhjcnNzzZ49e0yVKlXMqVOnTEZGhqlRo4YxxuTb3hhjQkJCTFJS\nkr3Pw4cPG2OMWbZsmQkLCzPGGNO5Tz9z14cxZsa+FJOXl2fq1q1r9sWuNcufGGI+rlTfvEdV84FH\nLfNleKRZ+v6HpuFdd5mMjAxjjDGurq5m8eLFJi8vzzz44IPm66+/NmvWrDF9+/a199mnTx+zdu3a\nC57f19fX/vfp06ebESNG2F+HhISYOXPmGGOMGTlypBk3btw1fNMF6PRpY9asMebtt43p2NGYMmWM\nsc2RG1OlijEREcaMH2/Mzz8bk5NT2KO9ItOm2R5j7NjCHolcDWCjuQGyp6N/NAMuIiIiIlctMDAQ\nb29v+5FYkydPBsDFxYXy5cszYMAAevfujaenJ8OGDQPI9zir/Nrnp0IF27Fj7dq148knn7Tdr0ol\nSllZxCQf5Q4rC6dS3njfFcjI14dA3dIMfvUxyuxIYv1nnzN54Qz6ulXm216DqRPRkXJly9K+fXss\ny6Jdu3Zs3ryZzp07n3csVVpaGuXKlWPo0KGsXr2a4OBgRo4c+Y/fTePGjQFo0qQJCxcuvOLv1iHc\n3KBZM9vPyy/b9pEnJPw1Q756NSxYYLvWy8u2j/zsDHmTJuBx41acf+QRWLQIhgyBe++FwMDCHpHI\nhbQ4Q0RERESu2tkAHRAQwEsvvURsbCyxsbF89dVXZGdn06tXL2JiYmjZsiXjxo0DyPc4q/zaw/nH\nXJ08eZLc3FwANm/ebA/jISEhHF4bRx7Qe9oc0v0bsCg1i+/+9z9i4+IIH/wY9d94li9rluKT/5tC\naGRv/lizkaXdn6DKkZOMuzec7fO+ZP26dfj5+VG7dm327NnD8ePHOX78OHv27MHPz4+RI0cSGxtr\nD9/nnp+d33FcZ2sY/fjjj9SpU6egvvKC5eRkS6qPPQazZ8Pevbaf2bOhVy/44w947TVo1cpWXb1p\nU3jpJVvSvYwzx6/GwYMHadq0Ka1atSIzM5Pw8HBCQ0PZsGEDPS9RZe2bb5Zx772zKF3aNvTMzMvr\nLz4+3r7f/0r9U9vY2FgiIyMveH/t2rXccccduLu7X/TYuDFjxtCkSROaN2/O4MGDMefU7srOzqZ2\n7drn/SJo1KhRNG/enLCwMBITE6/qecTxNAMuIiIiItfs1Vdf5bHHHmPixIkYY+jQoQM9evSge/fu\nODs7k5WVxYQJE66o/QsvvEDXrl3p378/zZo1o1OnTgwaNAgvLy8sy+KTTz4BbLPhixcvZs7jPXDx\nLEnAsHeZkXyM+X+k4u/lTqCXB0veHEryH3/wTkw0AD3ffIJude+gyrTZjJw3g0mxy7jF2Y0Oxyx2\nZTnz5rDX7ZXPR48enW+BuaZNm9KlSxe6deuW73Fc69atY8qUKbi6ujJ//vwC/sYdyMcHHn7Y9gNw\n9Ohf+8hXr4bx4+Hdd22f3X77+fvIq1e/5n3k//vf/2jbti3Dhw/nwIEDHDlyhLi4OABmz5590Xbt\n27e3D79zZ3j9dXj77X/uLz4+nuTk5KuqYH+1bQMCAli3bh0dO3a86DVdunThpZdeAuChhx7i+++/\np3Xr1gB88skn1KtXz37t9u3b+f7771mzZg0rV67klVdeYd68eVf8POJ4qoIuIiIiIje95YeP887u\nP+2vO1XyxgmLrScy+D09C4Nt6advSTcCvNwJPBPMK7i5kJebyx9rfmTHmXPGTx04hIu7OzX+1Yq6\nD3WiVsc2uJa6siO8/l69vUjJyICNG/9asr5mje1YNIBq1f4K4y1aQECArUraJQwZMoS1a9eSlZXF\nY489xujRo8nMzKR169YcOnSI1atXU79+fZYsWUJQUBC7d+/m2LFjREZGcuTIEZycnJg7dy7Lli2z\nV6Lv1CmOJUuGERRk0aRJPSZNmsTevXvzLQB45513cuLECapXr24voBcUFERCQgK5ubl89dVXuLm5\nMXHiRObPn09OTg79+/cnMjLygrbVqlU779liY2MZPnw4ZcqUYc+ePbz66qtERETYP7/c/ye9e/cm\nMjKSkJAQTp48Sbdu3YiIiLA/7yeffEJ6ejrPPvssYNvm8euvv17FP27hURV0EREREZGbRFgFLwDa\nVPBixZEThFXwwvnMTOypnDx+PXmarccz2HbiNMv+PM4XB9MAqOTmQqCXOwF1/Al8uwEh497g4Lqf\n2DF/MbsWfsXuz7+2h/E6ER3x7dgGV6/rUM38RubhYQvXLVrYXufmwtatf+0jX7kSzs6+li5t22/e\nogWMGGEL72d5ebFs/nyOHTtGXFwc6enpNG3alJdffpn9+/czdOhQEhMTiYyMZMWKFecNYfTo0bRt\n25ZBgwYBkJeXZ//MGMO+fVHUqBFLamppnJ2fZenSpQQGBnLgwAFWrVqFk5MT/v7+REVF8dxzz11w\nhFxoaCgffPABAwcOZPny5fj6+rJs2TJWrlxJXl4eLVq0oEuXLvm2/bvDhw+zfPly0tPTadSoEeHh\n4edtX/gncXFxHDhwwD7L/u677xIVFcX+/fvt16SkpFC1alX767NbNeTGowAuIiIiIjc9Z8vi3ore\nAPY/zyrp4kSjMp40KmMr/JaTZ/gtPZNtJ2yhPD4tg++OnLRd6+zE7RVuI/Dl57hnxBC8tmxl74Il\n7PzP0isK47GxsY572BuNs7PtDLD69eHJJ2011ffu/WuGfNUq+PrrC9udOGEvvhcaGgpAZmYmKZex\nt3zr1q0MGDDA/vrcQHvkyBH27UukZs372bQJPvvsJHfcUZfAwMB8CwDmp2HDhgD4+PiQkpJCRkYG\nCQkJtGrVCoDjx4+TlJT0j+MEaNCgAS4uLnh7e3PLLbdw+PBhKlWqdMF1u3fvtu8Xnzp1Kn5+fmze\nvJlXXnmFxYsXY1kWhw4dYtOmTQwfPpzo6Gh723Llyp1XOPBSzyaFSwFcRERERIoVFyeLuqXcqVvK\nna5VymCM4WBmDltPZJwJ5aeZnnoUAOcS5ak9YCABzz6N7/5knBZ9xR8xC9n9+dc4u7lR82wY73Sv\nZsbPsiyoUcP207u37b0jR6BixQsuDQgIoG3btowfPx6ArKws5syZc9HCZGcFBgYSGxtL7dq1gfNn\nwCtUqECtWrWIi1vCO++UYuRIKF8+G9ifbwHA/AronXudMQZ/f38aNGjAwoULsSyL7OxsSpQoQUJC\nwgVt/y4+Pp6cnBwyMjI4dOgQFfP5HgD8/PzO+8XN7t27efTRR1m4cKG94OCWLVs4fPgw7du3Z//+\n/WRmZlK/fn1CQkKIiooiKiqKtWvXUr9+/UuOSQqPAriIiIiIFGuWZVHFvQRV3EvYZ8+PZ+eScPI0\n206cZtvxDBYfOk6Wsxd06UbV7j2pmXEK740/s3P2Anb1fAoXhfFLOxMg/+6+++5j7dq1hIaGYlkW\nt956q73Q2KUMGTKERx99lJiYGJydnZkzZ479M8uyGDt2LJ07dyYvz+Dl5cTAgeP49lvvfO+VXwG9\nvwsMDKRNmzaEhITg7OyMh4cHixYtuqBt5cqVL2hbtWpVIiIi2LNnDyNHjsTJyYmdO3fyxBNP8Msv\nv9CjRw8efvhhHn/88fPaRUVFkZqaSt++fQF48cUX6dChA23atAEgOjqa5ORkOnXqBEBwcDDNmzfH\n1dWVadOm/eN3KIVDRdhERERERP5Bdp5h16lM2yz5cVswT82x7bMtafKo+sd+3P+3Es/V6yj3+x58\nWzen7kOdFMbP5e0NJ0789drDA9LTHd7tr7/CXXdBWBgsWXLNRdrFQYpLETYFcBERERGRK2SMYf/p\nbLaeOG3fS550OhsA59xcyuzajffGnyj/63bqVypLUMfW1Op0L27eXoU88htAVhY0bgwHD9qKt11k\ndrwgTZwITz8NkyfDmbptBe6ll15iw4YN9teurq58++23jumsCFIAv4EogIuIiIjIjS41O5dt9n3k\nGew4cZrcM9OtJRP3Um5bAnXIodkdtWnSviXupfNfEl0s/PIL3H03dOkCn33m8O7y8qB9e9uJafHx\ncGbruNxAFMBvIArgIiIiInKzycrLY8fJTLYez+CnfQfZkWXIcHMDwPXoMarsT6LSvn3UyjhFlbRj\n3NGzCz6tmhfyqK+jt96CoUNtAfyhhxze3f79cMcdUKeOrTi7i6ph3VAUwG8gCuAiIiIicrPLM4Z9\npzJZs3kHG/ce5HdXT05VrQKAU2YmtUwOjX1vJcDLndu93PFyKeJHSeXk2M4I//1321L0fAqYFbT5\n86FbN3jzTXjtNYd3J1dAAfwGogAuIiIiIkVNbnY24wNac6BaNU42bYJT967sTs8i14AFVPdwJdDb\nnUAvDwK83Kns5pLvMVo3tV9/hQYNoF07+OKL61IhrVcvmDcP1q2zrYKXG4MC+A1EAVxEREREiqJf\n537BVw8/yX1zPsK/xwNk5Oax4+Rpe3G3bSdOk55rO+O6fAlnArw9CPRyJ9DLHd+SbjgXhUD+/vvw\nwgswYwb06ePw7lJTbUvRS5aEn38GT0+HdymXQQH8BqIALiIiIiJFUV5uLvEfzyDoib44OV+45DzX\nGPamZ7HlxGl7gbdDmTkAuDtZ+JdyJ9DbnQAvD/xLuVPSxel6P8K1y82F0FDYssW2FP3WWx3e5fff\nQ+vW8OSTcJGjv+U6UwC/gSiAi4iIiIjYHM7MsZ1HfsI2U/77qUzyACegpqcrgV4eZ0K5O7e4lSjs\n4V6e336DO++E4GBYtuy6LEV/7jkYNw6+/tpWIV0KV3EJ4Dfhr8hERERE5EqlpqYyc+ZMAA4ePEjT\npk1p1aoVWVlZl32Pp556ipYtW7Jo0SJiYmJo3Lgxb775Jm+//TZbtmy5aLuePXte1ZgnTJhwVe0u\np62fn98F723atInmzZvTsmVLwsLC+P333y+45ptvvuGee+4hJCSE++67j5SUFAByc3N54YUXaNOm\nDaGhoSQkJADw888/07x5c5o1a0Z0dPRVP8+5Krq50KqCF0/VrMjkO2/ji8a1eMe/Kj1vLYt3CWe+\nOXyct3Yd4uGf9/LwT4m8tfMgXx5M5bdTmeTeqJNvvr7w7rvw7bfwf/93XbocNQoCAuDRR+HMP6OI\nw2kGXERERKQYSExMJDIykhUrVjB37ly2b9/O8OHDr+gederUYefOnQC0a9eOyZMnU7NmTUcMF7CF\n5N27dzukbX6fHzx4kJIlS+Ll5cVXX33F3LlzmTVr1nnX7Nu3j0qVKuHm5sbHH3/MgQMHGDFiBJMm\nTcLZ2ZmBAweed33z5s2JiYmhWrVq3HPPPXz33XeULVv2qp7pcuUaw++nMtl6ZoZ86/EMUrJzAfB0\nduJ2+7J1d+qVcsfD+QaZk8vLg7Zt4YcfbMvRHfh/66z4eGjcGO6/31YhvShsqb9ZFZcZcJ1+JyIi\nIlIMjB07lp9++onatWsDkJOTw/79+5k6deoF18bFxTFs2DAsy6JevXpMmjSJp59+mqSkJEJDQ+nR\nowfr16/n4Ycf5vnnn2fJkiVERkYSHBzM+PHjmTNnDp6envTr14++ffvaw25aWhoDBgwgJSUFYwxT\npkzBz8+P0NBQgoKCSEhIIDc3l6+++oqPPvqI/fv3ExoaSu/evXF2duaLL77AycmJnTt3MmnSJFq0\naMGWLVt49tlnycvLo0KFCsyYMYNJkyad17Z///75fifPPvssP//8M7fddhszZ86k8jnHYLm5ueGS\nz0HRPj4++V6zYMEC+6qCgIAAxo4dizGGU6dO2X9J0aJFCzZs2EC7du2u/h/yMjhbFrVLuVO7lDtd\nqoAxhkOZOWw7cZotZ5auz0g6isG2HNavpJutsJu3rdp6eddCighOTvDppxAYCI88Ytuo7eTYXw4E\nBcGIEfDKKzB7tq1CuohDGWNu+J+GDRsaEREREbl6e/bsMa1btzbGGDN9+nQzYsSIfK/Ly8szQUFB\nJjU11RhjTFRUlFm8eLExxhhfX1/7dSEhISYpKckYY0zfvn3NqlWrzJYtW0zLli1Ndna2McaYnJyc\n89q9/PLLZu7cucYYY+Lj4014eLj9Xp9//rkxxpgBAwbk29/06dPN/fffb4wxZs2aNfa2LVq0MHv3\n7jXGGPPBBx+YiRMnXtA2P9WrVzdr1641xhgTGRlp798YY06ePGnuueces23btou2P3jwoAkKCjKH\nDh0yxhhTp04de9/PP/+8mTRpktm/f78JCQmxtxk2bJiZM2fOJcd1vZzIzjHrj5400/YeMc9uTTL3\n/bDbtF67y7Reu8v0+mmPGb3zoFl8MNXsOXXa5OblXd/BTZtmDBjzwQfXpbucHGOCg43x9jYmMfG6\ndCn5ADaaGyB7OvpHM+AiIiIiYnfkyBESExO5//77ATh58iR169a9rLYJCQkEBwfbZ4Wd/1bVe8uW\nLcTFxTF58mSA82aYGzZsCNhmmFMusiE3v2u2bdtGnzNHV50+fZo2bdpc1lgty6Jx48YANGnShB07\ndgCQnZ1Nt27dePnll7n99tsB6NixIydPnuSpp57iwQcf5Pjx4zz44INMnjyZW265BYBy5crR/kwl\nr/bt2/Pf//6Xfv36kZqaau8zLS2NcuXKXdb4HK2UizONy5akcdmSAGTnGXafymTbiQy2njjNxrR0\nVhw5AYCXsxO3e7kTcOZM8rol3XBz5LL1Rx6BhQtt09Lt28Nl/v+7Ws7OMHOmrQZc377XZeJdijEF\ncBEREZFiwNXVlZycnH+8rkKFCtSqVYslS5ZQqlQpwBZKL0dAQACTJk0iNzcXZ2dn8vLycDonyQQE\nBNC0aVO6dOkCcF4BOOuczbfmTI0ip7+loPyuCQwMZO7cuVSpUuW8e/697d8ZY9i4cSNNmjThxx9/\npH379uTl5dGrVy8eeOABHnjgAfu1S5Yssf89IyODLl268Oqrr9KkSRP7+6GhoWzcuBE/Pz/7n+7u\n7pQsWZJ9+/ZRpUoVVq9ezeuvv37JcRWWEk4W/l7u+Hu58yC27+eP09lnlq3bjkBbvy8dABcLapd0\nO6faugdlSlx4hNpVsyxbIbbAQOjXD1avtqVkB6pZEyZMsBVkGzcOnn/eod1JMabf7YiIiIgUA5Ur\nV8bDw4Pw8HByc3Mvep1lWYwdO5bOnTvTqlUrWrduza+//npZfQQEBHD//ffTrFkzwsLCLihg9uqr\nrzJ//nzCwsJo1arVP1YqPxvW582bd9FrPvroI/r160dYWBhhYWHExcVdVlsXFxcWLlxISEgIJ06c\noHPnzvz3v/9l6dKlxMTEEBoayuDBg/Pt75dffuHtt98mNDSUt956C4CXXnqJefPmERoayoYNGxg0\naBAA48ePp0ePHoSEhPDEE084vABbQbEsi2oerrS9xZvnfW/h06DqLGxUkxF1qxBepQxqiCFyAAAg\nAElEQVROlsUXB1N5fcdBHty4h36b9vLu7kN8/edxkjKy7L8guWpVq9oO6P7hB3jvvYJ5qH/Qrx90\n6QL//retBpyII6gKuoiIiIiIXLGsvDx2nsw8cx65rbjb8Zw8AEq7OBHgZSvqdoe3O34l3XF1uniJ\n8ejoaLp27Yq3tzcAHh4ethUG27bR+9gx+sfHYwICePrpp4mPj6d06dLMnDnzgiX9M2fO5MMPP8Td\n3Z2qVasyY8YM3Nzc7J+Hhobi5+dnLz4YHR3NlClTsCyLiRMnctttd3HHHVCpEmzYABkZqSxatMi+\nzSE2NpZy5cpx5513Fuh3KcWnCrpmwEVERESKqYSEBEJDQ8/7mTNnTmEPq0B9//33Fzzj999/X9jD\nummdu3rC1cmJQG8PulUry4h6VVnYqCafBvnwXK2K3FO2JHszsvi/fSk8vXU/92/4naityUzde4Qf\njp3iePb5qzCio6M5fvy4/XW1atWIjY0lNiGB/uXKQZ8+fLN0Kenp6axatYqHHnqIMWPGXDC+4OBg\n1q1bx8qVK/Hx8SEmJsb+2ZIlS/Dy8rK/PnbsGBMmTCA2NpaYmBiefvppKlaEadNg82YYNgxSU1OZ\nOXOmvU1sbCybN28ukO9SiiftARcREREppm6//XZiY2MLexgOdXZpuly9xMREIiIiqFevHi4uLpw6\ndeq8o+R8fX3p2bMnSUlJuLi4MHz4cCJ8fJgXGY5v3brEb02g/gMPkhPRlzm7ktg2+lWy01Jxd4J+\no8dRJu1PNsXHExERQaNGjZg4cSIHDx4kJCSE8uXLM/bNN6nx+OPEvfMOHZ97DoBOnToxadKkC8Za\nq1Yt+9/PPSYuLy+Pjz76iGeeeYb//Oc/AGzYsIEWLVrg6upKzZo1OXHiBJmZmXTo4MagQfDuu7B7\nt+34vtDQUAYMGEB0dDQeHh5MnTqV7777jrp169KpU6fzjrP7p/oDUrwpgIuIiIiIyCUlJiby3Xff\nMWrUKIKCgujevTu//PILr7zyCp988gl79+5l9erVWJZFXl4e+/bt48CBA6xatQonJyf8/f3Z8+ZQ\nXpj1Ie16due2th357sefiH7rDW5/ayJOtepSfuj7VPCryX/+SGX51u3c7VON75Z/S/8xY/iuZ09S\n5syhbO/eAJQpU4Zjx45ddLzbt29n2bJlrFq1CoAZM2bQtWtX3N3d7dekpKSctye/TJkyHD16lCpV\nqvDee/Ddd/DDD89Rv34CsbErANi1axd+fn70OnNgeE5ODg899BDjxo1jwIABLFq06LwCfiJ/pwAu\nIiIiIiKXFBgYiLe3d75HyZUvX54BAwbQu3dvPD09GTZsGAD+/v54enoCfx1J9+u2raxZtRK3mZ8C\ncLuLC1PuvI373Utwh7cHu05lsuroKQDcD/1OvVsD2frbHjZEz6HU0qWkjhxJbp8+LEr+k7Jly7Jo\nzx+8/8jDWMDIkSMJDg4mOTmZvn37Mm/ePNzd3Tl9+jSzZ89m2bJlrF692v5M5cqVy/eYuMjISHbv\n3k3nzg/ywQcduVTJrIsdZydyMQrgIiIiIiJySWcDdH5HyWVnZ9OrVy/69etHTEwM48aNY/Dgwecd\nG3dWfu1dXV2pUsqTQbeVpUaNGiQeTeX3HItfT2Wx8qdNZJby5t/JJ0gZ+i5fL17A1vnLmLfnd076\n3cmEg+kMXbCIeyvaircdOXKE8PBwJk+ejK+vLwB79uwhNTWVjh07cvToUQ4cOMDUqVMJDw9n6NCh\nZGdnc+DAAUqVKoWbm5u9QBuAMX8wblwOCxdCePiFx/nld5ydyKWoCrqIiIiIiFxUYmIikZGRrFix\ngrS0NB577DEOHTqEMYYOHTrQo0cPunfvjrOzM1lZWUyYMIEKFSrY2wD4+fmxe/fufNu/8MILTJ48\nmQULFtCsWTM6derEoEGD8PLywrIsxoz7gBK+ddmcms6EQY+yd38ylpc3AcPepUTpsiy/x9ce9p96\n6im++OIL/Pz8AOjduzf9+/e3P8vZgmtnQ/ann37K1KlTsSyL8ePH06jR+UW4MzPzuOWWDpw+7cms\nWU9Qs6Y3UVFRVKlShfnz51OnTh26du3K+vXrqVatGrNmzbL/skKuTHGpgq4ALiIiIiIiN4fjxzl5\nx53E16jNqDfGkuXuwct+t9hnwB1hxw5o0ABCQ2HpUjh3Yv/sLxbk2hWXAK4SfSIiIiIicnPw9sbj\n02kEr1zB0i+m8rLfLYRV8Prndtegbl1bRfSvv4YzW99FrpoCuIiIiIiI3DScW7eGJ5/EmjCBe7f9\njHM+e80L2hNPQLt28PzzsHPnX+9r9luulAK4iIiIiIjcXN55B3x94ZFH4MQJh3dnWfDpp+DhAb16\nQXa2w7u8qOjoaI4fP25/7eHhQWhoKKGhoUybNg2wFYcbPHgwLVq0sBef+7uZM2fSuHFjWrZsSffu\n3cnMzAQgIiKCZs2a0aRJE6Kjo89rs3PnTkqUKHFeNXm5MgrgIiIiIiJycylZEmbMgL174cUXr0uX\nVavCJ5/Ajz/CqFH/fH1ubq5DxvH3AF6tWjViY2OJjY21F5z75ptvSE9PZ9WqVTz00EOMGTPmgvsE\nBwezbt06Vq5ciY+PDzExMQCMGjWKtWvXEhcXx8iRIzl9+rS9zYgRIwgJCXHIcxUXOoZMRERERERu\nPs2b29aEv/cedOliWyPuYA8+CL17w4gR8K9/wZkjwO0SExOJiIigXr16uLi4cOrUKVJSUjDGMGXK\nFHx9fenZsydJSUm4uLgwfPhwfHx8CA8Px9/fn4SEBPr06UNUVBRpaWkMGDDgvPb79u0jPj6eiIgI\nGjVqxMSJEzl48CAhISGUL1+esWPHUqNGDeLi4ujYsSMAnTp1YtKkSRc8S61atex/d3Nzw8XFFg1r\n164N2I5cc3Z2tleYX79+PZUrV1aV92ukAC4iIiIiIjenESNspcn794etW6FMGYd3OXEixMXZlqJv\n2mSbjD9XYmIi3333HaNGjSIoKIju3bvzyy+/8Morr/DJJ5+wd+9eVq9ejWVZ5OXlsW/fPg4cOMCq\nVatwcnLC39+fqKgoRo8eTdeuXc9r/5///IegoCBiYmK49dZb7f1VqFCBb775hv79+/Pdd9+RkpJC\n2bJlAShTpgzHjh276PNs376dZcuWsWrVqvPeHz16NN27d8fNzQ2At956i+nTp/P8888X4LdZ/CiA\ni4iIiIjIzcnd3bYUvWlTiIqCv+1ZdoTSpW1dhoVBRATUqgVt20LnzrbPAwMD8fb2ZsuWLcTFxTH5\nTOl0FxcXypcvz4ABA+jduzeenp4MGzYMAH9/fzw9PQHsM8z5tc9PhQoVAGjXrh1PPvkkAOXKlSM1\nNRWAtLQ0ypYty8mTJ+2z4iNHjiQ4OJjk5GT69u3LvHnzcHd3t99z5syZbN68mblz5wKwdOlSGjVq\nRPny5QvmSyzGFMBFREREROTmdffdMGQIjBwJXbv+lYQdKDQU7r8fvvjC9nr6dJg7F+68868AHRAQ\nQNOmTenSpQsAWVlZZGdn06tXL/r160dMTAzjxo1j8ODB9mXe58qvPdiWhufk5ABw8uRJPDw8cHZ2\nZvPmzfYwHhISwueff84DDzzAV199RUhICKVKlSI2NtZ+/yNHjhAeHs7kyZPx9fW1v//ll18yZ84c\nFi1ahJOTrWRYfHw8sbGxrF27li1btrB9+3Y+++wzqlevXnBfajFhGWMKewz/qFGjRmbjxo2FPQwR\nEREREbkRZWXZNmQfPAjbtsF1mKnt0QPmzfvr9ZNPwgsvJBIZGcmKFStIS0vjscce49ChQxhj6NCh\nAz169KB79+44OzuTlZXFhAkTqFChgr0NgJ+fH7t37863/QsvvMDkyZNZsGABzZo1o1OnTgwaNAgv\nLy8sy2LChAnUr1+fvLw8Bg8ezObNm/H29mbmzJkXzF4/9dRTfPHFF/j5+QHQu3dv+vfvT6lSpahX\nrx6lSpUCYPbs2VSrVs3erl+/fkRGRhIcHFyg36dlWT8ZYxoV6E1vQArgIiIiIiJy8/vlF9tseNeu\n5ydjBwkJgZUrbX/39LTNgF+Hyfciq7gEcB1DJiIiIiIiN7/69eH11+Gzz2D+fId2tW0brFply/pP\nPqnwLZdPM+AiIiIiIlI05ORAs2bw+++2quiVKzukmwcfhG+/hT17rstq92JBM+AiIiIiIiI3ExcX\nW4nykydh0CBwwGTjpk2wcCE8+6zCt1w5BXARERERESk6/P3hrbdg0SKYNavAbz9sGJQtawvgIldK\nAVxERERERIqWqCgIDoann4bk5AK77fr1sGQJvPgilClTYLeVYkQBXEREREREihZnZ4iOhuxs6N+/\nwJaiv/YaVKwIgwcXyO2kGFIAFxERERGRosfXF95911Yt7f/+75pvt3IlLF8Or7wCZ47IFrliqoIu\nIiIiIiJFU14etG0LP/wAW7ZAzZpXdRtjbOd+794Nv/0GHh4FPE5RFXQREREREZGbmpMTfPqp7c9H\nHrEF8quwYoXt3O9XX1X4lmujAC4iIiIiIkWXjw988AHExcHEiVfc3Bjb3u/bboPISAeMT4oVBXAR\nERERESnaHnkE7rsPhgyBnTuvqOnSpbbq58OGgZubg8YnxYYCuIiIiIiIFG2WZSvE5u4OfftCbu5l\nNcvLswVvX19bM5FrpQAuIiIiIiJFX9Wq8OGHtoJs7713WU0+/xw2bYLXX4cSJRw8PikWVAVdRERE\nRESKB2MgIgIWL4affoLAwItempsL9evb/ty61Xa0uDiOqqCLiIiIiIgUJZYFkyZB6dLQpw9kZ1/0\n0s8+g23bYPhwhW8pOArgIiIiIiJSfFSsCJMn29aWjxqV7yU5OfDGG3DnnfDgg9d3eFK0KYCLiIiI\niEjx0rUr9OwJI0fCzz9f8PGsWbBrF7z5pu0IcZGCoj3gIiIiIiJS/Bw7BgEBUK6cbT/4mTPGsrKg\nTh3bRPmGDbZV6+J42gMuIiIiIiJSVJUtC1On2jZ6v/GG/e1PP4W9e22T4wrfUtAUwEVEREREpHi6\n7z7o3x/GjIEffuD0aVvwbt4c2rYt7MFJUaQALiIiIiIixdfYsXDrrdC3L59MzGL/fs1+i+MogIuI\niIiISPHl7Q3Tp3NqZzKjXs8kLAxCQwt7UFJUKYCLiIiIiEjxFhbGh01n82eGFyO6birs0UgRpgAu\nIiIiIiLF2vHjMGbH/fzLM5Zm74fDiROFPSQpohTARURERESkWPvgAzh61GLEhDKQmAgvvljYQ5Ii\nSgFcRERERESKraNH4f33oUsXaNg/CJ5/Hj75BL75prCHJkWQAriIiIiIiBRb779vW3E+fPiZN0aM\nAH9/2/FkqamFOjYpehTARURERESkWDp8GMaPh27d4I47zrzp7g4zZsDBgxAVVajjk6JHAVxERERE\nRIqld96BjAx4442/fXD33TBkiC2IL1pUGEOTIkoBXEREREREip0DB+Cjj6B3b6hbN58LXnsN6teH\ngQMhJeW6j0+KJgVwEREREREpdkaNgpwcGDbsIhe4utpmwI8ehSefvK5jk6JLAVxERERERIqVfftg\nyhR49FGoVesSF9avD6+/Dp99BvPnX7fxSdGlAC4iIiIiIsXKyJG2P4cOvYyLX37Ztif8iSdshdlE\nroFDA7hlWWUsy/qPZVnbLcv61bKspmfeH3zmvW2WZY1x5BhERERERETO+u03+PRTGDQIbrvtMhq4\nuNiWop88aWtkjMPHKEWXo2fAxwPLjDH1gPrAr5ZltQLuB+obYwKA9xw8BhEREREREQDefNO2vXvI\nkCto5O9v2zS+aBHMmuWwsUnR57AAbllWaaAlMA3AGJNljEkFHgfeNsZknnn/T0eNQURERERE5Kzt\n2yEmxlZTrUqVK2z8zDMQHAxPPw3JyQ4ZnxR9jpwBrwkcBqZblrXJsqyplmWVBOoALSzLWm9ZVpxl\nWXfn19iyrIGWZW20LGvj4cOHHThMEREREREpDt54Azw94aWXrqKxszNER0N2NvTvr6XoclUcGcBd\ngLuAScaYBsAp4JUz75cD7gFeBOZblmX9vbExZooxppExplHFihUdOEwRERERESnqNm+2FTN/5hm4\n6njh6wvvvgvffgv/938FOj4pHhwZwJOBZGPM+jOv/4MtkCcD/zU2G4A8oIIDxyEiIiIiIsXcsGFQ\nujQ8//w13uixx6B1a3juOdizp0DGJsWHwwK4MeYgkGRZVt0zb7UGEoAvgFYAlmXVAVyBI44ah4iI\niIiIFG8bN8KXX8ILL0DZstd4MycnWxl1Jyd45BHIyyuQMUrx4Ogq6IOB2ZZlbQaCgFHAp0Aty7K2\nAvOAvsZoA4WIiIiIiDjGa69B+fK25ecFwscHPvgA4uJg4sQCuqkUBy6OvLkxJh5olM9HvRzZr4iI\niIiICMCaNbBsGYwZA15eBXjjRx6BhQtt55n9619Qp04B3lyKKkfPgIuIiIiIiBSa116DSpVsR48V\nKMuyFWJzd4e+fSE3t4A7kKJIAVxERERERIqk77+H//0P/v1v2/FjBa5qVfjwQ/jhB3jvPQd0IEWN\nAriIiIiIiBQ5xthmv2+9FQYOdGBHPXpAeLitzPrWrQ7sSIoCBXARERERESlyvvkG1q6FoUNtq8Qd\nxrJg0iTbGWd9+kB2tgM7k5udAriIiIiIiBQpxtiCd82atlppDlexIkyeDJs2wahR16FDuVkpgIuI\niIiISJHy5Zfw00+2VeGurtep065doWdPGDkSfv75OnUqNxvrZjiCu1GjRmbjxo2FPQwREREREbnB\n5eVBUBBkZsK2beDi0IOX/+bYMQgIgHLlbL8BcHO7jp3f3CzL+skYk98R1kWKZsBFRERERKTIWLAA\ntmyBN974K3z7+fld8X2OHDlCt27dCAsLo23btgAYY3jqqado2rQpd999N3PnzgUgOjqakSNHQtmy\nMHWqLfm/8cZ595s5cyaNGzemZcuWdO/enczMTAAiIiJo1qwZTZo0ITo6+rw2O3fupESJEqxevfqK\nxy83JgVwEREREREpEnJy4PXXITAQunW7tntFRUUxbNgwvv/+e7799lsAtm3bxrZt21i3bh3ff/89\nQ4cOvbDhffdB//4wZozteLIzgoODWbduHStXrsTHx4eYmBgARo0axdq1a4mLi2PkyJGcPn3a3mbE\niBGEhIRc24PIDeV6LsgQERERERFxmDlzYMcOWLAgjz59+pCUlMRdd90F2GapFy5cCEBycjITJkyg\nRYsW9OvXjxIlSvDHH3+QkpLCokWLKF++PFu3buX999/nt99+o1u3bjzxxBNUrVoVV1dXsrOzOXHi\nBOXKlbP3vX79ejp16mS79+jRtFi+HPr2tRVm8/SkVq1a9mvd3NxwOTM9X7t2bQBcXV1xdnbGsiz7\n/SpXroyzs/N1+e7k+tAMuIiIiIiI3PSys2H4cGjQAJycvqRkyZLExcXx4IMPkpOTc+aabBYvXszn\nn3/Os88+a28bEBDA0qVL6dy5M/Pnz+fPP/9ky5YtPPPMMyxfvpw5c+bw66+/UrZsWWrXrk2dOnUI\nCgo6bwb8vHsPHQrTp8POnfDvf583zu3bt7Ns2TK6/W2KfvTo0XTv3h23M/vG33rrLV555RVHfV1S\nSDQDLiIiIiIiN71nn4Xff4fXXoNdu3bSuHFjAJo0aWKfVb777rsBqFGjBmlpafa2DRs2BMDHx4ff\nfvuNsmXLUrVqVerXrw9AaGgoW7ZsISkpif3797N7927S0tJo0aIF7du3z/feJxs3pmPVqjB+PCN9\nfQkePJjk5GT69u3LvHnzcD/ncPKZM2eyefNm+57ypUuX0qhRI8qXL+/Ir0wKgWbARURERETkpvbF\nF/Dxx7a/v/cepKbW5uwpSj/++CNnT3766aefANi3bx/e3t729mcDOtgKrbm7u1OrVi2SkpLs7fz8\n/DDGULZsWZydnfHy8iIrK4vc3Nx8712qVClid+4k1s+P4HHjOJKYSHh4OJMnT8bX19fe35dffsmc\nOXOYNWsWTk62eBYfH09sbCzt27dn+fLlvPDCC+zdu9cRX51cZ5oBFxERERGRm9qKFXD2dOWMDEhL\nu5+0tP8QEhJCkyZN7PutPT096dChA3/88Qfjxo275D3Hjx9Pr169yM7OJiwsjLvuuovc3Fzmzp1L\ncHAwmZmZDB48GE9Pz4vfu2RJiI6GFi14o2NH9qem2pe+9+7dm/79+9OzZ0/q1atnr7Q+e/ZsXn31\nVV599VUA+vXrR2RkJNWrVy/gb00Kg84BFxERERGRm9qiRdCjB6Sng6cnzJ0LnTuff010dDTJycn5\nVy53tBdftE3NL1sG7dpd//5vAjoHXERERERE5CbQubMtdD/5ZP7hu9CNGAH+/rbjyVJTC3s0Uog0\nAy4iIiIiIuJoP/4ITZtCr162ZelyHs2Ai4iIiIiISMG4+24YMgRmzLCtmZdiSQFcRERERETkenjt\nNahfHwYOhJSUwh6NFAIFcBERERERkevB1dU2A370qG3DuhQ7CuAiIiIiIiLXS/368Prr8NlnMH9+\nYY9GrjMFcBEREZH/b+/eo+ys63uPvz9J5A5BClIkauUiJxKBAlJzgmYCxzSiJW1BWw+3YhAQlJsL\nMUe80HKQgicG6TkURQhQvFBaFEFDAJNAXQIC4RKKUpRokxJDIBRSMCTke/7YDzBMZhIIyd6Tmfdr\nrVkzz2//9vN8Z6/f+s189vN7ni1J7XTGGa1rwk84ARYu7HQ1aiMDuCRJkiS107BhraXoS5fCccfB\nBvDJVFo3DOCSJEmS1G4jR8I557TuiH7llZ2uRm1iAJckSZKkTjj5ZNh/fzjpJJg/v9PVqA0M4JIk\nSZLUCUOHwrRpsHw5TJrkUvRBwAAuSZIkSZ2y885w/vkwYwZ84xudrkbrmQFckiRJkjrp+OPhwAPh\ntNPg0Uc7XY3WIwO4JEmSJHXSkCFw6aWt70cfDStXdroirScGcEmSJEnqtLe+FaZOhdmz4e/+rtPV\naD0xgEuSJElSf3D00fDBD8JnPwsPP9zparQeGMAlSZIkqT9IWjdi22QTOOooeOGFTlekdcwALkmS\nJEn9xQ47tJag3347fOUrna5G65gBXJIkSZL6k49+FA45BL7wBZg7t9PVaB0ygEuSJElSf5LARRfB\n8OFw5JGwfHmnK9I6YgCXJEmSpP5mu+3g7/8e5syBc87pdDVaRwzgkiRJktQf/fmfw2GHwdlnwz33\ndLoarQMGcEmSJEnqry68sHU2/MgjYdmyTlej18kALkmSJEn91RvfCJdcAg8+CF/6Uqer0etkAJck\nSZKk/uygg2DSJDjvvNbHk2mDZQCXJEmSpP5uyhQYMQKOOgqefbbT1WgtGcAlSZIkqb/baiu47DJ4\n+GH43Oc6XY3WkgFckiRJkjYEBxwAn/wkTJ0Ks2d3uhqtBQO4JEmSJG0ozj0XdtkFjj4annmm09Xo\nNTKAS5IkSdKGYvPNYdo0mDcPTj+909XoNTKAS5IkSdKGZMwY+PSn4eKL4cYbO12NXgMDuCRJkiRt\naP7mb2DkyNbHkz31VKer0atkAJckSZKkDc0mm8Dll8PChXDKKZ2uRq+SAVySJEmSNkTvfjdMntwK\n4tdd1+lq9CoYwCVJkiRpQ/X5z8Oee8Kxx8ITT3S6Gq2BAVySJEmSNlQbbQRXXAFPPgknntjparQG\nBnBJkiRJ2pDtsQd88Yvw3e/C1Vd3uhqthgFckiRJkjZ0Z5zRuib8hBPgt7/tdDXqgwFckiRJkjZ0\nw4a1bsa2dGnrevCqTlekXhjAJUmSJGkgGDkSzjmndUf0K6/sdDXqhQFckiRJkgaKk0+G/feHk06C\n+fNf9dMWLlzI6NGjGTduHMuWLeOQQw6hq6uLO++8k8MOO6zP502fPp0r1yLs33vvvdx6662v+XkA\nSfZK8r7VPN6V5JJe2jdJclWS25rvm/TS5zNJ7kjykyQXpmXTJDcl+Zcktyf5QI/njEtSSUasqXYD\nuCRJkiQNFEOHwrRpsHw5TJr0qpeiz5w5k/HjxzNz5kyefPJJFi9ezKxZs9hvv/246qqr+nzehAkT\nOOKII15zma8ngAN7AX0G8NX4K+DnVfVe4BfNdk/XVtUfVdUYYHvgAGAF8PGq2h/4EDD1xc5JApwG\n3PVqCjCAS5IkSdJAsvPOcP75MGMGfOMbvXaZPHkyY8eOZfTo0Vx++eWcddZZXHHFFRxzzDEce+yx\n3H///XR1dbF06VJ22WUXAJYsWcIhhxzC2LFjGTduHAsXLmTatGmcffbZAMyePZuxY8fS1dXF8ccf\nT1Uxb9489tlnHw4//HD23ntvpk5tZdcpU6bwzW9+k66uLhYsWACwW5KpSWYkuSXJxgBJPtWcsf5p\nkmOa8k8DJiWZlWTHvl6FJNcmuTfJh5u2scD1zc8/aLZfoar+rdvmMmBFVS2vqnlN23PAym59Pgzc\nCPxXH3W8wrBX00mSJEmStAE5/vjWMvTjjmt9AWy5JTz9NNOnT2fJkiXMnj2bZ599ltGjR3PGGWew\nYMECzjzzTObNm8cxxxzDzTff/IpdfvnLX2b8+PEc1+xv5cqXc2hVccoppzBr1iyGDx/Oqaeeyg03\n3MCoUaN47LHHuO222xgyZAgjR47klFNO4bTTTmP+/PmceeaZ3Q8xq6pOSfJ14P1JfglMoHW2ewhw\nW5JrgSnAiKo6ezWvwHbA+4HNgLuS/BPwe8CS5vGngG36enKSscAOQM/T9F8Fzmv6vAE4htZZ8UNX\nU8tLDOCSJEmSNNAMGQIvvPDKtmeeAeCBBx5g9uzZdHV1AbBs2TKeeOKJNe5y7ty5fPzjH+92iJcX\nVC9evJh58+YxceJEAJYuXcpuu+3GqFGjGDlyJJttthkAQ4cOXd0h7m6+/4ZWWN4UeCcws2nfCnjL\nGgttmVNVK4CnkyyiFcifBLZuHh8OPJlkF+DF68WPqapHkuwBnAv8SdXLa/iTfFkIE9wAAAx8SURB\nVB54uqoua5qOBf6hqp5vrURfMwO4JEmSJA0iu+++O+PHj+eCCy4A4Pnnn+db3/oW89dw07ZRo0Yx\na9Ysdt11V+CVZ8C33XZbdtppJ66//nq22GILAJYvX86CBQvoLZxutNFGrFixomdz9wvWAzwEzAEO\nqapK8oaqWp7knaw5y+6VZBitEL898DgwGzgIuLf5PruqHgG6XjpoK5Bf2hxzcbf2TwK7Akd1f0lo\nLXX/n8AewJVJPlBVv+urKK8BlyRJkqRB5KCDDmLLLbekq6uLcePGMWnSpFf1vMmTJ/PDH/6QsWPH\ncsABB7Bo0aKXHkvClClTOPjggxk3bhwHHnggDz30UJ/7GjNmDDNmzODQQw9l4cKFvfapqrnAzcDs\nJDOB7zeh+ifA+CTXJPn9Pg7xH8A/ArcBZ1bVSmAa8K4ktwHvarZ7mkrrLPnlzTXmH0zyJuACYCdg\nZtM+tKo+UVXjq2oCcD9wxOrCN0BqA/iA9n333bfuuutV3VROkiRJkgSw1VYvLTsHXroGvD9KcndV\n7dvpOtY3l6BLkiRJ0kDUT8P2upTkPGC/bk3PV9X4TtWzJgZwSZIkSdIGqao+0+kaXguvAZckSZIk\nqQ0M4JIkSZIktYEBXJIkSZKkNjCAS5IkSZLUBgZwSZIkSZLawAAuSZIkSVIbGMAlSZIkSWoDA7gk\nSZIkSW1gAJckSZIkqQ0M4JIkSZIktYEBXJIkSZKkNjCAS5IkSZLUBgZwSZIkSZLawAAuSZIkSVIb\nGMAlSZIkSWoDA7gkSZIkSW1gAJckSZIkqQ0M4JIkSZIktYEBXJIkSZKkNjCAS5IkSZLUBqmqTtew\nRkkeB369Dna1LbB4HexHA4PjQT05JtSTY0I9OSbUneNBPTkm1t7bqmq7Thexvm0QAXxdSXJXVe3b\n6TrUPzge1JNjQj05JtSTY0LdOR7Uk2NCa+ISdEmSJEmS2sAALkmSJElSGwy2AP71ThegfsXxoJ4c\nE+rJMaGeHBPqzvGgnhwTWq1BdQ24JEmSJEmdMtjOgEuSJEmS1BEGcEmSJEmS2mBAB/AkQ5PMSXJ9\ns/32JHckeSTJd5Ns1Oka1T5Jtk5yTZKfJ3koyegk2yS5Kcm/Nd/f2Ok61T5JTk3yYJK5Sb6dZBPn\nicElyaVJFiWZ262t13khLV9rxsb9SfbuXOVaH/oYD+c3fzfuT3Jtkq27PTa5GQ+/SPLHnala61Nv\nY6LbY59OUkm2bbadIwaBvsZEkk81c8WDSc7r1u48oVcY0AEcOBl4qNv23wJfrapdgCXApI5UpU65\nAJheVf8N2JPW2PgscEtV7Qrc0mxrEEiyI3ASsG9VjQKGAn+J88RgMw2Y0KOtr3nhA8CuzdexwEVt\nqlHtM41Vx8NNwKiq2gN4GJgMkOSdtOaM3Zvn/L8kQ9tXqtpkGquOCZK8BRgP/KZbs3PE4DCNHmMi\nyThgIrBnVe0OfKVpd57QKgZsAE8yAvggcEmzHeAA4Jqmy+XAn3amOrVbkuHA+4BvAlTV81X1FK3J\n8vKmm2Ni8BkGbJpkGLAZ8BjOE4NKVd0KPNmjua95YSJwRbXcDmydZIf2VKp26G08VNWMqlrRbN4O\njGh+ngh8p6qWVdWjwCPAfm0rVm3RxxwB8FXgM0D3uxk7RwwCfYyJTwDnVtWyps+ipt15QqsYsAEc\nmEprYlzZbP8e8FS3P6LzgR07UZg64u3A48BlzWUJlyTZHNi+qh5r+iwEtu9YhWqrqlpA6x3q39AK\n3v8J3I3zhPqeF3YE/r1bP8fH4PMx4EfNz46HQSrJRGBBVd3X4yHHxOD1DuC9zSVss5O8u2l3TGgV\nAzKAJ/kQsKiq7u50Leo3hgF7AxdV1R8C/0WP5ebV+kw+P5dvkGiu651I682ZNwOb08syQw1uzgt6\nUZLPASuAqzpdizonyWbA/wK+0Ola1K8MA7YB3gOcDlzdrL6VVjEgAzgwBjg4yTzgO7SWlF5AaynQ\nsKbPCGBBZ8pTB8wH5lfVHc32NbQC+W9fXB7WfF/Ux/M18PwP4NGqeryqlgP/TGvucJ5QX/PCAuAt\n3fo5PgaJJH8FfAg4rHlTBhwPg9XOtN64va/5P3MEcE+S38cxMZjNB/65ufzgTlorcLfFMaFeDMgA\nXlWTq2pEVf0BrRsf/LiqDgNmAoc23Y4Cvt+hEtVmVbUQ+PckuzVNBwL/ClxHayyAY2Kw+Q3wniSb\nNe9SvzgmnCfU17xwHXBkc6fj9wD/2W2pugaoJBNoXdJ2cFU92+2h64C/TLJxkrfTuvHWnZ2oUe1T\nVQ9U1Zuq6g+a/zPnA3s3/2c4Rwxe3wPGASR5B7ARsBjnCfVi2Jq7DChnAN9JcjYwh+aGXBo0PgVc\n1Xys1K+Ao2m9CXV1kknAr4GPdLA+tVFV3ZHkGuAeWstK5wBfB27AeWLQSPJtoAvYNsl84IvAufQ+\nL/wQOIjWTXSepTWHaADpYzxMBjYGbmpWlN5eVcdX1YNJrqb1xt0K4MSqeqEzlWt96W1MVFVffxec\nIwaBPuaJS4FLm48mex44qlkt4zyhVeTllVSSJEmSJGl9GZBL0CVJkiRJ6m8M4JIkSZIktYEBXJIk\nSZKkNjCAS5IkSZLUBgZwSZIkSZLawAAuSRqQkryQ5N4kc5P8IMnWa+i/dZIT1uI4SfLjJFs120vX\ntuZ2SLJDkhnrYb/bJZm+rvcrSdJAYgCXJA1Uz1XVXlU1CngSOHEN/bcGXnMAp/W5v/dV1dNr8dxO\nmADcuK53WlWPA48lGbOu9y1J0kBhAJckDQY/BXYESLJFkluS3JPkgSQTmz7nAjs3Z83Pb/qenuRn\nSe5PclYf+z4M+H7PxiRdSWYn+X6SXyU5N8lhSe5sjrtz0+9PktyRZE6Sm5Ns37Rvl+SmJA8muSTJ\nr5Ns2zx2eLOfe5NcnGRo8zWtOeP/QJJT+6h3AvCjHrVunuSGJPc1z/+Lpn2f5ne4O8mNSXZo2ndp\nar2veR13bnb1veb1kCRJvTCAS5IGtCRDgQOB65qm3wF/VlV7A+OA/5MkwGeBXzZnzU9PMh7YFdgP\n2AvYJ8n7ejnEGODuPg6/J3A8MBI4AnhHVe0HXAJ8qunzL8B7quoPge8An2navwj8uKp2B64B3tr8\nPiOBvwDGVNVewAu0Qu9ewI5VNaqq3gVc1sdrsVtV/WuPhyYA/1FVezYrBqYneQNwIXBoVe0DXAr8\n76b/VcD/rao9gf8OPNa03wW8t4/XQpKkQW9YpwuQJGk92TTJvbTOfD8E3NS0BzinCdMrm8e37+X5\n45uvOc32FrQC+a09+m1TVc/0UcPPquoxgCS/BF689voBWuEfYATw3ebs8kbAo037/sCfAVTV9CRL\nmvYDgX2An7XeN2BTYBHwA2CnJBcCN3Q7Vnd/BNzRS/sDtN6I+Fvg+qq6LckoYBRwU3OcobSWmG9J\nK+hf29T2u277WQS8uY/XQpKkQc8ALkkaqJ6rqr2SbEbrmucTga/ROlu8HbBPVS1PMg/YpJfnB/hy\nVV28huOsSDKkqlb28tiybj+v7La9kpf/Bl8ITKmq65J0AV9aw/ECXF5Vk1d5INkT+GNaZ90/Anys\nR5cPAKvcKK2qHk6yN63r2c9OcgtwLfBgVY3ucYwtV1PbJsBza6hfkqRByyXokqQBraqeBU4CPp1k\nGDAcWNSE73HA25quzwDdw+WNwMeSbAGQZMckb+rlEL8AdnodJQ4HFjQ/H9Wt/Se0QjTNcvg3Nu23\nAIe+WEuSbZK8rbk+fEhV/RNwJrB3L8c6ELi5Z2OSNwPPVtU/AOc3z/0FsF2S0U2fNyTZvTnbPz/J\nnzbtGzdvcgC8A5i7Ni+CJEmDgWfAJUkDXlXNSXI/8FFa1y//IMkDtK5Z/nnT54kkP0kyF/hRcx34\nSOCnzRLspcDhtJZZd3cD0AU8spblfQn4x2aJ+Y+BtzftZwHfTnIErZvILQSeqarFSc4EZiQZAiyn\ndXb/OeCypg3gFWfIk2wH/K6P5fLvAs5PsrLZ3yeq6vkkhwJfSzKc1v8MU4EHaV3PfnGSv276fxj4\nFa1l9Tes5esgSdKAl6rqdA2SJG2wmmu3r6iq96/j/W4MvFBVK5qz0Bc1N11b2/0dDoyoqnPXWZGr\nHuNWYGJVLVljZ0mSBiEDuCRJr1OSjwDT1+VngSfZFbia1uVizwMnVNXP1tX+17XmDPuYqvpep2uR\nJKm/MoBLkiRJktQG3oRNkiRJkqQ2MIBLkiRJktQGBnBJkiRJktrAAC5JkiRJUhsYwCVJkiRJaoP/\nD2Ap/+Uzoo3LAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Y6-2sm9W50JB", + "colab_type": "text" + }, + "source": [ + "# GPU Memory Usage\n", + "\n", + "Measuring the 'practical' GPU memory consumption is a bit of a challenge. By 'practical', what I want to capture is relative GPU memory usage that indicates what the likely maximum batch sizes will be. With `cudnn.benchmark = True` set, the torch memory allocator metrics didn't prove reliable. In the end, using pynvml (same output as nvidia-smi) and taking a sample part way through the validation set is the most consistent. \n", + "\n", + "I've verified the sampling by pushing batch sizes for several of the models to the point where they fail with OOM exception. The relative measures of the memory usage match the relative batch sizes -- I can roughly predict where the largest batch size will be from the measure. \n", + "\n", + "On a T4 colab instance I pushed:\n", + "- efficientnet_b2-260 to a batch size of 480\n", + "- tf_efficientnet_b2-260 to a batch size 448 (failed at 480)\n", + "- ig_resnext101_32x8d-224 to a batch size of 512\n", + "\n", + "Overall, the EfficientNets are not particularly memory efficient. The monster ResNext101-32x8d with 88M params is more memory efficient at 224x224 than the EfficientNet-B2 at 260x260 with 9.1M. This is especially true for the 'tf' variants with the 'SAME' padding hack enabled, there is up to a 20% penalty for this in memory churn that does impact the max useable batch size." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "Qmr4J7-EgifY", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 340 + }, + "outputId": "d8d0db4a-ccca-4ac2-85e6-011535f29c1e" + }, + "source": [ + "print('Results by GPU memory usage:')\n", + "results_by_mem = list(sorted(results.keys(), key=lambda x: results[x]['gpu_used'], reverse=False))\n", + "for m in results_by_mem:\n", + " print(' {:32} Mem: {}, Rate: {:>6.2f}, Top-1 {:.2f}, Top-5: {:.2f}'.format(\n", + " m, results[m]['gpu_used'], results[m]['rate'], results[m]['top1'], results[m]['top5']))" + ], + "execution_count": 46, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Results by GPU memory usage:\n", + " resnet50-224 Mem: 1530, Rate: 159.51, Top-1 66.81, Top-5: 87.00\n", + " gluon_seresnext50_32x4d-224 Mem: 1670, Rate: 150.43, Top-1 68.67, Top-5: 88.32\n", + " gluon_seresnext101_32x4d-224 Mem: 1814, Rate: 131.57, Top-1 70.01, Top-5: 88.91\n", + " resnet50-240-ttp Mem: 2084, Rate: 154.35, Top-1 67.02, Top-5: 87.04\n", + " gluon_seresnext101_32x4d-260-ttp Mem: 2452, Rate: 95.84, Top-1 71.14, Top-5: 89.47\n", + " resnet50-260-ttp Mem: 2532, Rate: 135.92, Top-1 67.63, Top-5: 87.63\n", + " gluon_seresnext50_32x4d-260-ttp Mem: 2586, Rate: 126.52, Top-1 69.67, Top-5: 88.62\n", + " dpn68b-224 Mem: 2898, Rate: 155.15, Top-1 65.60, Top-5: 85.94\n", + " efficientnet_b0-224 Mem: 2930, Rate: 165.73, Top-1 64.58, Top-5: 85.89\n", + " gluon_seresnext101_32x4d-300-ttp Mem: 3252, Rate: 74.87, Top-1 71.99, Top-5: 90.10\n", + " gluon_seresnext50_32x4d-300-ttp Mem: 3300, Rate: 104.69, Top-1 70.47, Top-5: 89.18\n", + " efficientnet_b1-240 Mem: 3370, Rate: 151.63, Top-1 67.55, Top-5: 87.29\n", + " ig_resnext101_32x8d-224 Mem: 3382, Rate: 83.35, Top-1 73.83, Top-5: 92.28\n", + " efficientnet_b2-260 Mem: 3992, Rate: 144.20, Top-1 67.80, Top-5: 88.20\n", + " ig_resnext101_32x8d-300-ttp Mem: 4658, Rate: 43.62, Top-1 75.17, Top-5: 92.66\n", + " tf_efficientnet_b2-260 Mem: 4690, Rate: 142.73, Top-1 67.40, Top-5: 87.58\n", + " tf_efficientnet_b3-300 Mem: 8638, Rate: 119.13, Top-1 68.52, Top-5: 88.70\n", + " tf_efficientnet_b4-380 Mem: 11754, Rate: 69.10, Top-1 71.34, Top-5: 90.11\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "dLlD9SUufV4A", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 621 + }, + "outputId": "ab03124d-b28e-4615-d4d9-b3012a774328" + }, + "source": [ + "mem_effnet = np.array([results[m]['gpu_used'] for m in names_effnet])\n", + "mem_effnet_tf = np.array([results[m]['gpu_used'] for m in names_effnet_tf])\n", + "mem_resnet = np.array([results[m]['gpu_used'] for m in names_resnet])\n", + "mem_resnet_ttp = np.array([results[m]['gpu_used'] for m in names_resnet_ttp])\n", + "\n", + "fig = plt.figure()\n", + "ax1 = fig.add_subplot(111)\n", + "ax1.scatter(mem_effnet, acc_effnet, s=10, c='r', marker=\"s\", label='EfficientNet')\n", + "ax1.plot(mem_effnet, acc_effnet, c='r')\n", + "annotate(ax1, mem_effnet, acc_effnet, names_effnet, xo=-.3, align='right')\n", + "\n", + "ax1.scatter(mem_effnet_tf, acc_effnet_tf, s=10, c='#8C001A', marker=\"v\", label='TF-EfficientNet')\n", + "ax1.plot(mem_effnet_tf, acc_effnet_tf, c='#8C001A')\n", + "annotate(ax1, mem_effnet_tf, acc_effnet_tf, names_effnet_tf, xo=-.3, align='right')\n", + "\n", + "ax1.scatter(mem_resnet, acc_resnet, s=10, c='b', marker=\"o\", label='ResNet')\n", + "ax1.plot(mem_resnet, acc_resnet, c='b')\n", + "annotate(ax1, mem_resnet, acc_resnet, names_resnet, xo=.5, align='left')\n", + "\n", + "# Too busy\n", + "#ax1.scatter(mem_resnet_ttp, acc_resnet_ttp, s=10, c='#43C6DB', marker=\"o\", label='ResNet TTP')\n", + "#ax1.plot(mem_resnet_ttp, acc_resnet_ttp, c='#43C6DB')\n", + "#annotate(ax1, mem_resnet_ttp, acc_resnet_ttp, names_resnet_ttp, xo=.5, align='left')\n", + "\n", + "ax1.set_title('Top-1 vs GPU Memory')\n", + "ax1.set_ylabel('Top-1 Accuracy (%)')\n", + "ax1.set_xlabel('GPU Memory (MB)')\n", + "ax1.legend()\n", + "plt.show()" + ], + "execution_count": 47, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA7AAAAJcCAYAAADATEiPAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzs3Xd0VVXexvHvJoQaRKQMTSkqSosB\nIi0IAQKoNAFFVBgQYaQLSMBIVXpHugUEhEEERRhERUqoIjUjUiaiBgmI1EiHJOz3jxvum1BjSHJy\nk+ezVpY5Ze/znBtnzC97n32MtRYRERERERGRtC6T0wFEREREREREEkMFrIiIiIiIiHgEFbAiIiIi\nIiLiEVTAioiIiIiIiEdQASsiIiIiIiIeQQWsiIiIiIiIeAQVsCIiIiIiIuIRVMCKiEiaYYw5H+/r\nmjHmUrztV5L5WjmNMV8YYw4ZY6wxpmpy9n+H6xY1xsw2xvwRd1+/GGNmGWMejTv+eFye6/f9qzHm\nzXjHYm7R56fGmAG3ud6ouP5ev2F/v7j9b6XEfYqIiKQEFbAiIpJmWGt9rn8BvwON4+1bkNyXA0KB\nl4Azydz3LRlj/gFsxfXf3+pALsA/bl/deKfGxvsc2gEjjDGB93DpcOCfN+z7Z9x+RxljMjudQURE\nPIcKWBER8RjGmOzGmGlxo5eRxpixxhjvuGNPG2MOGmPeMcacNsb8Zox54XZ9WWsvWmsnW2u3ANfu\nct22xphNN+wLMcZ8Fvd9U2PMAWPMOWPMYWNMj9t0FQwctda2s9b+Zl3OWGs/tNbOvE3ODbgKzXJ3\nyngXm4BCxpiH4/L6A1eBPTfcUzNjzI/GmChjzEZjTJl4x44ZY3obY/bGjQzPMMYUMsZ8Z4w5a4z5\nxhhzX7zzWxhj9sX1tfr6CHO8vvoYY/YCZ40xA40xC27I8oExZvQ93LOIiKRDKmBFRMSTvAP4AuWB\nSkAg0Dfe8eJAFqAg0BGYa4wpkQzXXQpUNMY8FG/fy8C/476fDfzTWpsL8AM23qafIOCLxF7UuAQC\npYCwvxs6HgvM5/9HYf8JzLvhWlWB6cCrQF7gE+DLG0ZImwG1gDJAK2AZ0Bv4B+ADdI7rqzwwB+gC\nFADWA8tu6OtFoF68azUxxuSMa58VeOHGjCIiIipgRUTEk7wCDLbWnrTW/gkMA9rEOx4DvGOtvWqt\nXQ2sBp6/14taa88CK3EVbdcLtKJx+wBigbLGmFzW2lPW2t236SofcOz6hjGmZdwI5TljzPJ453kZ\nY6KA08A04A1r7SbuzTygtTEmC67P5N83HH8dmGqt3WmtjbXWfgBkxfWHgusmxX32vwNbgM3W2j3W\n2ku4itkKcee1ApZaa0OttVeBEUB+XNOlr5torT1qrb1krY0AdgDN4441Bn6z1u69x3sWEZF0RgWs\niIh4BGOMwTWyeije7kNAkXjbJ6y1l284XtgYUyreokgnkxjh37ielwXX6OuSuOIMoCnQAvjdGLPW\nGPPkbfo4BRS6vmGt/cxaez8Qgmvk+LpYa+391to81tqy8aYXxwCZjDE3/vfbG4i+U3hr7UHgT2A4\nsDvuDwDxFQPejiuoo+IK6Pwk/Hzjt7l0i22fuO8LE+/nZK2NBY7c0NfhG64/F2gd931rXKOyIiIi\nCaiAFRERj2CttbhGL4vF2/0QrsLounzGmGw3HD9qrQ2PtxhUviRGWAmUMMaUxjXC6B7BtNZ+b61t\nhGsq7SpuHt28bg3QLK4YT4rIuH8Wu2F/CRIW9rczD3iTW0/NPQwMiiucr3/lsNYmespzPEfjZzTG\neOEqXuP/rOwNbZYAVY0xZYH63P4zFBGRDEwFrIiIeJKFwGBjTF5jTAGgP65nO6/zBgYaY7IYY+rg\nesby89t1ZozJGq/gzXJD8ZtA3MjuUmBy3HXWx/WR0xjTKm4Bo2jgHLdfFGoMrqnHHxtjSsQ945ob\n13O9dxWXYRkw0hiTxxjjbYxph6tY/C4RXXyCqzj88hbHPgC6G2P843L5GGOaGGNyJCbbDRbhKtRr\nxi2y9Rau0ecdt2tgrT0PLMf1Mw69xQixiIiIClgREfEog4B9wF5cixptxlUUXheBa5rtMVwLK71q\nrf31Dv0dwjX1NS+ugvSSMabgHc7/N66FmBZZa+MXqe3j+voL1wJJN76yBgBr7TGgCmCA73EVuzsB\nL+B2KxffqCNwGddn8GfctZ+x1p66W0Nr7QVr7Wpr7ZVbHNscl+F9IArXyscvc/NI6V1Za38EXovr\n6wSuVwQ1tdbe9A7bG8zFtUCXpg+LiMgtGdeMLBEREc9mjHka1yJEjzidRZLGGFMK1yjtP+IWhhIR\nEUlAI7AiIiLiuLjnZHsD81W8iojI7WS++ykiIiIiKccY8wDwO/Ar0MDhOCIikoZpCrGIiIiIiIh4\nBE0hFhEREREREY/gEVOI8+XLZ4sXL+50DBEREREREUkBO3fuPGmtzX+38zyigC1evDg7dtz21XEi\nIiIiIiLiwYwxhxJznqYQi4iIiIiIiEdQASsiIiIiIiIeQQWsiIiIiIiIeASPeAZWRERERETkRtHR\n0URGRnL58mWno0giZcuWjaJFi+Lt7Z2k9ipgRURERETEI0VGRpIrVy6KFy+OMcbpOHIX1lpOnTpF\nZGQkJUqUSFIfmkIsIiIiIiIe6fLly+TNm1fFq4cwxpA3b957GjFXASsiIiIiIh5LxatnudeflwpY\nERERERER8QgqYEVERERERJLIy8sLPz8/99eoUaMA2LhxI2XLlsXPz49Lly4RHBxM2bJlCQ4OZubM\nmcybN++2fR49epTnn38+yZkmTZrExYsX3dvFixenRYsW7u0lS5bQrl27O/YRFhbGypUrk5whpWgR\nJxERERERkSTKnj07YWFhN+1fsGABISEhtG7dGoAPPviA06dP4+Xlddc+CxcuzJIlS5KcadKkSbRu\n3ZocOXK49+3cuZN9+/ZRpkyZRPURFhbGjh07ePbZZ5OcIyVoBFZERERERCQZffTRR3z22WcMHDiQ\nV155hSZNmnD+/HkqVarEokWLGDJkCOPGjQPg4MGDBAUF8cQTT1CxYkV++eUXIiIiKFeuHACxsbEE\nBwfz5JNP4uvry/vvvw9AaGgogYGBPP/88zz++OO88sorWGuZPHkyR48epXbt2tSuXdud6c0332T4\n8OE3Zb1w4QLt27encuXKVKhQgWXLlnH16lUGDRrEokWL8PPzY9GiRanwqSWORmBFRERERCRjuO8+\nOHfu/7dz5YKzZ++py0uXLuHn5+feDgkJoUOHDmzatIlGjRq5pwL7+Pi4R2qHDBniPv+VV17hrbfe\nolmzZly+fJlr165x/Phx9/FZs2aRO3dutm/fzpUrVwgICKB+/foA7N69m71791K4cGECAgLYvHkz\nPXr0YMKECaxbt458+fK5+2nZsiXTp0/n4MGDCfIPHz6cOnXqMHv2bKKioqhcuTJBQUG8++677Nix\ng6lTp97T55PcUqyANcY8BsQv1UsCg6y1k+KOvwmMA/Jba0+mVA4REREREREgYfF6q+0kuN0U4sTF\nOceRI0do1qwZANmyZbvpnFWrVvHjjz+6pxT/9ddf/Pzzz2TJkoXKlStTtGhRAPz8/IiIiKBGjRq3\nvJaXlxfBwcGMHDmSZ555JkH/y5cvd48IX758md9//z1J95MaUqyAtdb+D/ADMMZ4AUeApXHbDwL1\ngbT7yYiIiIiIiDjMWsuUKVNo0KBBgv2hoaFkzZrVve3l5UVMTMwd+2rTpg0jR450T0++3v/nn3/O\nY489luDcH374IRnSJ7/Uega2LvCLtfZQ3PZEoC9gU+n6IiIiIiIiaUquXLkoWrQoX375JQBXrlxJ\nsHowQIMGDZgxYwbR0dEAhIeHc+HChbv2e+4Wo8ve3t706tWLiRMnJuh/ypQpWOsqzXbv3n3HPpyW\nWgVsK2AhgDGmKXDEWvvfOzUwxvzLGLPDGLPjxIkTqZFRRERERETSs1y57rydBNefgb3+9dZbb/2t\n9p988gmTJ0/G19eX6tWrc+zYsQTHO3ToQJkyZahYsSLlypXj9ddfv+tI67/+9S+efvrpBIs4Xffa\na68laD9w4ECio6Px9fWlbNmyDBw4EIDatWuzb9++NLeIk7leaafYBYzJAhwFygLngHVAfWvtX8aY\nCMD/bs/A+vv72x07dqRoThERERER8Sz79++ndOnSTseQv+lWPzdjzE5rrf/d2qbGCOwzwC5r7Z/A\nw0AJ4L9xxWtRYJcxpmAq5BAREREREREPlhoF7EvETR+21u6x1haw1ha31hYHIoGK1tpjd+pAJCM6\nduwYb775ptMxEiU0NJQff/zRvT1w4ECKFStGUFBQgvPmzJlD9erVCQgIYNeuXQD88ssvVKpUCR8f\nHzZt2nTba5w9e5bq1asTGBhI5cqVWbNmDQDz5s2jcuXK1KxZk1atWnHlypXb9nHmzBnq169PrVq1\nCAgISJD5umHDhjFnzpyb9vfs2ZOqVatStWpVRo0aBUBkZCS1atXiqaeeIiAggBtninz88cd4e3vf\nNo+IiIiI/D0pWsAaY3IC9YAvUvI6IulRwYIFGT9+fJLaxsbGJnOaO7uxgO3SpQvr1q1LcM6ZM2eY\nPHkyoaGhzJ8/nx49egBQqFAhvvvuO/c70m7Hx8eHDRs2EBoayqeffup+vqRGjRp8//33bNiwgYce\neoj58+ffto8FCxYQEBDA+vXrGT58+C1f5n07Xbt2ZevWrWzZsoVly5bxyy+/kCtXLhYvXszGjRv5\n8MMP6dWrl/v8y5cv8/nnn/PQQw8l+hoiIiIicmcpWsBaay9Ya/Naa/+6zfHiegesyK1FREQQFBTE\n3r17qVy5Mg0bNuSf//xnghdfxxcaGkqDBg144YUX6N+/P4cPH6Zhw4bUqVOHhg0bcuLECS5evMgz\nzzxDrVq1CAwMJDw8nNDQUOrWrUvLli0pX748ixcvBrhl+9OnT/Pkk09y/Phx9u3bR82aNTl+/Dhz\n5sxh+PDhBAYGEhsbS6FChciUKeH/vWzbto2nnnqKLFmyUKJECc6dO8eVK1fIkSMHDzzwwF0/j0yZ\nMpE5s+vNX2fPnsXX1xeAkiVL4uXlBUDWrFnJnDkzV65coUaNGhw4cIBjx45RuXJlzpw5Q+nSpTkb\n97LyM2fOUKBAAQA2bNhAhQoVaNy48W2XjH/00UcT5PDy8iJ37tzuPq5f+7rJkyfTqVMnjDF3vTcR\nERERSZwUew+siCSPkJAQJk+eTNWqVenYseMdzz169CgrVqzA29ubVq1aMXDgQKpWrcqyZcsYPXo0\nL7/8Mnny5OHrr78G4Nq1axw9epSoqChWrVrFn3/+SZMmTXjhhRcIDg6+qf24ceMYP348bdu25ezZ\ns8ydO5cCBQrQrl07HnnkEVq3bn3bbKdOnSJPnjzu7fvvv5/Tp09TqFChRH8WR44c4cUXXyQ8PJzZ\ns2cnOHbgwAG++eYbNm7cSNasWZk1axavvvoquXPnZtKkSeTJk4dKlSoxaNAgypUrR1RUlHvKcu/e\nvVm2bBkPPvjgTe9Yu9GCBQsoWbIkxYsXd++LjY2lR48e9O/fH3AVxxs2bKBv37707Nkz0fcnIiIi\nInemAlYkjVm+HFatgieecG0fPHiQJ598EoAqVaoQGRl527b+/v7uZy737NnjnmYbExPDI488QoUK\nFahUqRKtW7cmb968vPPOOwD4+fnh5eVF4cKFiYqKum17gJo1axISEoKvr697X2I88MAD7r4B/vrr\nr0SNvMZXpEgRNm3aREREBIGBgTRq1AhwPYvatm1bPv30U7JlywbAY489RokSJTh9+jTVq1cHYMyY\nMbRo0YLevXvz/fff07VrV7766ivOnj3rnupbuXJlADZt2sSAAQMAWLFiBT4+PqxevZqPP/6Y//zn\nPwlyvf766zzzzDPuZ35HjhxJ3759/9a9iYiIiMjdpdZ7YEUkERYvhmbNYNo06NEDTpyAhx9+2L04\n0Pbt2+/Y/vpUWoCyZcsyceJEQkND2bRpEx988AFXrlyhd+/ezJ8/n/z58/PJJ58A3HKa663aA8ya\nNYvKlStz8OBBd64sWbLc9X1kVapUYdOmTURHR/P777/j4+ND1qxZE/3ZxF+c6b777iNX3HvbTp48\nSYsWLZg5cyYPP/yw+5zvvvuO6Oho8uXLx/LlywGw1pIvXz4AChQowOnTpwHXi7qv/2Hg+mdco0YN\nQkNDCQ0NxcfHhx9++IGBAweyZMkSsmfP7r5Onz59KFSoEN26dXPvCw8PZ8SIETz99NP88ccfvPji\ni4m+TxEREfEcp06dcr//tWDBghQpUsS9bYxJ8H7YiIiIm9q3a9eOEiVKuM+5/kf3K1euEBQU5H4H\n68aNGylbtix+fn4cOXLkrmuHdOjQgX379iXpnkJDQ9myZYt7e8iQIeTIkYPjx4+79/n4+Ny1nxEj\nRiTp+nejEViRNGTMGLh2zfX95ctw+jTMnz+C9u3bky9fPnLnzk2xYsUS1df48ePp2rUr58+fB6B9\n+/aUKVOGHj16kDlzZq5du8bcuXM5dOhQotv7+/szZ84c1qxZw/Hjx2nRogWrV6+mXr169OzZkxUr\nVvDZZ58xffp0Pv30U/bv309QUBDvv/8+Dz/8MF26dKFWrVoYY3jvvfcA1/OszZs3Z9++fezdu5dn\nn33WPTIc308//USvXr3w8vIiJiaGSZMmAa7/Uz1y5Ih7AaU2bdrQuHFj+vfvz7fffkvmzJkJCgqi\nYsWKdO/enTZt2jB79mwuXbrE6NGj3ffauHFjChcu7C6Mb/Taa68B8Nxzz7nbWGt57733CAgIIDAw\nkPz587N48WK+/PJLd7tHHnkkTb38W0RERJJP3rx5CQsLA1y/k/j4+NCnTx/AVeRdP3YnY8eOvakg\n3b17N4C7fadOnQgJCXE/rrVkyZI79vnRRx/9vRuJ5/of768X0wD58uVj/Pjx7t+dEmPEiBG8/fbb\nSc5xO8Zam+ydJjd/f3974+spRNKb8HAoWxashdhYyJEDFi6EZ56Jdk8L7tixIw0aNLjrX91ERERE\nMoL9+/dTunRpp2MAty5grw8E3E67du1o1KhRgt/tjh8/TvXq1Tlx4gQlSpSgc+fOhISEkDt3bqpX\nr87w4cNp1KgRP/30E7GxsfTr149vvvmGTJky0bFjR7p3705gYCDjxo3D39+fVatWMXjwYK5cucLD\nDz/Mxx9/jI+PD8WLF6dt27b85z//ITo6msWLF5MtWzaqVq2Kl5cX+fPnZ8qUKe5XF86ZM4ddu3bx\nwAMPJLi3+fPnM3nyZK5evUqVKlWYPn06/fv3Z+zYsZQvX56yZcuyYMGCBPd9q5+bMWantdb/bp+z\nRmBF0gBroWtXyJkT3nsPtm+H+vWhSRPYtWsPb7zxBjExMRQvXpznnnuOvn37sm3bNnf7LFmysGrV\nKgfvIHlNmDDBPe33ui+++OJvPzMrIiIiEt/v6zaz75PP3dtl2rTgodoBKXKtS5cu4efnB0CJEiVY\nunTpLc8LDg5m2LBhAO5i76OPPmLcuHGsWLECgO+//95d6MafivzBBx8QERFBWFgYmTNndj8edd3J\nkycZNmwYq1evJmfOnIwePZoJEyYwaNAgwDWyumvXLqZPn864ceP46KOP6NSpU4JCfM2aNfj4+NC+\nfXvee++9BDPl9u/fz6JFi9i8eTPe3t506dKFBQsWMGrUKKZOnZqoEei/SwWsSBrw2WewejVMnQpt\n27q+rqtYsSIbN25McP6YMWNSOWHq6t27N71793Y6hoiIiKQzF/88yb55S7CxsRgvL4rXr5Vi18qe\nPXuSpxAn1urVq+nUqZP7VX43/rF/69at7Nu3j4AAV5F+9epVqlWr5j7evHlzACpVqsQXX3xxx2v1\n6NEDPz8/d2ELruJ2586d7gVHL1265H7FYEpRASvisLNnoVcvqFgROnVyOo2IiIhI+lXqhUasDx7K\n+cg/yFmoAKVeaJSq13/11VfZvXs3hQsXZuXKlSl+PWst9erVY+HChbc8fn1BzetrjNzJ/fffz8sv\nv8y0adMS9N+2bVtGjhyZfKHvQqsQizhs8GA4dgxmzIB4iwiLiIiISDLL5OVFzTGu1+TVHDOATKn8\ny9fHH39MWFhYshWv9erV4/3333cXnzdOIa5atSqbN2/m4MGDAFy4cIHw8PA79pkrVy7OnTt3y2O9\ne/dOcL26deuyZMkS9wrFp0+fdi8Q6u3tTXR0dNJv7jZUwIo4KCwMJk+G11+HuNePioiIiEgKeqxl\nY2pPHspjLRs7HQVwPQMb/3U7V69eTXTbDh068NBDD+Hr68sTTzzBv//97wTH8+fPz5w5c3jppZfw\n9fWlWrVqHDhw4I59Nm7cmKVLl+Ln53fTY2z58uWjWbNm7tcblilThmHDhlG/fn18fX2pV68ef/zx\nBwD/+te/8PX15ZVXXkn0/SSGViEWcci1axAQAL/8Av/7H+TJ43QiEREREc+SllYhlsTTKsQiHmj2\nbNi6FebOVfEqIiIiIpIYmkIs4oCTJ6FfP3jqKWjTxuk0IiIiIiKeQQWsiAP69XOtPjxjBhjjdBoR\nEREREc+gAlYklW3e7Jo+3Ls3lC3rdBoREREREc+hAlYkFcXEQOfO8OCDMHCg02lERERERDyLFnES\nSUWTJ8OePfDFF+Dj43QaERERERHPohFYkVQSGQmDB8Ozz8JzzzmdRkRERESSg5eXF35+fpQrV47G\njRsTFRWVpH4CAwPx9///t8js2LGDwMDAO7aJiIi46d2v6Z0KWJFU0ru3awrxlClauElEREQkvcie\nPTthYWH89NNPPPDAA0ybNi3JfR0/fpyvv/460eergBWRFPHtt7B4MfTvDyVLOp1GRERERFJCtWrV\nOHLkiHt77NixPPnkk/j6+jJ48GAALly4QMOGDXniiScoV64cixYtcp8fHBzM8OHDb+o3NjaW4OBg\nd1/vv/8+AG+99RYbN27Ez8+PiRMnpvDdpQ16BlYkhV2+DN26QalSEBzsdBoRERGRjG35cli1CurX\nhyZNkq/f2NhY1qxZw2uvvQbAqlWr+Pnnn9m2bRvWWpo0acKGDRs4ceIEhQsX5quvvgLgr7/+cvdR\nrVo1li5dyrp168iVK5d7/6xZs8idOzfbt2/nypUrBAQEUL9+fUaNGsW4ceNYsWJF8t1IGqcRWJEU\nNno0HDwI06ZB1qxOpxERERHJuJYvh5decv1e9tJLru17denSJfz8/ChYsCB//vkn9erVA1wF7KpV\nq6hQoQIVK1bkwIED/Pzzz5QvX57vvvuOfv36sXHjRnLnzp2gvwEDBjBs2LAE+1atWsW8efPw8/Oj\nSpUqnDp1ip9//vnew3sgFbAiKejgQRg5Elq1gqAgp9OIiIiIZGyrVsHFi67vL150bd+r68/AHjp0\nCGut+xlYay0hISGEhYURFhbGwYMHee211yhVqhS7du2ifPnyDBgwgHfffTdBf3Xq1OHSpUts3brV\nvc9ay5QpU9x9/fbbb9SvX//ew3sgFbAiKcRa19ThLFlg/Hin04iIiIhI/fqQI4fr+xw5XNvJJUeO\nHEyePJnx48cTExNDgwYNmD17NufPnwfgyJEjHD9+nKNHj5IjRw5at25NcHAwu3btuqmvAQMGMGbM\nGPd2gwYNmDFjBtHR0QCEh4dz4cIFcuXKxblz55LvJjyAnoEVSSGff+5avGnSJChc2Ok0IiIiItKk\nCSxcmDLPwAJUqFABX19fFi5cSJs2bdi/fz/VqlUDwMfHh/nz53Pw4EGCg4PJlCkT3t7ezJgx46Z+\nnn32WfLnz+/e7tChAxEREVSsWBFrLfnz5+fLL7/E19cXLy8vnnjiCdq1a0evXr2S94bSIGOtdTrD\nXfn7+9sdO3Y4HUMk0c6dg9KlIX9+2L4dMutPRSIiIiLJbv/+/ZQuXdrpGPI33ernZozZaa31v00T\nN/1aLZIC3nkHjhyBJUtUvIqIiIiIJBc9AyuSzPbscU0b7tgRqlZ1Oo2IiIiISPqhAlYkGV27Bp07\nQ548rtWHRURERCRlecIjkfL/7vXnpQJWJBnNnQubN8OYMZA3r9NpRERERNK3bNmycerUKRWxHsJa\ny6lTp8iWLVuS+9DTeSLJ5NQpCA6GgABo29bpNCIiIiLpX9GiRYmMjOTEiRNOR5FEypYtG0WLFk1y\nexWwIsnk7bchKgqmT4dMmtsgIiIikuK8vb0pUaKE0zEkFenXbJFksHUrfPABvPEG+Po6nUZERERE\nJH1SAStyj2JiXAs3FSkCQ4Y4nUZEREREJP3SFGKRezR9OoSFweLFkCuX02lERERERNIvjcCK3IOj\nR2HAAGjQAFq0cDqNiIiIiEj6pgJW5B68+SZcvQpTp4IxTqcREREREUnfVMCKJNHq1fDppxASAo88\n4nQaEREREZH0TwWsSBJcuQJdu8LDD0O/fk6nERERERHJGLSIk0gSjB0L4eHwzTeQLZvTaURERERE\nMgaNwIr8Tb/+CsOHwwsvuBZvEhERERGR1KECVuRvsBa6d4fMmWHiRKfTiIiIiIhkLJpCLPI3LFsG\nK1fC+PFQpIjTaUREREREMhaNwIok0vnz0KMHlC/vGoUVEREREZHUpRFYkUQaOhQOH4aFC8Hb2+k0\nIiIiIiIZj0ZgRRJh716YMAHat4eAAKfTiIiIiIhkTCpgRe7CWujSBe67D0aPdjqNiIiIiEjGpSnE\nInfxySewYQN88AHky+d0GhERERGRjEsjsCJ3cOYM9OkDVavCa685nUZEREREJGPTCKzIHfTvD6dO\nwapVkEl/7hERERERcZR+JRe5je3bYeZM1ytz/PycTiMiIiIiIipgRW4hNhY6d4aCBeHdd51OIyIi\nIiIioCnEIrc0cybs3AmffupafVhERERERJynEViRGxw75nr2NSgIWrZ0Oo2IiIiIiFynAlbkBsHB\ncOkSTJsGxjidRkRERERErlMBKxLPunUwfz706welSjmdRkRERERE4lMBKxLn6lXo0gVKlICQEKfT\niIiIiIjIjbSIk0icCRPgwAH46ivInt3pNCIiIiIiciONwIoAERGu1+U0awbPPut0GhERERERuRUV\nsCLAG29Apkzw3ntOJxEREREUo2TcAAAgAElEQVQRkdvRFGLJ8JYvd32NGQMPPuh0GhERERERuR2N\nwEqGdvEi9OgBZcpAz55OpxERERERkTvRCKxkaMOGwaFDsH49eHs7nUZERERERO5EI7CSYe3fD+PG\nQdu2ULOm02lERERERORuVMBKhmQtdO0KOXO6nn0VEREREZG0T1OIJUNauBDWrYMZM6BAAafTiIiI\niIhIYmgEVjKcqCjo3RuefBI6dnQ6jYiIiIiIJJZGYCXDGTgQTpyAr74CLy+n04iIiIiISGJpBFYy\nlJ07Yfp06NIFKlVyOo2IiIiIiPwdKmAlw4iNhc6dIX9+GDrU6TQiIiIiIvJ3aQqxZBgffgjbt8P8\n+XD//U6nERERERGRv0sjsJIhHD8OISFQuza8/LLTaUREREREJClUwEqG0LcvXLgA06aBMU6nERER\nERGRpFABK+nehg0wdy706QOlSzudRkREREREkkoFrKRr0dGuFYeLFYMBA5xOIyIiIiIi90KLOEm6\nNmkS7N0Ly5dDjhxOpxERERERkXuhEVhJtw4fhiFDoEkTaNzY6TQiIiIiInKvVMBKutWzJ1gL773n\ndBIREREREUkOmkIs6dLKlfDFFzByJBQv7nQaERERERFJDhqBlXTn0iXo1g0efxx693Y6jYiIiIiI\nJBeNwEq6M3Ik/PYbrF0LWbI4nUZERERERJKLRmAlXQkPh9Gj4ZVXoHZtp9OIiIiIiEhyUgEr6Ya1\n0LUrZM8O48Y5nUZERERERJKbphBLuvHZZ7B6NUydCgULOp1GRERERESSm0ZgJV04exZ69YKKFaFT\nJ6fTiIiIiIhIStAIrKQLgwbBsWOwbBl4eTmdRkREREREUoJGYMXjhYXBlCmukdcnn3Q6jYiIiIiI\npBQVsOLRrl2Dzp0hb14YPtzpNCIiIiIikpI0hVg82uzZsHUrzJ0LefI4nUZERERERFKSRmDFY508\nCf36Qc2a0KaN02lERERERCSlqYAVj9Wvn2v14enTwRin04iIiIiISEpTASseafNm1/Th3r2hbFmn\n04iIiIiISGpQASseJybGtXDTgw/CwIFOpxERERERkdSiRZzE40yeDHv2wNKl4OPjdBoREREREUkt\nGoEVjxIZCYMHQ8OG0LSp02lERERERCQ1qYAVj9K7t2sK8eTJWrhJRERERCSjUQErHuPbb2HxYujf\nH0qWdDqNiIiIiIikNhWw4hEuX4Zu3aBUKQgOdjqNiIiIiIg4IcUWcTLGPAYsirerJDAIKAI0Bq4C\nvwCvWmujUiqHpA+jR8PBg/Ddd5A1q9NpRERERETECSk2Amut/Z+11s9a6wdUAi4CS4HvgHLWWl8g\nHAhJqQySPhw8CCNHQqtWEBTkdBoREREREXFKak0hrgv8Yq09ZK1dZa2Nidu/FSiaShnEA1nrmjqc\nJQuMH+90GhERERERcVJqFbCtgIW32N8e+PpWDYwx/zLG7DDG7Dhx4kSKhpO06/PPXYs3DRsGhQs7\nnUZERERERJxkrLUpewFjsgBHgbLW2j/j7e8P+APN7V1C+Pv72x07dqRoTkl7zp2D0qUhf37Yvh0y\np9gT2yIiIiIi4iRjzE5rrf/dzkuNkuAZYNcNxWs7oBFQ927Fq2Rc77wDR47AkiUqXkVEREREJHUK\n2JeIN33YGPM00BeoZa29mArXFw/0448waRJ07AhVqzqdRkRERERE0oIUfQbWGJMTqAd8EW/3VCAX\n8J0xJswYMzMlM4jnuXYNOneGPHlcqw+LiIiIiIhACo/AWmsvAHlv2PdISl5TPN/cubBlC8yeDXnz\n3v18ERERERHJGFJrFWKRRDl1CoKDISAA2rZ1Oo2IiIiIiKQlKmAlTQkJgagomDEDMunfThERERER\niUclgqQZW7fChx9Cz55QvrzTaUREREREJK1RAStpQkyMa+GmIkVg8GCn04iIiIiISFqkt2tKmjB9\nOoSFweLFkCuX02lERERERCQt0gisOO7oURgwAJ5+Glq0cDqNiIiIiIikVSpgxXFvvglXr8KUKWCM\n02lERERERCStUgErjlq9Gj791LX68CN6Q7CIiIiIiNyBClhxzJUr0LWrq3Dt18/pNCIiIiIiktZp\nESdxzNixEB4O334L2bI5nUZERERERNI6jcCKI379FYYPhxdegPr1nU4jIiIiIiKeQAWspDproXt3\nyJwZJk50Oo2IiIiIiHgKTSGWVLdsGaxcCePHQ5EiTqcRERERERFPoRFYSVXnz0OPHuDr6/qniIiI\niIhIYmkEVlLV0KFw+DAsXOiaQiwiIiIiIpJYGoGVVLN3L0yYAO3bQ0CA02lERERERMTTqICVVGEt\ndOkC990Ho0c7nUZERERERDyRJnFKqvjkE9iwAT78EPLlczqNiIiIiIh4Io3ASoo7cwb69IGqVV3T\nh0VERERERJJCI7CS4vr3h1OnYNUqyKQ/mYiIiIiISBKpnJAUtW0bzJzpemWOn5/TaURERERExJOp\ngJUUExsLnTtDwYLwzjtOpxEREREREU+nKcSSYmbOhF274NNPXasPi4iIiIiI3AuNwEqKOHbM9exr\nUBC0bOl0GhERERERSQ9UwEqK6NMHLl2CadPAGKfTiIiIiEhqioqKYt68eQAcO3aMatWqUbt2ba5e\nvZroPrp160bNmjVZvnw58+fPp3Llyrz77ruMGjWKPXv23LbdK6+8kqTMkydPTlK7xLR95JFHbnss\nPDwcb29vNm3adMtj1atXJzAwkICAAP773/8C8Ouvv1KzZk0CAwOpXbs2kZGRAERERFCnTh0CAgIY\nMWJEku8nLTPWWqcz3JW/v7/dsWOH0zEkkdatgzp1YOBAePddp9OIiIiISGqLiIigQ4cOrF69moUL\nF3LgwAHe+ZuLopQqVYrw8HAAGjRowMyZMylRokRKxAVcRebBgwdTpO2djrdp04Y//viDIUOGUKNG\njQTHYmJi8PLywhjD2rVrmTFjBosXL6ZPnz6UL1+etm3bMmfOHPbv38/o0aNp1aoVXbt25amnniIo\nKIipU6fy+OOPJ+meUpsxZqe11v9u52kEVpLV1avQpQuUKAEhIU6nEREREREnTJgwgZ07d/Loo48y\naNAg5s2bR4cOHW557vr166lVqxaBgYF06tQJay3du3fn8OHDBAYG8v777/PDDz/w8ssvs2TJEtq1\na+cerXzvvfeoUqUKtWvXZu7cucD/j3b+9ddftGzZkrp161KnTh13ARkYGEjPnj2pX78+devW5cqV\nK0yYMIEjR44QGBjIrFmzmDNnDs899xzNmzenXLlybNy4EYA9e/YQFBREnTp1aNmyJZcuXbqp7e30\n6tWLWrVq0bp1a65duwbADz/8QMGCBSlatOgt22TOnBkTN53x7Nmz+Pr6AlC2bFmioqIAOHPmDAUK\nFAAgLCyMp556CoCGDRuyfv36xPy4PIu1Ns1/VapUyYpnGDnSWrD2q6+cTiIiIiIiTvntt99s3bp1\nrbXWfvzxx3bo0KG3PO/atWvWz8/PRkVFWWut7dmzp/3Pf/5jrbX24Ycfdp9Xq1Yte/jwYWuttW3b\ntrUbN260e/bssTVr1rTR0dHWWmtjYmIStOvXr59duHChtdbasLAw26JFC3dfS5cutdZa27Fjx1te\n7+OPP7ZNmza11lq7efNmd9unnnrKHjp0yFpr7aRJk+yUKVNuansrxYoVs1u2bLHWWtuhQwf39Rs3\nbmxPnjzpvqdb2bFjh61ataotXLiw3bp1q7XW2t9//92WLl3ali9f3pYqVcr9+T366KPudrNnz7Z9\nO7xuV7Z9w/7npc7261d72UNrN90xp5OAHTYRtaFWIZZkExHhmjLcvDk8+6zTaUREREQkrTt58iQR\nERE0bdoUgPPnz/PYY48lqu2+ffuoUaMGmTO7ShovL68Ex/fs2cP69euZOXMmgPs8gEqVKgHw0EMP\ncerUqVv2f6tz9u7dyz//+U8ALl++TFBQUKKyGmOoXLkyAFWqVOF///sfX331Ff7+/uTNmzfBuY0a\nNeL8+fN069aN559/nkqVKvH999+zbds2unXrxrZt2+jXrx/Dhg2jefPmLFy4kLfffptp06aRKdP/\nT7D966+/sMdOsW/FFteOTJkoXr9WovKmZSpgJdm88QZkygSTJjmdRERERESclCVLFmJiYu56Xr58\n+ShZsiQrVqzAx8cHgOjo6ERdo2zZssyYMYPY2Fi8vLy4du1aggKubNmyVKtWjWbNmgEkWEDKxFtl\n1MatCRS/7e3OKVeuHAsXLqRQoUIJ+ryx7Y2stezYsYMqVaqwfft2nn76acLCwggNDWXLli3s2bOH\nAwcOsGjRIlasWOFud/nyZbJlywbA/fffT44cOdz95cuXD4ACBQpw+vRpAJ544gm2bNmC36OP8cnY\nSdQ9ehHjlRUbe42chQpQ6oVGd/5QPYAKWEkWy5e7vsaMgQcfdDqNiIiIiDipYMGCZM+enRYtWvDs\nHabmGWOYMGECTZo0wVpLpkyZmDhxovtZzzspW7YsTZs2pXr16uTMmZO2bdvStm1b9/H+/fvTqVMn\npkyZgrWWhg0b0qdPn9v2d73YffHFF297zrRp02jXrp27yA4JCaFevXoJ2rZq1eqmdpkzZ+bzzz+n\nb9++FClShCZNmtCsWTP69+8PQLt27ejQoQPFihVL0G7NmjWMHj3aPbo8KW6kaMCAAbz++utkzpyZ\n6Oho3n//fQBGDB9Oq4ZNOP1LBI/ZLDTq15c8jz3Mqva9qTV2IJluGKX2RFqFWO7ZxYtQpgz4+MDu\n3eDt7XQiEREREZGM5c9de1jdOYRj23ZTtFY1gqaPIG+ZUlyLjSVs+lz8urRN0wVsYlch1gis3LNh\nw+DQIdiwQcWriIiIiNzavn376NKlS4J9//rXv3j55ZcdSpT81q5dy7s3vEdy0KBB1KlTJ8WueTnq\nLzYPHMt/p88le74HeOaTyZR+pbl7CnQmLy8qdm+fYtdPbRqBlXuyfz888QS8/DLMmeN0GhERERGR\njMFay4F/LyX0zXe5dOIUT3RpS8DQYLLdn9vpaEmiEVhJcdZC166QM6fr2VcREREREUl5p/aFs6br\n2xwO/Z6ClSvQ/Kt5/KPS3Z8bTg9UwEqSLVwI69bBjBkQ9+5kERERERFJIdEXLvL90EnsHP8+WXL5\nEDRzFL4dX8HcZRXk9EQFrCRJVBT07g2VK0PHjk6nERERERFJv6y1HFz2LeveGMS5349Q9tUXqTm6\nPzny571743RGBawkycCBcOIEfPUVpOHFzEREREREPFrUr4dY230Av61cS77ypXl241SK1qjsdCzH\nqICVv23nTpg+Hbp0gUqVnE4jIiIiIpL+xFy5wvYx09k2Yiomsxe1xg+iQvf2eGXw136ogJW/JTYW\nOneG/Pldr88REREREZHkFbFqPWu79efMz79RqmVjAicMJleRQk7HShNUwMrf8uGHsH07LFgAuT1z\nhW4RERERkTTp3JE/CO39DuGf/Yc8j5agxbf/pnj9Wk7HSlNUwEqiHT8OISFQuza89JLTaURERERE\n0ofY6Gh2T5nNlsHjsTGxBAwNxj+4M5mzZnU6WpqjAlYSrW9fuHDB9fyrMU6nERERERHxfJGbtrGm\ny9uc3LOfEs/Woc6UYdxfspjTsdIsFbCSKBs2wNy58Pbb8PjjTqcREREREfFsF0+cYkO/4ez9eBG5\nHixMk6WzeKRpA4xGiu5IBazcVXS0a+GmYsWgf3+n04iIiIiIeC577Ro/friATSGjuHruPJXf6kbV\nAW/gnTOH09E8ggpYuatJk2DfPli+HHLof1ciIiIiIkny584fWd3lbY5t282DgdWoO20EecuUcjqW\nR1EBK3d0+DAMGQJNmkDjxk6nERERERHxPJej/mLzwLH8d/pcsufPy7Pzp/D4y800XTgJVMDKHb3x\nBlgLkyc7nURERERExLNYa9m/4AvW9xnKpROneKJLWwKGBpPtfr2PMqlUwMptffUVLF0KI0e6nn8V\nEREREZHEObUvnNVd3iZy/fcUrFyB5is/4R8Vyzsdy+OpgJVbunQJund3rTjcu7fTaUREREREPEP0\nhYt8P3QSO8e/T5ZcPtR7fzTlO7yMyZTJ6WjpggpYuaWRI+G332DtWsiSxek0IiIiIiJpm7WWg19+\nw7o3BnHu8FHKvvoiNUf3J0f+vE5HS1dUwMpNwsNh9Gho3Rpq13Y6jYiIiIhI2hb16yHWdh/AbyvX\nkq98aRounE6RgCedjpUuqYCVBKyFrl0he3YYO9bpNCIiIiIiaVfM5ctsHzuDbSOmYjJ7EThhMBW6\ntydTZpVZKUWfrCTw2WewejVMnQoFCzqdRkREREQkbYpYtZ41Xd8m6mAEpVo2JnDCYHIVKeR0rHRP\nBay4nT0LvXpBpUrQqZPTaURERERE0p5zkUcJ7f0O4YtXkOfRErRYtZDi9Wo6HSvDUAErboMGwbFj\nsGwZeHk5nUZEREREJO2IjY5m95TZbBk8HhsTS8DQYPyDO5M5a1ano2UoKmAFgLAwmDLFNfL6pJ43\nFxERERFxi9y0jTWdQzj50wFKNqxL7clDub9kMadjZUgqYIVr16BzZ8iXD4YPdzqNiIiIiEjacPHE\nKTb0HcbeOZ+R66EiNP1yNg83qY8xxuloGZYKWGH2bNi6FebNgzx5nE4jIiIiIuKsa7Gx7Pno32wK\nGcXVc+ep/FY3qg54A++cOZyOluGpgM3gTp6Efv2gZk3Xe19FRERERDKyP3f+yOrOIRzbHsaDgdWo\nO20EecuUcjqWxFEBm8H16+dafXj6dNBMCBERERHJqC5H/cXmAWMImz6XHAXy8eyCqTz+0nOaLpzG\nqIDNwDZvdk0f7tsXypZ1Oo2IiIiISOqz1rJ/wResf/NdLp08TYVurxIwNJisue9zOprcggrYDCo6\n2rVw04MPul6fIyIiIiKS0Zzc+z/WdO1P5PrvKVi5As2/ns8/KpZ3OpbcgQrYDGrKFNizB5YuhZw5\nnU4jIiIiIpJ6rp6/wNahk9g54QOy5PKh3gdjKP/aS5hMmZyOJnehAjYDioyEwYOhYUNo2tTpNCIi\nIiIiqcNay8GlX7Ou52DOHT5KufateGrU2+TIn9fpaJJIKmAzoN69ISYGJk/Wwk0iIiIikjFE/RLB\n2u4D+e3rteT3LU3DhdMpEvCk07Hkb1IBm8F8+y0sXgzDhkHJkk6nERERERFJWTGXL7N9zAx+GDGF\nTN6ZCZw4hArdXiVTZpVCnkg/tQzk8mXo2hVKlYI+fZxOIyIiIiKSsiK+DWVNt/5EHYzgsRebUGv8\nIHIVKeR0LLkHKmAzkNGj4Zdf4LvvIGtWp9OIiIiIiKSMc5FHCe01hPAlX5GnVEme/24hxYJqOh1L\nkoEK2Azi4EEYORJatYKgIKfTiIiIiIgkv9joaHZPnsWWweOxsdcIGNYX/z6dyKzRm3RDBWwGYC10\n6wZZssCECU6nERERERFJfpEbf2BNl7c5+dMBSjasS50pw8hd4iGnY0kyUwGbAXz+uWvxpvfeg0Ka\n8i8iIiIi6cjF4yfZ0HcYe+cuJtdDRWj65WweblIfo9dtpEsqYNO5c+egZ0/w84MuXZxOIyIiIiKS\nPK7FxrLnwwVsDBlF9IWLVA7pRtX+b+CdM4fT0SQFqYBN5955B44edY3CaqVwEREREUkP/tz5I6s7\nh3BsexgP1q5O3WkjyFv6UadjSSpQSZOO/fgjTJoEHTtClSpOpxERERERuTeXo/5iU//R/HfGPHL+\nIz/PLpjK4y89p+nCGYgK2HTq2jXo3Bny5IERI5xOIyIiIiKSdNZa9s//nPV9hnLp5GkqdG9PwLt9\nyJr7PqejSSpTAZtOzZ0LW7bA7NmQN6/TaUREREREkubk3v+xpsvbRG7YSqEqFWj+zQL+UaGc07HE\nISpg06FTpyA4GGrUgLZtnU4jIiIiIvL3XT1/ge/fnciuiR+S5T4f6n0whvKvvYTJlMnpaOIgFbDp\nUEgIREXB9Omg/32LiIiIiCex1nJw6desfWMQ5yP/oNxrL/HUqLfJke8Bp6NJGqACNp3ZuhU+/BDe\nfBPKl3c6jYiIiIhI4kX9EsHa7gP57eu15PctTaNFMyhS/UmnY0kaogI2HYmJcS3cVKQIDB7sdBoR\nERERkcSJuXyZbaOns23kVDJ5ZyZw4hAqdHuVTHoPpNxA/0akI9OnQ1gYLFkCuXI5nUZERERE5O5+\n+2Yda7sNIOqXCB5r1ZTA8YPwKVzQ6ViSRqmATSeOHoUBA+Dpp6F5c6fTiIiIiIjc2dnDRwjtNYSf\nP19JnlIlef67hRQLqul0LEnjVMCmE2++CVevwpQpoPc4i4iIiEhaFRsdza73ZvH9kPHY2GsEDOuL\nf59OZM6a1elo4gFUwKYDq1fDp5/CkCHwyCNOpxERERERubXIjT+wunMIp/b+j5KNgqgzeSi5Szzk\ndCzxICpgPdyVK9C1q6tw7dfP6TQiIiIiIje7ePwk64OHsm/eEu4rVpSmyz7mkSb1nY4lHkgFrIcb\nOxbCw+HbbyFbNqfTiIiIiIj8v2uxsfz4wXw2vT2a6AsXqRzSjar938A7Zw6no4mHUgHrwX79FYYP\nhxdegPr6A5aIiIiIpCHHdvyX1Z1D+HPHf3moTgB1po0g7+N63k3ujQpYD2UtdO8OmTPDxIlOpxER\nERERcbl8JopNA8bw3xnzyPmP/Dz772k83qopRiuNSjJQAeuhvvwSVq6ECROgSBGn04iIiIhIRmet\nZd8nS9gQPIxLJ09ToXt7At7tQ9bc9zkdTdIRFbAe6Px5eOMN8PV1jcKKiIiIiDjp5E8HWNO1P5Eb\ntlKoakWaf7OAf1Qo53QsSYdUwHqgoUPh8GFYuNA1hVhERERExAlXz1/g+3cnsmvih2S5z4d6H46l\nfPtWmEyZnI4m6ZTKHw+zd69r2vBrr0FAgNNpRERERCQjstby8xcrWddzMOcj/6Dcay/x1Ki3yZHv\nAaejSTqnAtaDWAtdusB998GoUU6nEREREZGM6MzB31jbfSAR36wj/xNlaLRoBkWqP+l0LMkgNLbv\nQT75BDZsgNGjIV8+mDNnDsOGDXM6lqMiIiJYvny5e3vp0qWULl2abDe8FHfXrl0EBARQvXp15syZ\n497foEED8ufPf9fPsU2bNgQGBuLv78/EuGWfd+/eTUBAADVr1qROnTr8+uuvd80bHh6Ot7c3mzZt\nuunY/PnzGTJkyE37x4wZQ5UqVQgICKB79+5Ya7l06RL16tWjRo0aVK1ala+//jpBm3Xr1mGMITIy\n8q6ZRERERBIj5vJltrwzgbnl6nJ083ZqT3qH1ju+VvEqqUojsB7izBno0weqVYP27Z1Oc3uxsbF4\neXml2vWuF7BNmjQBoGbNmuzevZty5RIuGtC9e3fmz59PkSJFqFq1Kk2bNiVPnjzMmjWL1atX37XQ\nmzVrFlmyZCEmJobSpUvToUMHChUqxDfffEOuXLlYuXIlgwcP5pNPPrljP0OHDqVWrVp/6x6bNWtG\n3759AWjZsiVr166lZs2afPjhhxQvXpyTJ08SEBDAM//H3n3HVV2+fxx/3YAMByoKznDhBAvT3AMc\nZKm4tTQ3WpkW9lVTMY2Gmt8kNS39qpnlaFm/zJWagoNyJS5wj8AQBcHJPvfvjwNHUUA0Dwflej4e\nPPR8zme8gTQv7vu+7hdeAIxTeoKCgmjUqNEDPUcIIYQQIidnN25j66jJJJw+R+2XuuI1awrFK5a3\ndCxRCOU6AquUslVKdVNKzVJKrVJKfamUelspVTu/AgqjiRPTuXy5HykpbZg0aQJublk3gb7ztZ+f\nH8HBwQAEBgbSrFkzmjRpwrp16wB477336N+/P76+vnh6enLs2LFsnxkcHEzjxo3x9vZmyJAhABw+\nfJj27dvTtm1b+vTpQ2JiIgBVqlRh5MiRdO3aldTUVPz8/PD29qZly5bs2bMHgLFjx9KsWTO8vb35\n7rvvAHB1deXVV1+ladOmjB07FiDb67XW+Pr6EhwczK1bt2jWrBlnz54lKCiIdevW4eXlxf79+ylT\npsw9o6/JycncvHmTatWqYWtrS6tWrUyZKleunKevv62tLQBJSUm4urpStGhRypcvT4kSJQCws7PD\nJqOj1siRI/n6668xGAw8//zz7N69G4Ddu3dTvnz5LM8MDw+ncePGdOrUKctI8p1q1qxp+n3mc4oU\nKULVqlUBcHBwwOqORgk//PADzz//PMWKFcvT5yaEEEIIkZNrkRdY02s4P73wCsrGml5bvqXzqs+l\neBUWk2MBq5R6F9gNeAMHgWXAGoyjtrOVUhuVUjn2xlZK1VZKhd3xcU0p5a+UclJKbVZKncz4tfQj\n/pyeOHv2wMKFv1C/viP79oXQpUsX0tLS7ntdWFgYO3bsIDQ0lN9++40xY8ZgMBgAcHZ2Zs2aNYwf\nP57Fixdne/1PP/3Ehx9+yLZt21iyZAkAb7zxBl9++SVbt26lRYsWpuPR0dFMmDCBtWvXsmTJEtzc\n3Ni2bRurV69mzJgxAGzYsIEdO3awbds2evfuDcClS5cIDAzkjz/+YO3atVy7di3b65VSLFmyhHHj\nxjFs2DDGjBlDtWrVePvtt+nUqRPBwcE0bNgw288jLi6OUqVKmV6XKlWKK1eu5PGrf1vv3r2pXr06\nLVu2zDLKfPPmTSZPnsy4ceMACAoKYsGCBbz++uu0a9eOJk2aAPDRRx8xYcKELPecOHEic+bMYd26\ndZQsWTLX54eEhBAdHU3r1q2zHB8zZoxphDY1NZXFixczYsSIB/78hBBCCCEypaemsveTBXxV14uz\n67fS8qN3GHhwM1XatbJ0NFHI5TaF+JDW+oMc3puplKoAPJXTxVrr44AngFLKGrgA/AxMAH7XWs9Q\nSk3IeP3Ow4QvDH7+GV59FRwcTjJihHF9QZMmTVBK5XiN1hqA48eP07RpU5RSlCpVChcXF2JjYwFM\nxZ6rqyubN2/O9j7jxjJ3F7UAACAASURBVI3j448/ZtmyZbRt25Zhw4Zx9OhRBg4cCBhHI9u3bw9A\npUqVcHV1BYyjtKGhoWzcuBGAq1evAjBjxgyGDh2KlZUV48aNw93dnUqVKlG+vPEneJUrVyY+Pj7H\n652dnfHx8eHnn39m1apVef4aOjk5kZCQYHp99epVnJwevEPeDz/8wK1bt2jdujV9+/alXr16pKam\n0rdvX9555x3q1asHgL29PUOGDGH8+PFER0cDsG7dOho1akSZMmWy3PPkyZM0btwYMH5fo6KiOHXq\nFH5+fgAsXrwYNzc3Dh06xIQJE/j111+zfO8/+OADHB0dTSPk//vf/3jllVdMI8ZCCCGEEA8qavuf\nbBk5ibijx6nepQNt535Ayao5/rNfiHyVYwGrtf7l7mNKKVvARmt9S2sdDUTn8TntgNNa6/NKqa6A\nV8bxZUAwUsBma80aeOklSEkBGxs31q3bwqhRw9i7d6+pSM1UsmRJLl68iLOzM2FhYQwYMIBatWqx\naNEitNZcvXqVS5cuUbZsWYAsRdDd98pUpkwZ5s2bh9aaWrVq0bt3bzw8PFi1ahUVKlQAICUlBSDL\niKS7uztubm6mkdeUlBS01rRv354uXbqwc+dOpkyZwurVq+8pxLXW2V4PcOTIEUJDQ/H19WXu3Lm8\n+eabpnWpubG3t6dYsWL8/fffVKhQgZ07dzJ16tTcv/h3ZUpNTcXW1hZ7e3scHBxwcHDAYDDwyiuv\n0K1bN7p162Y6Pzo6miVLlvDuu+8yadIkgoKCCAsLIzg4mNDQUA4fPsyxY8f47rvvcHNzY9++fTRp\n0oS9e/dSoUIF3NzcTFPAAU6dOsXQoUNZvXq16fsHMG/ePE6ePMmyZctMx44cOcLp06dZuXIlhw4d\nYsCAAWzYsOGeadVCCCGEEHe7GXOZ7eM/JPzrH3GsUpmuvyzFzdfH0rGEyCLPTZyUUkOAfoC1UipU\naz35AZ7zEpA5ZFYuo/gFuAiUy+F5I4ARgGlkr7DZtMlYvAKkpXXj1KkfaNOmDc899xx2dnZZzh0/\nfjwdOnTA3d0dFxcXABo0aEDz5s1p1qwZBoOBWbNmZVkreT9BQUFs2rQJg8FAhw4dcHR0ZP78+Qwe\nPJjU1FTAOAW2Q4cOWa4bPnw4o0ePxtvbG4BGjRoxbdo0U5OhpKQkpkyZkuNzs7v+/fffZ8SIESxf\nvhxXV1d8fHxo1aoV9evX5/Tp0/Tq1YupU6eSkJBAYGAg//zzD+3bt2fkyJH06NGDOXPm8PLLL6O1\nZuTIkZQuXdr0rNDQUJKTk9m3bx//93//d0+etLQ0fHyMf3mnpKTQp08fqlWrxo8//si6deuIiYlh\n+fLl1K9fnzlz5jBkyBBmz55N06ZNeemll1i/fj0BAQEEBAQAMHjwYPz8/KhSpQrTpk1j6NChlClT\nJktxeid/f38SEhIYNGgQYBwZf+6553jrrbdMa4oBfv/9d7744gvTdV5eXnzzzTdSvAohhBAiV4b0\ndA79bzk7J31M6s1bNJk0miYBb1GkqIOlowlxD5XT6JtS6kWt9fo7Xn+rtX4p4/cHtdbP5OkBxlHb\nfwB3rXWMUipBa13qjvfjtda5roNt1KiR3rdvX14e90RZswZ69ID0dChaFL75JpUePYqwa9cupk+f\nztq1ay0dUQghhBBCPMYu7g1jy8hJxOw7iGvbFrSdP40yddzuf6EQj5hSar/W+r7baOQ2AvucUmo4\n8K7W+ghwVCm1EDAA2betzd4LwF9a65iM1zFKqQpa6+iMdbSXHuBehYqvL1SqBFZWMGcOLFv2EnPm\nxJKcnMzChQsf6bPGjx9v6swLxq67mzZteqTPeBxs3bqV999/P8uxKVOm0LZtWwslEkIIIYR49JLi\nE9gZ8DEHF3xDsXLOdFr1ObX7+ubaZ0WIgiDHEVgApVRF4AMgFZgCOAFFtdZ/5fkBSn0L/Ka1Xprx\n+r9A3B1NnJy01uNzu0dhHYFNSzOOvI4ZAx9/bOk0QgghhBDicae1JvybHwkZ+wFJcfE0GD2E5oFj\nsSvpaOloopB7FCOwAPHASMAd+BIIBWY9QIhiQAfg1TsOzwC+V0oNA84DffJ6v8LmzBlITYW6dS2d\nRAghhBBCPO5ijxxjy8hJXNixmwpNn6X9ppW4eOa4K6YQBVKOBaxSKhBomXHOj1rrzkqpHsB6pdQS\nrfXK+91ca30TKHPXsTiMXYnFfUREGH+VAlYIIYQQQjyslBs3+SMwiP2fLsKuZAl8Fn+Cx5C+qAdo\n7ilEQZHbCGxXrbWnMk6E3w98prX+SSn1K/Bm/sQr3DIL2Dp1LJtDCCGEEEI8frTWnPxpPdv8p3Ij\nKpr6fv1oOX0iRcs6WTqaEA8ttwI2Qin1OVAU2Jl5UGudygNMIxYPLyICKlaEkiUtnUQIIYQQQjxO\n4k+dZeuoyZz7LRjnZ+rR5fsFVGx23+WFQhR4ORawWuuXlVINgNSMLsQin0VEyPRhIYQQQgiRd2lJ\nSeyZMZ89M+ZjbVsE7znv4zlyEFY292t9I8TjIbc1sE211n/m8n5xwFVrHW6WZIWc1nDsGAwaZOkk\nQgghhBDicXB2w1a2jn6XhNPnqPNyN9rMmkLxCuUsHUuIRyq3H8X0y9jyZgPGNbCXAXvADfDO+HWs\n2RMWUv/8A9evywisEEIIIYTI3bXICwT7v8fJn9ZTunYNem35lirtWlk6lhBmkdsU4jeVUmWB3sAA\noAKQCEQAy7TWwfmSsJCSDsRCCCGEECI36amp/DV7MX8EBqENBlpOm0DDt0dgY2dn6WhCmE2uk+G1\n1rHAFxkfIh9JASuEEEIIIXIStf1PtoycRNzR49Tw9cF7zvuUrPqUpWMJYXaymruAioiAUqWgnCxb\nEEIIIYQQGW7GXGb7uA8I/2Y1jlUq0/WXpbj5+lg6lhD5RgrYAioiwrj/q1KWTiKEEEIIISzNkJ7O\noYXfsHPSx6TeSqRJwJs0mfQmRYo6WDqaEPlKCtgCKiICXnzR0imEEEIIIYSlXdwbxpbXJxKz/xCu\n7VrSbv5HONV2s3QsISzivgWsUmo38CWwSmt9zfyRRHw8xMTI+lchhBBCiMIsKT6BnZNmcHDhcoqV\nd6HTqs+p3dcXJVP0RCGWlxHYQcAQIEwpFQos1Vr/bt5YhZs0cBJCCCGEKLy01oR//QMh4z4kKS6e\nZ98aRvPAsdg5lrB0NCEs7r4FrNb6GPCOUmoS4At8rZRKwTgq+5nWOsHMGQsdKWCFEEIIIQqn2CPH\n2DJyEhd27KZCs4a037QSF08PS8cSosDI0xpYpVQ9jKOwXYBfgBVAS2Ar8KzZ0hVSERFgZwdVq1o6\niRBCCCGEyA8pN27yR2AQ+z9dhF0pR3wWf4LHkL4oKytLRxOiQMnLGtg9wC2MI65TtNaJGW/tUkq1\nMGe4wioiAmrXBmtrSycRQgghhBDmpLXm5Op1bPOfyo0LF6k/vD+tpk/AoYyTpaMJUSDlZQT2Fa31\nieze0Fr7PuI8AmMB27ixpVMIIYQQQghzij95hq2j3+Xcb8E4e7rT5YeFVGzWyNKxhCjQ8jInYYBS\nqlTmC6VUaaVUoBkzFWqJiXDunKx/FUIIIYR4UqUmJrJr6ics82jHP3/sx3vO+7yyd70Ur0LkQV4K\n2M53NmrSWsdjXAsrzODECdBaClghhBBCiCfR2Q1bWebRjj/f/5SavTox5FgIz745DCubPLWmEaLQ\ny8ufFGullK3WOgVAKWUP2Jo3VuGV2YG4Th3L5hBCCCGEEI/OtcgLBPu/x8mf1lO6dg16//4drm1b\nWjqWEI+dvBSw3wKblVJfZrweirELsTCDiAiwsoJatSydRAghhBBC/Fvpqans/3QRfwQGgda0nDaB\nRv95FWtbGQ8S4mHkZR/YaUqpw0C7jEMztdbrzBur8IqIgGrVwN7e0kmEEEIIIcS/ERnyB7+PnERc\n+AlqdH0e79mBlKz6lKVjCfFYy9Nke631r8CvZs4iMBawsv5VCCGEEOLxdTPmMtvHfUD4N6txrPoU\n3dYspUYXH0vHEuKJkJd9YJ8DPgPqAnaAApK11o5mzlbopKUZmzi98IKlkwghhBBCiAdlSE/n4IJv\n2BXwMam3EmkS8CZNJr1JkaIOlo4mxBMjLyOwnwOvYFwL2xgYDFQxY6ZC6+xZSEmREVghhBBCiMdN\n9J4D/D5yEjH7D+HariXt5n+EU203S8cS4omTlwLWSmt9XCllo7VOBRYppQ4Ak82crdDJ7EAsBawQ\nQgghxOMh8Uo8uwI+5uDC5RQr70Knbz+ndh9flFKWjibEEykvBexNpZQtcFApNQ2IBqzNG6twkgJW\nCCGEEOLxoLXm6LLv2T7uQ5Lir/LsW8NoHjgWO8cSlo4mxBMtLwXsYMAKGAX8B6gJ9DJjpkIrIgIq\nVICSJS2dRAghhBBC5OTy4Qh+HzmJCzv3UKFZQ9p/MR2XZ9wtHUuIQiHXAlYpZQ28p7UeCCQB7+ZL\nqkJKOhALIYQQQhRcKddvEPreLP6aswS7Uo74LJmFx+A+KCsrS0cTotDItYDVWqcrpaorpYpkrH8V\nZqI1HDsGAwZYOokQQgghhLiT1poTP64l2P89bvxzkfrD+9Nq+gQcyjhZOpoQhU5ephCfBnYopX4B\nbmYe1FrPNVuqQig6Gq5dgzp1LJ1ECCGEEEJkij95ht9HTeb8phCcPd3psvp/VGza0NKxhCi08lLA\n/p3xUTTjQ5iBNHASQgghhCg4UhMT2TNjPntnzMfa3g7vuR/g+fpArGzy8s9nIYS53PdPoNZa1r3m\nAylghRBCCCEKhjPrf2fr6He5euY8dfp1p80n71K8QjlLxxJCkIcCVim1GdB3H9da+5glUSEVEQGO\njsYuxEIIIYQQIv9d+/sC2/yncurnDTjVcaP379/h2ralpWMJIe6QlzkQk+/4vT3QE0g2T5zCK7MD\nsex5LYQQQgiRv9JTUtg/ezF/BAaB1rScPpFGb4/A2tbW0tGEEHfJyxTi3XcdClFK3X1M/EsREdCx\no6VTCCGEEEIULpEhf/D7yEnEhZ+gRtfnaTvnfRyrVLZ0LCFEDvIyhdjxjpdWQEOgtNkSFUIJCXDx\noqx/FUIIIYTILzcvXiJk3AdELP8Jx6pP0e3Xr6jRuYOlYwkh7iMvU4iPYlwDq4A04Cww3JyhChtp\n4CSEEEIIkT8M6ekcXPANuwI+Ji0xiaaT36LxxNEUKepg6WhCiDzIyxTip/IjSGEmBawQQgghhPlF\n7znAltcncumvw7i2b0W7eR/iVNvN0rGEEA/A6n4nKKVeU0qVuuN1aaXUCPPGKlwiIsDWFqpVs3QS\nIYQQQognT+KVeDa/9g4rm3bhZvQlOn37Ob02rZLiVYjH0H0LWOA1rXVC5gutdTzwuvkiFT7HjkGt\nWmBtbekkQgghhBBPDm0wcOSr71hauzWHF6+iob8fQ46FUKdvV5Rs/SDEYykva2CzlFVKKSugiHni\nFE4REfDss5ZOIYQQQgjx5Lh8OILfR07iws49VGzeiHafT8PlGXdLxxJC/Et5KWA3K6VWAQsyXr8G\nbDFfpMIlKQnOnoX+/S2dRAghhBDi8Zdy/Qah783irzlLsCvliM+SWXgM7oOyysvEQyFEQZeXAnYc\nxinDYzJebwYWmi1RIXPiBBgM0sBJCCGEEOLf0Fpz4se1BPu/x43oGJ4e3o+W0ybgUMbJ0tGEEI9Q\nXgrYIsDnWut5YJpCbItxSx3xL0kHYiGEEEKIf+fKidNsHTWZ85u349LAgy6r/0fFpg0tHUsIYQZ5\nKWC3AT7A9YzXxYDfgObmClWYRESAUsYmTkIIIYQQIu9SExPZM30eez/+HGt7O7znfoDn6wOxssnL\nP3GFEI+jvPzpdtBaZxavaK2vK6WKmjFToRIRYdw+x0H2zhZCCCGEyLMz639n66jJXD37N3X796DN\nJ+9SrLyLpWMJIcwsLwXsLaXUM1rrgwBKKU8gybyxCo+ICJk+LIQQQgiRV9f+vsC2t6Zw6v824lTH\njd5bv8fVu4WlYwkh8kleCtgxwM9KqfOAAp4C+pk1VSGRnm5s4vT885ZOIoQQQghRsKWnpLD/00X8\n8f6nALSaMYmGY4ZjbWtr4WRCiPx03wJWa71bKVUXyBwnDAfSzZqqkDh7FpKTZQRWCCGEECI3kcGh\nbBk5iSsRJ3Hr1hHv2YE4Vqls6VhCCAvI04ZYWutkrXUYUBL4DLhg1lSFhHQgFkIIIYTI2c2Ll1j/\nymi+9+5NWmIS3X79iq4/L5HiVYhC7L4jsEqpRhinDPcEygJvAgFmzlUoZBawdepYNocQQgghREFi\nSE/n4BdfszPgY9KTkmn6rj+NJ46iiHS9FKLQy7GAVUq9D/QFLgKrgEbAHq31knzK9sQ7dgzKlYPS\npS2dRAghhBCiYIje/RdbRk7i0l+HcW3finbzP8KpVg1LxxJCFBC5jcC+ARwFPgXWa61TlFI6f2IV\nDtKBWAghhBDCKPFKPDsnTufQopUUr1COzt99Qa3eXVBKWTqaEKIAya2ALQ88D7wMzFNKbQYclFJW\nWmtDvqR7gmltLGD7ST9nIYQQQhRi2mDg6LIf2D7+Q5Lir9JwzHCav/cfbEsUt3Q0IUQBlGMBq7VO\nBdYCa5VSDoAvUBq4oJTarLUemE8Zn0gXL8LVqzICK4QQQojC6/KhcLaMnMQ/u/ZSsXkj2n8xHeen\n61k6lhCiAMvLPrBorROB74DvlFKlgB5mTVUISAdiIYQQQhRWKddvEDr1E/6a+yX2pUvy/JdBuA/q\njbLK0wYZQohCLE8F7J201gnAl2bIUqhIASuEEEKIwkZrzYkffiV4TCA3omN4ekR/Wk6bgIOTdLQU\nQuTNAxew4tGIiIASJaBiRUsnEUIIIYQwvysnTrN11GTOb96OSwMPfH9aRIUmz1o6lhDiMSMFrIVk\ndiCWxnpCCCGEeJKlJiayZ9pn7J35Bdb2drT97EOeeX0gVtbWlo4mhHgMPdRCA6WU96MOUtjktIXO\nV199xYcffpj/gQqQc+fOsWbNGtPr9957j7p16+Ll5YWXlxfp6ekA/PXXX7Ro0YLmzZvz1Vdf5Xi/\noKAgWrduTYsWLRg4cCCpqakkJibSoUMHWrZsSdOmTdmwYcN9c6WmplKzZs1svz9RUVF4eXndc/y3\n336jadOmtGnThhdffJG4uDgA/P39adq0KU2bNmXGjBlZrrly5QpOTk4sX778vpmEEEKIguzMui0s\nc2/Lnx/OoVafzgw9vp0Go4ZI8SqEeGgPu1J+2SNNUchcvQrR0Y/P+tfMgjG/3F3AAgQEBBAcHExw\ncDDWGf/TGz16NMuXLyc4OJi5c+cSHx+f7f1GjRrF9u3b2bVrFwCbNm3CxsaGRYsWsXPnTtauXYu/\nv/99cy1cuJA6deo80OdSt25dQkJCCAkJoXPnzsyePRuAN954gz///JPQ0FB++eUXTp8+bbpm+vTp\nNG/e/IGeI4QQQhQk185H8Uv3YfzceRDW9nb02fYDL37zGcXKu1g6mhDiMZdjAauU+imHj5+BMvmY\n8YmT2cCpZs10+vXrR5s2bZgwYQJubm5ZzrvztZ+fH8HBwQAEBgbSrFkzmjRpwrp16wDjKGX//v3x\n9fXF09OTY8eOZfvs4OBgGjdujLe3N0OGDAHg8OHDtG/fnrZt29KnTx8SExMBqFKlCiNHjqRr166k\npqbi5+eHt7c3LVu2ZM+ePQCMHTuWZs2a4e3tzXfffQeAq6srr776Kk2bNmXs2LEA2V6vtcbX15fg\n4GBu3bpFs2bNOHv2LEFBQaxbtw4vLy/2798PwMyZM2nZsiVz584FIDk5mZs3b1KtWjVsbW1p1aqV\nKdPdbG1tAWPjCIPBgJubG0WKFKFq1aoAODg4YJXR9fD7779n2LBhAEydOpWgoCAAbty4wYYNG+jZ\ns6fpvjdu3KBTp060b9+eadOmZftsV1dX7OzsALCzs8PGxjhrv2bNmgBYWVlhY2NjKsr//vtvoqOj\nadSoUbb3E0IIIQqy9JQU9nw8n6X1vDi3KYRWMyYxMGwTT3nJD2aFEI9GbmtgvYFBwM27jitA/hb6\nFzIL2H/++QVHR0dWrlzJrl27+Pbbb+97bVhYGDt27CA0NJSrV6/SuHFjXnjhBQCcnZ1ZsWIFK1eu\nZPHixXzyySf3XP/TTz/x4Ycf4uPjg8FgAIyjgcuXL8fV1ZU5c+awZMkSRo0aRXR0NBMmTMDV1ZUF\nCxbg5ubG4sWLiYmJoUePHuzatYsNGzZw8OBBbGxsTPe7dOkSgYGBlCtXjrp16zJlyhRWrlyZ7fVL\nlizhxRdfxM3NjTFjxlCtWjXefvttli9fzuLFiwGoWrUqU6dOJSkpiS5dutCgQQNq1KhBqVKlTJ9X\nqVKluHLlSo5ft48++oivvvqKmjVr8tRTT2V5b8yYMYwfPx6APn36sHnzZvz9/Tlz5gy//PILAP/9\n73/x9/fnwoULpusWLVpEy5YtmThxIitWrCA8PDzH58fExDBv3jx+++23LMdXrFhB9erVTcV0YGAg\nAQEBph8GCCGEEI+Lv7ft4vc3ArgScRK37i/gPTsQR9dKlo4lhHjC5DaFeDdwXWv9+10fW4DTuVwn\n7mPtWrCygt27T/Lcc88B0KRJE1QuHZ201gAcP36cpk2bopSiVKlSuLi4EBsbC0DDhg0B46hf5lrL\nu40bN441a9bQv39/li5dCsDRo0cZOHAgXl5erFq1iosXLwJQqVIlXF1dAeMo7XfffYeXlxd9+/bl\n6tWrAMyYMYOhQ4cyePBgIjIq80qVKlG+fHmUUlSuXJn4+Pgcr3d2dsbHx4eDBw/Sp0+fbDOXKVMG\npRQODg706NGDffv24eTkREJCgumcq1ev4uTklOPXLyAggBMnTlCtWrUs62U/+OADHB0dTaPRAOPH\nj2fOnDkEBASglCImJoYDBw7QoUOHLPc8ceIEjRs3Bozfv0ydO3fGy8uLH3/8EYBr167Rq1cvFixY\ngIvL7alTW7ZsYenSpSxYsMD0NVZKUfdxmVsuhBBCADcvXmL9K6P5oW0f0pOS6b52GV1/WizFqxDC\nLHIbgX1BZ1ZNd9FaywjsQ1qzBn75BQwG+P57N2JjtzBs2DD27t3L3V/ukiVLcvHiRZydnQkLC2PA\ngAHUqlWLRYsWobXm6tWrXLp0ibJlywJkKYBz+NZRpkwZ5s2bh9aaWrVq0bt3bzw8PFi1ahUVKlQA\nICUlBcA0rRXA3d3dNEqaeY7Wmvbt29OlSxd27tzJlClTWL169T2FuNY62+sBjhw5QmhoKL6+vsyd\nO5c333wTW1tb0tLSTNcnJCRQqlQptNYEBwczePBg7O3tKVasGH///TcVKlRg586dTJ06NdvPOSkp\nCXt7e5RSlCxZkqJFiwIwb948Tp48ybJlt5d0GwwG3njjDZYuXco777zD5s2bOXz4MJcvX6Zjx45c\nuHCB5ORknnnmGWrWrMm+ffto164de/fuNd1j7dq1pt8nJibSvXt3AgICshS5u3fv5t1332XDhg04\nODgAsH//fo4fP07Hjh05deoUxYoVo1atWqYiWQghhChIDOnpHPzia3YGfEx6UjJN3/Wn8cRRFMn4\n/5oQQphDjgVsdsWrUqqj1nqjeSM92TZtgsyeSCkp3Th16gfatGnDc889Z1ormWn8+PF06NABd3d3\n08hdgwYNaN68Oc2aNcNgMDBr1izT+s28CAoKYtOmTRgMBjp06ICjoyPz589n8ODBpKamAjBx4sR7\nRhuHDx/O6NGj8fY2NqBu1KgR06ZNM01fTkpKYsqUKTk+N7vr33//fUaMGGGavuzj40OrVq2oX78+\np0+fplevXkydOpVZs2Zx/PhxtNZ4eXnx4osvAjBnzhxefvlltNaMHDmS0qWz3wT9P//5D0ePHjWt\nfw0MDOTSpUu89dZbpvW7AL///jsfffQRPj4+DB48mMTERAICApg5cybt27cHjF2io6Ki6NKlC9ev\nXzdNOfbw8Mj22fPnz+fgwYPMmDGDGTNm0KFDBwICAkzrbLt16wbArFmzGDx4MIMHDwaMa5rd3Nyk\neBVCCFEgRe/+iy2vT+TSgSNU6dCadvM/onTN6paOJYQoBFROI3XZnqzUX1rrfN9xulGjRnrfvn35\n/VizWLMGunY1/r5oUfjmm1R69CjCrl27mD59epbROyGEEEKIgiQx7go7J83g0KKVFK9QDq/Z71Gr\nV+dcl0EJIUReKKX2a63v28k0tynE2d73IfOIDBmDfTRrBhMmwLJlLzFnTizJycksXLjwkT5r/Pjx\nWTrz2trasmnTpkf6jILkypUr9OjRI8sxX19f3n77bQslEkIIIZ4M2mDg6LIf2D7+Q5Lir9JwzHCa\nv/cfbEsUt3Q0IUQh86AjsM201n+YMU+2nqQR2PBwcHeHFSugXz9LpxFCCCEsKyEhgTVr1jBw4EAu\nXrxI9+7dsbe357fffjNtg3Y/o0aN4tChQ4wdO5Zr164xd+5cOnfujK2tLZ06daJ+/frZXte/f39W\nrFjxwJkzezY8jPtd6+bmxqlTp7Icu3btGh07dsTW1pZbt24xffp02rVrl6dztNa8+eabhIWFUbJk\nSb7++mucnJy4cuUKAwcO5OrVq3h6ejJ37twcR1EvHwpny+sT+Sd0HxVbPEf7L6bjXF8aDgohHq28\njsDet4BVStkBrwItAQ3sBP6ntU5+FEHz4kkqYDdtguefh+3boVUrS6cRQgghLOvcuXP4+fmxZcsW\nVq1axbFjxwgMDHyge9SqVYsTJ04A8Pzzz7NgwQKqVatmjrhA9kXmo7o2u/cNBgMGgwEbGxvOnDlD\n3759szQPzO2cjRs38sMPP7BkyRK+/vprwsPDmTFjBhMmTMDd3Z0BAwYwdOhQ+vTpQ8eOHbPcM/na\ndf54bxZ/zf0S+9Ilaf3fybgP7I16gN4bQgiRV3ktYPPyN9AyoCGwCFgMPJtxTDyEyEjjr3dtRSqE\nEEIUSkFBQezf7d1E2wAAIABJREFUv5+aNWsyZcoUvv76a/z8/LI9NyQkhDZt2uDl5cVrr72G1prR\no0cTGRmJl5cXCxcuZPfu3fTr148ff/yRwYMHs3PnTsDY+K9JkyZ4e3ubus+7ubkBxq3Y+vTpQ7t2\n7Wjbtq2pgPTy8sLf3x8fHx/atWtHcnIyQUFBXLhwAS8vL5YsWcJXX31Ft27d6NGjBx4eHuzYsQMw\nbo3Wvn172rZtS58+fUhMTLzn2pyMGTOGNm3a8Morr2AwGLCyssLGxrjq69q1azz99NP3XJPTOSEh\nIXTu3BmALl26EBISkutxMO4ecPz7NXxV14v9sxdT3+9lhhzfjsfgvlK8CiEsT2ud6wcQnpdj5vxo\n2LChflJMnaq1UlonJ1s6iRBCCGF5Z8+e1e3atdNaa7106VL9wQcfZHuewWDQnp6eOiEhQWuttb+/\nv/7111+11lrXqFHDdF6bNm10ZGSk1lrrQYMG6R07dujDhw/r1q1b69TUVK211mlpaVmue+edd/Sq\nVau01lqHhYXpnj17mu71888/a621Hj58eLbPW7p0qe7atavWWutdu3aZrm3VqpU+f/681lrr2bNn\n688+++yea7NTpUoVHRoaqrXW2s/Pz/T8qKgo3aJFC+3s7GzKcbfszhk+fLjetm2b6WtYu3ZtrbXW\ntWrV0gaDQWut9datW3W/zr56w5Ax+v96+OkvKjbQn1BRf/3s8/qf3X/lmlcIIR4VYJ/OQ22YlyZO\nB5VSz2mt9wIopRoCB8xWUT/hoqKgXDnI47IeIYQQQgCxsbGcO3eOrhmt/G/cuEHt2rXzdG14eDgt\nW7Y0jVDeuc85GEdLQ0JCWLBgAYDpPICGDRsC4OrqSlxcXLb3z+6co0ePMnDgQMC41Vzmdmz3o5Qy\nbaHWpEkTjh8/DkClSpXYuXMn586dw8vLi86dO+Pn58epU6fo1asXo0aNyvYcJycnEhISAONIc+aW\nc6VLl+bq1avYphk4GRJKyulIjq7bDxlLy9yH9MVn0X+xuutrJYQQlpaXArY+sFspdSbjdTUgQil1\nAON2sfm+rc7jLDJSpg8LIYQQmWxtbUlLS7vveWXLlqV69eqsXbuW4sWNnW8z9y+/H3d3d7744gvS\n09OxtrY2Tcu98/1mzZrRvXt3AFJSUkzv3dnYSGcUd3fvv57dOR4eHqxatYoKFSpkuef99m7XWrNv\n3z6aNGnC3r176dixI8nJyaa94h0dHSlRogQAixcvNl2X0zlt2rTh559/pmvXrvy47BvqV3iKXVP+\nS4XYG/ynegPqxafwAwnUxx5lXQydnk7RCi5SvAohCqy8FLBdzZ6iEImMhDp1LJ1CCCGEKBjKly+P\ng4MDPXv25MUXX8zxPKUUQUFB+Pr6orXGysqKTz/9NNv1oHdzd3ena9euNG/enGLFijFo0CAGDRpk\nej8gIIDXXnuNzz77DK01nTp1YuzYsTneL7PY7du3b47nzJ8/n8GDB5uK7IkTJ9KhQ4cs17700kv3\nXGdjY8Pq1asZP348lSpVwtfXl7CwMMaMGYO1tTVpaWnMnj37nuuOHDly+5zUVALfepvwb37E7q/D\nnF+/lZq2xSiSZuBlSrP7lz9oV7Mqyxxu8FcpB+rXb8LsJYv4e/MO1vd7A69ZU6V4FUIUWHnaRkcp\n5Q5k9szdobU+atZUd3mSuhA7OsKQITBnjqWTCCGEEOJxl3orkcuHwrkcdpRLB45w6cBRYg8fIy0p\nCQAbe3vKPl0XlwbuuDTwwKWBB2Xr16GIg8M99zKkpxP2+TI8Rw6SAlYIke/y2oX4viOwSqlRwEjg\n/zIOfa+Umq+1/vxfZix0rl6F69dlCrEQQgiRm/DwcEaOHJnl2IgRI+j3BG2gvnXrVt5///0sx6ZM\nmULbtm1zvCYx7gqXwo5y6UBmsXqE+OOn0QYDAPalS+HSwJ1n3hhEuYxitXSt6ljZ5GXCHVhZW/Ps\n6KEP/0kJIUQ+yMs+sIeA5lrrGxmviwOhWuv7z9l5RJ6UEdgjR6B+fVi1CrKZNSSEEEIIgdaa65H/\nmIrUzI/rkf+YzinxVEXTiGrmR4mnKmZZjyuEEI+TRzYCCygg5Y7XqRnHxAOKijL+KiOwQgghhADj\ntN3446e5dOAIMQeOcPnAES6FHSXpirFzsLKyonTtGlRq1cRYqHrWw9nTg6JlnSycXAghLCPHAlYp\nZaO1TgO+wdiFeHXGW92BZfkR7kkTGWn8VQpYIYQQovBJTUwk9vCxO0ZVjxJ7OIK0RON6VWs7O5yf\nrkutXp1w9jSuWXV+uh5Fit67XlUIIQqr3EZg9wDPaq1nKqWCgZYZx1/L3BNWPJjISFAKMjrqCyGE\nEOIJlXglPktjpUthR7ly7BQ6PR0Au1IljetVXxtgmgLsVMctz+tVhRCisMrtb0nTNGGt9R6MBa34\nF6KijMVrkSKWTiKEEEKIR0FrzfWof0yNlTKL1mvno0znFK9cARdPd2r2eMFUrDpWqSzrVYUQ4iHk\nVsA6K6XezulNrXWQGfI80SIjZfqwEEII8bgypKcTf+KMcVQ17HYn4KS4eOMJSlG6VnUqNGvIMyMH\nZaxZdaeocxnLBhdCiCdIbgWsNVAcadj0yERGgoeHpVMIIYQQ4n7SkpKM61XvKFQvH4og7VYiANa2\ntpStX4ea3V8w7bFatn5dbIsXs3ByIYR4suVWwEZrrd/P5X3xALQ2TiF+4QVLJxFCCCHEnZLiE7h8\nMDxLc6W4iJO316uWdMTZ052nR/Q3jao61a2JtawJEkKIfJenNbDi30tIgJs3ZQqxEEIIYSlaa278\nczFLoXrpwBGunYs0nVO8YnlcGrjj1u15UyfgktVcZb2qEEIUELkVsO3yLUUhkLmFTuXKls0hhBBC\nFAbaYCD+5BlTkZq5bjXxcpzxBKUoXbMa5Rt78vSrr1Auo7lSUZeylg0uhBAiVzkWsFrrK/kZ5EkX\nldGMUEZghRBCiEcrLTmZuCPHiTGNrB4h9lAEqTdvAWBVpAhlPWpTo0sHUxdg56frYluiuIWTCyGE\neFCy2Vg+yRyBlQJWCCGEeHjJV6+ZGitlblkTF34SQ1oaALYliuPs6Y7HsJdNzZXK1K2Jta2thZML\nIYR4FKSAzSdRUWBlBeXLWzqJEEIIUfBprbkZHXPXljVHuXrmvOmcYuVdcGngQfXO7U0jqyWruaKs\nrCyYXAghhDlJAZtPIiOhYkWwka+4EEIIkYU2GIg/dTZjRPX2mtVbl2JN55Ryq0q5hvWp7/eyqRNw\nsfIuFkwthBDCEqScyieRkTJ9WAghhEhLTiYu/OQdnYCPcPlgOKk3bgLG9apl3GtRrVM7U6Hq/Ew9\n7BxLWDi5EEKIgkAK2HwSFQXPPGPpFEIIIUT+Sb52/a79VTPWq6amAlCkeDFcPN3xGNLXtGVNWfda\nsl5VCCFEjqSAzQdaG0dgO3e2dBIhhBDCPG5evJSlUL104CgJp8+Z3i9azhmXBh5Ue6GtqblSqRpV\nZb2qEEKIByIFbD64cgUSE2UPWCGEEI8/bTCQcOZ8lkL1cthRbl68ZDqnVI2qOHvWw31IH1NzpeIV\nylkwtRBCiCeFFLD5QPaAFUII8ThKT0khLvzE7cZKYcZiNeX6DQCsbGwoU68mVZ9vc3t/1WfqYVfS\n0cLJhRBCPKmkgM0HsgesEEKIgi7l+g0uZaxXNe2vevQE6SkpABQpVhTnZ+pRb2AvU7Faxr0WNnZ2\nFk4uhBCiMJECNh9kFrAyhVgIIURBcDPmMpfDjhKT2QX4wBHiT50zNm0AHJzL4NLAg2fHDDeuV/V0\np5RbNaysrS0bXAghRKFn1gJWKVUKWAx4ABoYCiQCCwB7IA0YqbXeY84clhYVZdz/tXx5SycRQghR\nmGituXr273uaK92MjjGdU7KaKy4NPKg3sJepE3DxiuVRSlkwuRBCCJE9c4/AzgE2aq17KaVsgaLA\n90Cg1nqDUupFYCbgZeYcFhUZCRUrgvzgWgghhLmkp6ZyJeJklkL1UthRUq5dB0BZW1OmXi2qdGiF\nS0ah6uzpjn2pkhZOLoQQQuSd2QpYpVRJoDUwGEBrnQKkKKU0kNndoSTwj7kyFBSRkTJ9WAghxKOT\ncuMmlw+F326udOAIcUeOm9ar2hR1wPmZetTt3920XrWsR21s7O0tnFwIIYT4d8w5AlsNuAwsVUo9\nA+wH3gL8gd+UUp8AVkDz7C5WSo0ARgC4urqaMab5RUVBw4aWTiGEEOJxdOtyXNYpwGFHiT9xxrRe\n1b5MaVwaeNDgrWGmYrV0TVmvKoQQ4slkzgLWBngWGK213q2UmgNMwDjqOkZrvVop1QdYArS/+2Kt\n9f+A/wE0atRImzGnWWltLGC7dbN0EiGEEAWZ1ppr5yKzFKqXDhzhxoWLpnMcq1TGpYEHdft1NzZX\nauBB8UoVZL2qEEKIQsOcBWwUEKW13p3x+keMBWxLjCOxAD9gbPL0xIqNhaQkmUIshBDiNkNaGnER\nJ03b1RgL1nCSE64CoKyscKpbk6e8m2fZX9XBqbSFkwshhBCWZbYCVmt9USkVqZSqrbU+DrQDwoHq\nQBsgGGgLnDRXhoIgKsr4q+wBK4QQhVPqzVtcPhxxu7HSgSPEHj5GenIyADYO9jg/XZc6L/maugCX\nrV+HIg4OFk4uhBBCFDzm7kI8GliR0YH4DDAE+AWYo5SyAZLIWOf6pMrcA1YKWCGEePIlxl3JUqhe\nOnCE+BNn0AYDAPZOpXBp4IHnqMGUy1yvWqs6VjayLbsQQgiRF2b9P6bWOgxodNfhnUChaWmUWcDK\nFOKC7+LFi3Tv3h17e3s2btxIv379iIuLY+bMmcyZM4cVK1Zke93GjRu5fPkyAwYMeKDnhYWFce3a\nNVq3bv3AWe93bXBwMMuXL2fx4qwz9GfOnMnq1auxsbHh2WefZe7cufesnfP39+fPP/8EoFu3bkyY\nMAGAM2fO8NZbb3Hz5k0qV67M119/DcC0adNYt24ddnZ2fPnll1StWvWBPx8hHjdaa67/fYGYLPur\nHuFGVLTpnBKulXDxdKd2X1/TNOAST1WU9apCCCHEvyA/8jWzqCgoUgTKlbN0EnE/27Ztw8fHh8DA\nQKKjo4mNjSUkJAQgx+IVoGPHjg/1vLCwMKKioh66gH2Ya7t378748eMB6NOnD1u3bqVdu3ZZznnj\njTeYPXs2BoOBFi1a0Lt3b2rUqMGoUaNYsmQJFSpUMJ177Ngxtm7dyq5du9i+fTsTJkzg22+/feDP\nR4iCzJCWxpXjp7MUqpfDwkmKTwCM61VL165B5dZNMwpVd1w83XEo42Th5EIIIcSTRwpYM4uMhEqV\nwMrK0knE3SZOnEhoaCgpKSm89tprTJ8+neTkZC5cuEBMTAyHDh3Cy8uLtWvX4unpyalTp4iPj8fP\nz4/Y2FisrKxYtWoVGzduJCoqismTJxMSEsKUKVNQSlGnTh2++OILzp8/T8+ePalbty7h4eEMHDgQ\nf39/goKCuH79Olu2bGHFihX0798fT09PwsPDSU9PZ/369djZ2fHZZ5/x/fffk5aWxrBhw/Dz87vn\n2kqVKt3z+Z0+fZru3btz9uxZAgIC6N27NzVr1jS9b2dnh0020xYzz7GyssLGxgZra2vOnz/PrVu3\neOutt4iJieHNN9+kZ8+ehISE0KlTJwBat27Nq6++aqbvlhD5I/VWIrGm9arGxkqxhyJIS0oCwMbe\nnrL161Crd6fb+6vWr0uRorJeVQghhMgPUsCaWVSUTB8uiDZu3Eh8fDwhISHcunWLZs2a8c4773Dh\nwgUmT57MuXPn8PPzY8uWLVmumz59Oj4+PqZCzZCxrg2MUwr9/f0JDg6mZMmSjBkzhnXr1uHh4UF0\ndDQ7duzAysqKunXr4u/vz9tvv20qfDN5eXkxe/ZsRowYwebNm6lRowYbN25k+/btGAwGWrVqRffu\n3bO99m6XL19m8+bN3Lp1i0aNGtGzZ0+sMn6SEhISQnR0dK4juCtWrKB69epUrVqVP/74gwMHDhAe\nHk6JEiVo3rw5bdu2JS4ujooVK5quST9+HO6cHlmiBFy7lrdvihD5LPFKvGm9amY34CvHTpnWq9qV\nKolLA3eeGTnQVKw61a4h61WFEEIIC5L/C5tZZCQ0aWLpFOJuhw8fJiQkBC8vLwCSk5OJi4u773VH\njhxh+PDhptdWdwytx8bGcu7cObp27QrAjRs3qF27Nh4eHtStW5eiRYsCYG1tneP9GzY0Lg93dXUl\nLi6OxMREwsPD8fb2BuDatWtEZi6svo8GDRpgY2ODo6MjLi4uXL58mXLlynHo0CEmTJjAr7/+ilKK\nnTt3mgrhtWvXUrx4cbZs2cLSpUv59ddfAXBycqJ+/fqmkV5PT09OnjyJk5MTCQkJpmda67u2bL5+\nPU9ZhTAnrTXXI//JsrfqpQNHuP73BdM5xStXwKWBBzV7ZYyserrjWKWyrFcVQgghChgpYM3IYDCO\nwPbqZekk4m7u7u74+PgwZ84cAFJSUli5ciVRmfse5cDDw4Pg4GDTNNs7R2DLli1L9erVTUUgQGpq\nKhcuXMj2H8G2trakpaVlOXbneVpr6tatS4MGDVi9ejVKKVJTUylSpAjh4eH3XHu3sLAw0tLSSExM\nJCYmBmdnZ06dOsXQoUNZvXo1ZcuWBaBly5YEBwebrtu9ezfvvvsuGzZswCFjGw83Nzdu3brF9evX\ncXBwIDw8nCpVqlCiaFH8/fzwv3WL0B9/5JlcEwlhfob0dOKPn85SqF4KO0pSXLzxBKVwql2DSi2e\nw2XUENO2NUXLynpVIYQQ4nEgBawZxcZCSopMIS6IXnzxRUJDQ/Hy8kIpReXKle9pZpSdiRMnMnTo\nUJYvX461tTUrV640vaeUIigoCF9fX7TWWFlZ8emnn+Lo6JjtvVq0aMG8efM4cuQI8+bNy/YcDw8P\n2rdvT5s2bbC2tsbBwYE1a9bcc2358uXvubZixYr07t2bs2fP8uGHH2JlZYW/vz8JCQkMGjQIgHHj\nxpnWsGYaNmwYYOxADDBr1iwaNmzIzJkzeeGFF0i9cYPh7u6UGzWKclu30vLKFVrs3o1t8eIsue9X\nUIhHJzUxkbgjx7M2VzoUQVqicb2qtZ0dZevXoWb3F4yNlRp44Px0PYoUK2rh5EIIIYR4WErfPeWv\nAGrUqJHet2+fpWM8sP37oVEj+PlnyKgFhHj8JCTAtm2webPx49Qp4/FKlcDHBzp0gHbtwMUFHB2z\nThuWNbDiEUmKT8g6qnrgqHG9ano6AHYlHTNGU91vr1et44Z1kSIWTi6EEEKIvFBK7dda370F6z1k\nBNaMMpcqPvWUZXOIJ9v48ePZs2eP6bWtrS2bNm16+BumpsLu3bcL1j17ID0dihcHLy8YPdpYtNap\nk7VhE0ixKv41rTU3LkSbitTMgvXa+dvT+4tXLI9LA3dqdu9oKlYdqz4l61WFEEKIQkAKWDPKXE4p\nU4iFOc2cOfPf3UBrOHHCWKxu2gTBwcZRVCsreO45mDjRWLA2bQq2to8ksxCQsV715Nm79lc9SmLs\nFeMJSlG6ZjUqNH2WZ14faGquVNSlrGWDCyGEEMJipIA1o8hI47/3nZ0tnUSIu8TGwpYtt0dZM6cL\n1KgB/fsbC1Zvbyhd2rI5xRMjLSmJ2Mz1qhlTgS8fDCftViIA1ra2lPGoTY2uz2eMqrrj/HQ9bIsX\ns3ByIYQQQhQkUsCaUWSkcfT1jp1WhLCMpCTYtet2wXrggHHktVQp4/rVgABj0Vq9uqWTiidAUsJV\nLh8Mz7peNeIkhozO2baOJXDxdOfp4f2MjZU83SlTtybWMsIvhBBCiPuQAtaMoqJk+rCwEK3h8OHb\nBev27ZCYCDY20Lw5vP++sWBt1Ahy2ZdWiNxorbkZHZOlUL104AhXz/5tOqdYhXK4NHCnhm8HXDK2\nrClZzRUlP9kTQgghxEOQAtaMIiOhRQtLpxCFxj//3C5Yt2yBmBjj8bp1YfhwY8fgNm2MzZiEeEDa\nYCD+1Nl7mislXo4znVO6ZjXKNXqa+hkjqy4NPChWTtZQCCGEEOLRkQLWTAwGuHCh8HQgvnjxIt27\nd8fe3p6NGzfSr18/4uLimDlzJnPmzGHFihXZXrdx40YuX77MgAEDHuh5YWFhXLt2jdatWz9w1vtd\nGxwczPLly1m8eHGW46Ghobz66qucPHmSU6dOUTmb4fWZM2eyevVqbGxsePbZZ5k7d66pM2pqair1\n6tVj0KBBTJ48GYBp06axbt067Ozs+PLLL6latWreP5GbNyEk5HbRevSo8bizs3F0tUMHaN9epgGI\nB5aWnEzc0RNZmysdDCf15i0ArIoUoYx7Lap3bm8qVF2eqYdtCfnhiBBCCCHMSwpYM7l0ybgbSWGp\nHbZt24aPjw+BgYFER0cTGxtLSEgIQI7FK0DHjh0f6nlhYWFERUU9dAH7MNe6u7vzxx9/0Llz5xzP\n6d69O+PHjwegT58+bN26lXbt2gGwcOFC6tSpYzr32LFjbN26lV27drF9+3YmTJjAt99+m3OA9HT4\n66/bBeuuXcb/yOztoVUrGDTIWLQ+/bQsvBZ5lnztOpfv3F817ChxR0+Y1qsWKV4MF093PIa+ZGqu\nVKZeLVmvKoQQQgiLkALWTJ70PWAnTpxIaGgoKSkpvPbaa0yfPp3k5GQuXLhATEwMhw4dwsvLi7Vr\n1+Lp6cmpU6eIj4/Hz8+P2NhYrKysWLVqFRs3biQqKorJkycTEhLClClTUEpRp04dvvjiC86fP0/P\nnj2pW7cu4eHhDBw4EH9/f4KCgrh+/TpbtmxhxYoV9O/fH09PT8LDw0lPT2f9+vXY2dnx2Wef8f33\n35OWlsawYcPw8/O759pKlSrd8/mdPn2a7t27c/bsWQICAujduzclS5a879elZs2apt/b2dlhY2P8\nI3bjxg02bNhA7969icrYXykkJIROnToB0Lp1a1599dV7b3ju3O3tbbZuhSsZ24t4eoK/v7FgbdkS\nHBwe8DsoCqMbd6xXNRatR0k4fc70ftFy/9/enYdXVd37H38vEuZBEJwVUVFGFQRBFExAi7YqSNEr\nFhQqSHtVLNbW4eqv1mq17W1xFq7FioigdcahKA5BLAkIioCAiBUFCggoIKBMWb8/9iENECBowknC\n+/U85znn7Ol8T9xu8slae60DOLB1S476UZeCKWvqHtPI+1UlSVKZYYAtJRU5wI4bN46vvvqKCRMm\nsH79ejp06MD111/P4sWLufnmm1mwYAEDBgzg9ddf32a/O++8k65duxYEtfz8/IJ1MUYGDx5MTk4O\n++23H9dccw0vv/wyLVu2ZMmSJUycOJFKlSrRrFkzBg8ezC9/+cuC4LtVdnY2d999NwMHDmT8+PEc\nc8wxjBs3jrfffpv8/Hw6depEjx49itx3e8uXL2f8+PGsX7+etm3b0rNnTyrtwS/xEyZMYMmSJQWt\nvP/7v//L4MGDWbx4ccE2K1eu5NBDDy14v+WjjyDV3RhIXseYvD7sMOjePQmsZ5wBBx5Y7Fq074n5\n+az6ZEGhKWuSFtb1y5YXbFP3mEYc2LoFLS+7iANSgyvVOuSgNFYtSZK0ewbYUpJqZKuQXYhnzpzJ\nhAkTyM7OBmDDhg2sXLly1zsBs2bN4vLLLy94XzgQrlixggULFtC9e3cgabFs0qQJLVu2pFmzZtSo\nUQOAjF2MmNumTRsAGjZsyMqVK/nmm2+YPXs2nTt3BmDNmjUs3PqXhd1o3bo1mZmZ1KlThwMPPJDl\ny5dz0EE7/nI/f/58BgwYAMDw4cNp3LgxM2bM4IYbbuDFF18khMCyZct4//33ufXWWxkxYkTBvvvv\nvz+rVq0qeJ+xNaxuFSPcc08SWps23TbcSilbNm5kxYfztukGvPyD2Wz8ei0AlTIzqd/iOI76YeeC\nUYAPOLE5Vferk+bKJUmS9pwBtpQsXJjcmtigQborKXktWrSga9eu3HPPPQBs3LiR0aNHF3SN3ZmW\nLVuSk5NT0M22cAtsgwYNOProo3nppZeolRold9OmTSxevLhgEKTCqlSpwubUPXpbFd4uxkizZs1o\n3bo1zzzzDCEENm3aROXKlZk9e/YO+25v+vTpbN68mW+++YZly5ZxwAFFj6TauHFjcnJyCt7Pnz+f\nyy67jGeeeYYGqf/4M2fOZPny5Zx99tksXryYDRs2cOKJJ5KVlcXgwYMZPHgwkyZN4sSiPuDqq3dZ\np/YtG79eyxfbzK86K7lfddMmACrXrMEBrVrQ/NILCgZXqt/iODKrVk1z5ZIkSSXDAFtKFi5MWl8r\nYqPZj370IyZNmkR2djYhBA4//PCCgYp25cYbb+Syyy5j1KhRZGRkMHr06IJ1IQSGDBlCt27diDFS\nqVIl7rrrLurUKbqV6LTTTuP+++9n1qxZ3H///UVu07JlS84880yysrLIyMigevXqjB07dod9Dz74\n4B32PfTQQ7nwwgv59NNPuf3226lUqRLz5s3jiiuu4IMPPuDiiy/mJz/5Cf/93/+9zX6DBw9m1apV\n9O3bF4Bf//rXnHPOOZx55pkAjBgxgkWLFnHeeecB0LFjR0477TSqVKnCw7v9CWpfsm7Z8m2C6hfv\nz2LV/AUF66sfUJ8DW7ek0VnZHNg6aVmt1/go71eVJEkVWojbd1ssg9q2bRunTp2a7jL2SMeOULky\nvPVWuitRuZGZmYw0vFXt2rBmTfrq0V4R8/NZ/ennhYJq0hV43dIvCrbZ76iGBSMAF8yveshBRfZO\nkCRJKo9CCNNijG13t50tsKVk4ULIykp3Fdqd6667jilTphS8r1KlCq+99treLyRGqFsXunWDv/1t\nh9WrVq1i7NixXHrppdvMufvqq69SpZjTmVx11VXMmDGDX/3qV6xZs4Z7772Xc889lypVqnDOOedw\n/PHHF7lf7969dzkV0s7ce++9XP0du0Dvbt/GjRszf/78bZa9//77XHXVVWRkZJCZmcnw4cM5+uij\nt9nm1VdOLxTaAAAgAElEQVRf5ZZbbqFq1arUrFmTxx57jPr167Nlyxauv/76gq7jDz74IM2bN+e9\n995j0KBBxBgZOHAg/fr1+07fZ6stmzaxcva8bYLq8g9ms3HN1wCEjAzqNz+OI7ueXhBUDzixOdXq\n7n4EbEmSpH2BLbClYMuW5P7X666D3/8+3dWoXJg/H449Fh56CAoNdLVV4ZGdx4wZw9y5c7n11lv3\n6COOO+445s2bB8BZZ53FsGHDOOqoo0qk/KIUFTJLat+i1i9dupSaNWtSu3ZtXnnlFcaMGcNjjz22\nzTaff/45Bx10EFWrVuXBBx9kyZIl3HbbbQwdOpSMjAwGDhy4zfannXYao0aN4rDDDuOUU07hjTfe\noF69esX6DhvXrmN54ftVp3/IylkfsWXjRgAya1TngBObFwTVg7ber1qtWrGOL0mSVJHYAptGy5bB\n5s0VcwRilZLc3OT5lFOKXD1kyBCmTZtWMADW5s2bWbx4McOHD99h26Lm07366qtZuHAh2dnZXHzx\nxUyePJmf/OQnXHvttbz00ksMGDCAjh07cs899zB69Ghq1KhBv3796Nu3b0FYXL16NZdffjkrV64k\nxshDDz1E48aNyc7O3mEO3gceeIDFixeTnZ3NJZdcQkZGBs8//3zBvcRDhw6lU6dOzJw5k2uuuYb8\n/HwaNGjAo48+ytChQ7fZt3///kX+TK655hree+89jjjiCEaOHLnNvcyF5+AtrGHDhkVu89RTT9Gh\nQwc6d+5MixYtGDJkCDFG1q1bVxDyO3XqxJQpUzjrrLN2OO76L1YUmrImCaxfffxpwTRI1Rvsz4Gt\nW3LS4AEFU9bUO/YoKu1iVG1JkiQVIcZY5h9t2rSJ5UleXowQ44svprsSlRtXXBFj7doxbt5c5OpP\nP/00nnHGGTHGGB955JF42223Fbldfn5+bNWqVVy1alWMMcbBgwfHF1Mn4jHHHFOwXVZWVly4cGGM\nMca+ffvGiRMnxpkzZ8bTTz89btq0KcYY4+ZULVv3u/766+OYMWNijDFOnz499uzZs+BYzz33XIwx\nxssvv7zIz3vkkUdi9+7dY4wx/vOf/yzYt1OnTvGzzz6LMcZ49913x/vuu2+HfYty5JFHxkmTJsUY\nYxwwYEDB58cY49q1a+Mpp5wSP/zww53uv3Tp0tiqVau4bNmyGGOMxx13XMFnX3vttXHo0KFx8eLF\nMSsrq2CfX1z603jd6WfHFy64PD7Z+cL4cp+r4rPnXBKHHXpS/DOHFjweatQ+Pt+jf5x065A4f+yr\ncc3CxTE/P3+X30eSJGlfB0yNxciGtsCWgq2zyRxxRHrrUDmSmwvt2sH3bJHb2Xy6xTF79mw6duxY\n0Cq5/Zy7W+f/HTZsGMA2LZzbz8FblKK2+fDDD7n00ksB+PbbbwtGa96dEALt2rUDoH379nz00UdA\nMvXSRRddxPXXX0/z5s0BOPfcc1m7di1XXXUVF1xwAWvWrOGCCy5g2LBhHHjggUAyJ+/ZZ58NwNln\nn82zzz5Lv379tpmn98svllPt7Xf5mBlJDZUqUb/5cRzR5dSCbsAHtmpBtXp1i/UdJEmStOcMsKVg\n4cLk2S7EKpZ162DGDLjhhp1uUtS8t0XZ2Xy6xdGiRQuGDh3Kli1byMjIID8/n0qFpmRp0aIFHTp0\noEePHkAy/+9W28/BC2yz7862admyJWPGjOGQQw7Z5pjb77u9GCNTp06lffv2vPvuu5x99tnk5+fT\np08fzj//fM4///yCbV966aWC19988w09evTgpptuon379gXLs7OzmTp1Ko0bNy54rlatGjVr1uTz\nzz/nkEMOYfbyJfRqcBCsWEX1A+sz4JNcqtSqucs6JUmSVLKcMLAULFwI1avD/vunuxKVC9OmJSN/\ndeiw000OPvhgqlevTs+ePdlSeKqd7RSeT7dz586cccYZzJkzp1hltGjRgu7du3PqqafSpUuXHQZA\nuummm/j73/9Oly5d6Ny5M/fee+8uj7c17D7xxBM73eaBBx6gX79+dOnShS5dujBhwoRi7ZuZmckz\nzzxDVlYWX3/9Nd26dePZZ5/l5ZdfZtSoUWRnZzNo0KAiP++DDz7gD3/4A9nZ2fw+NcraddddxxNP\nPEF2djZTpkzhZz/7GQD33HMPF198MVlZWVxxxRWcc2+yfee7f2d4lSRJSgNHIS4FF10E06dDqlej\ntGt//GPS+rp8OTRokO5qtAv5W7Yw/cFHaXVFXwdgkiRJKkGOQpxGCxfafVh7IC8vmUJnD8Pr7Nmz\nueKKK7ZZNnDgQH7yk5+UZHVp9eabb/K73/1um2W/+c1v6NKlS1rqqZSRwUmDLkvLZ0uSJMkAWyoW\nLoQzzkh3FSoXYkwGcOradY93bd68OTk5OSVfUxmytWuxJEmSBN4DW+I2b4YlSxyBWMX02WfJxME7\nmf9VkiRJ0n8YYEvY0qXJeDx2IVax5OUlz7sYwEmSJElSwgBbwrZOoWMLrIolNxdq1IDjj093JZIk\nSVKZZ4AtYYsWJc8GWBVLXh6cfDJkeju6JEmStDsG2BK2tQXWLsTarW+/hfff9/5XSZIkqZgMsCVs\n4UKoWRPq1k13JSrz3nsPNm0ywEqSJEnFZIAtYYsWJa2vIaS7EpV5WwdwMsBKkiRJxWKALWELF3r/\nq4opNxcaNYKDD053JZIkSVK5YIAtYQZYFVtentPnSJIkSXvAAFuCNm2CJUscwEnFsGhR8rD7sCRJ\nklRsBtgStGQJxGgLrIrB+18lSZKkPWaALUHOAatiy8uDqlWhVat0VyJJkiSVGwbYEuQcsCq23Fxo\n0waqVEl3JZIkSVK5YYAtQVsDrC2w2qWNG2HaNAdwkiRJkvaQAbYELVoEtWvDfvuluxKVadOnw4YN\n3v8qSZIk7SEDbAlauNDuwyoGB3CSJEmSvhMDbAlyDlgVS15e8pcO/9ohSZIk7REDbAlatMgAq2LI\nzbX1VZIkSfoODLAlZONGWLrURjXtxtKlsGCBAzhJkiRJ34EBtoT8+98Qoy2w2g3vf5UkSZK+MwNs\nCVm0KHneGwF2xIgRrFmzpuB99erVyc7OJjs7m4cffhiAGCODBg2iU6dOnHvuuXz55Zc7HGfkyJG0\na9eO008/nV69erFhwwYALrzwQk499VTat2/PiBEjttln3rx5VK5cmXfeeaf0vmBFlpcHlSvDSSel\nuxJJkiSp3DHAlpCtc8AW7kK8ZcuWUvms7QPsYYcdRk5ODjk5OfTv3x+AV199lfXr1zNx4kT+67/+\niz/96U87HKdjx47k5uby9ttv07BhQ0aNGgXAHXfcwaRJk5gwYQK333473377bcE+t912G1lZWaXy\nvfYJeXnQujVUq5buSiRJkqRyJzPdBVQUWwNsfv4CTj75Qpo2bUpmZibr1q1j5cqVxBh56KGHOOaY\nY+jduzcLFy4kMzOTW2+9lYYNG9KzZ0+aNWvG7NmzufTSSxk8eDCrV6/m8ssv32b/zz//nOnTp3Ph\nhRfStm1b7rvvPpYuXUpWVhb169dnyJAhNGrUiAkTJnDuuecCcN555zF06NAdaj766KMLXletWpXM\nzOR0OPbYYwGoUqUKGRkZhBAAmDx5MgcffDAZGRml+aOsuDZvhnffhQED0l2JJEmSVC4ZYEvIokWw\n335QqxYsWLCAN954gzvuuINWrVrRq1cvPvjgA2644Qb+7//+j88++4x33nmHEAL5+fl8/vnnLFmy\nhIkTJ1KpUiWaNWvG4MGDufPOO/nxj3+8zf5PP/00rVq1YtSoURyeau5dsGABDRo04NVXX6V///68\n8cYbrFy5knr16gFQt25dvvrqq53WPnfuXMaNG8fEiRO3WX7nnXfSq1cvqlatCsDvf/97HnnkEa69\n9tpS+ilWcDNnwvr1DuAkSZIkfUcG2BIyZQpkZMD48dCyZUvq1KnDzJkzmTBhAsOGDQMgMzOT+vXr\nc/nll3PJJZdQo0YNfvOb3wDQrFkzatSoAVDQwlnU/kVp0KABAGeddRZXXnklAPvvvz+rVq0CYPXq\n1dSrV4+1a9cWtMrefvvtdOzYkUWLFtG3b1+eeOIJqhXq1jpy5EhmzJjBmDFjAHj55Zdp27Yt9evX\nL7kf2r4mNzd5dgAnSZIk6TsxwJaAsWOTnqH5+TBoEDRpkgTQFi1a0KFDB3r06AHAxo0b2bRpE336\n9KFfv36MGjWKu+66i0GDBhV00y2sqP0h6dq7efNmANauXUv16tXJyMhgxowZBWE2KyuL5557jvPP\nP59XXnmFrKwsatWqRU5OTsHxV6xYQc+ePRk2bBjHHHNMwfIXXniB0aNHM3bsWCpVSm6Tnj59Ojk5\nOUyaNImZM2cyd+5cnnzySY488sgS/mlWYHl5cNBB4M9MkiRJ+k5CjDHdNexW27Zt49SpU9Ndxk5d\ndRU88MDWdws44IABfPHF66xevZqf//znLFu2jBgj55xzDhdffDG9evUiIyODjRs3cu+999KgQQMG\nDBjA66+/DkDjxo2ZP39+kfv/6le/YtiwYTz11FOceuqpnHfeefzsZz+jdu3ahBC49957OfHEE8nP\nz2fQoEHMmDGDOnXqMHLkyB1aT6+66iqef/55GjduDMAll1xC//79qVWrFk2bNqVWrVoAPP744xx2\n2GEF+/Xr148BAwbQsWPHUv/ZVijHHQctWsBzz6W7EkmSJKlMCSFMizG23e12Btjvb+xY6NULvvkG\nQkjmg/3pT+EPf4ADD0x3dSoTVqyAAw5ITorrr093NZIkSVKZUtwA6zQ6JaBbN3jiCbjyShg9Gq67\nDkaNShrc7rsvGXxW+7jJk5NnB3CSJEmSvjMDbAnp1g3uvz9pif3jH2HGDGjXDq6+Gk46Cd5+O90V\nKq1yc5NRvtq0SXclkiRJUrllgC0lTZvCq6/CM8/A6tWQlQV9+sC//53uypQWeXlw4olQs2a6K5Ek\nSZLKLQNsKQoBfvxjmDMH/t//g6efhiZN4M9/htSAwtoXbNmSzLPk9DmSJEnS92KA3Qtq1IDf/Q4+\n/BCys+HXv04a41KDDquimz0bvv7aACtJkiR9TwbYveiYY+DFF5PHpk3wgx/AhRfC55+nuzKVqry8\n5NkBnCRJkqTvxQCbBueeC7NmwW23wcsvQ7NmcMcdsGFDuitTqcjNhQYNkr9gSJIkSfrODLBpUq0a\n3Hxzcn/sD38IN90ELVvCK6+kuzKVuLy8pPtwCOmuRJIkSSrXDLBpduSRyeBOr72WzLJyzjnJlDz/\n+le6K1OJWLUq+SuF979KkiRJ35sBtoz4wQ+SuWP/9Cd4801o3hxuuQXWr093ZfpeJk9Ong2wkiRJ\n0vdmgC1DqlRJRij+6CPo2TMZubh5c3j+eYgx3dXpO8nLS7oOt2uX7kokSZKkcs8AWwYddhg8/jjk\n5EDt2tCjR3Kf7Ecfpbsy7bHc3OTm5tq1012JJEmSVO4ZYMuwrCx4/324554kBx1/PNxwA6xdm+7K\nVCz5+UkXYqfPkSRJkkqEAbaMy8yEq6+GefOgd2/44x+haVN48km7FZd58+Ylgzh5/6skSZJUIgyw\n5cRBB8Ejj8CkScnrXr2gS5dkPlmVUbm5ybMBVpIkSSoRBthypkMHmDIFhg1LRi1u1Qp++UtYvTrd\nlWkHeXlQty40aZLuSiRJkqQKwQBbDmVkwM9+lvRQHTAA7r47yUgjRya3XaqMyM2F9u2hkv+bSZIk\nSSXB36zLsfr1k5bYKVOgUSPo2xc6dYLp09Ndmfj666R/twM4SZIkSSXGAFsBtG2b3Bv7t7/Bxx9D\nmzZw5ZXw5Zfprmwf9u67yShb3v8qSZIklRgDbAVRqRL89KdJt+Irr0xaZo87Dv76V7sVp8XWAZza\ntUtvHZIkSVIFYoCtYOrWhXvvTeaPbd4cBg5MGgGnTEl3ZfuYvDxo1gzq1Ut3JZIkSVKFYYCtoE44\nASZMgMcfh0WLkrGEBgyA5cvTXdk+IMYkwNp9WJIkSSpRBtgKLAT4yU9g7lz41a/g0UeTbsUPPACb\nN6e7ugrsk09gxQoHcJIkSZJKmAF2H1CnDvzv/ybzxrZpA1ddlQz89M476a6sgsrLS55tgZUkSZJK\nlAF2H9KsGYwfD089lYxQ3KkTXHIJLFmS7soqmNxcqF07uQlZkiRJUokxwO5jQoALLoA5c+Cmm+Dv\nf4cmTWDIENi0Kd3VVRB5ecnowxkZ6a5EkiRJqlAMsPuomjXh9tvhww+Tlthrr4VWreDNN9NdWTm3\nbh188IHdhyVJkqRSYIDdxzVuDC+9BGPHwjffwBlnwEUXwcKF6a6snJo2DbZscQAnSZIkqRQYYEUI\ncN55SWvs736XhNmmTeHOO2HDhnRXV87k5ibP7duntw5JkiSpAjLAqkD16vD//l9yf+xZZ8H//A8c\nfzyMG5fuysqRvLykWbtBg3RXIkmSJFU4BljtoFEjePbZJLiGAD/8IZx/Pnz6aborK+NiTAKs3Ycl\nSZKkUmGA1U6ddRbMnAl/+AO8/noyK8yttyb3yqoIn30GS5c6gJMkSZJUSgyw2qUqVeD662HuXOje\nHX772yTIvvBC0uCoQvLykmdbYCVJkqRSYYBVsRx+ODzxRDLNTs2aSZfic86Bjz9Od2VlSG5uciPx\n8cenuxJJkiSpQjLAao907gzvvw933QX//Ce0bJkM9rRuXborKwPy8uDkkyEzM92VSJIkSRWSAVZ7\nrHJlGDwYPvoILr44mW6naVN46ql9uFvxt98myd7uw5IkSVKpMcDqOzv4YBgxAt55J5k15r/+C848\nE2bPTndlafDee7BpkwM4SZIkSaXIAKvv7bTTYOpUeOCBJMedeCJcey2sWZPuyvairQM4GWAlSZKk\nUlOqATaEUDeE8HQIYW4IYU4IoUNq+aDUsg9DCH8qzRq0d2RkwBVXwLx58NOfJvfINmkCo0btI92K\nc3OTCXQPPjjdlUiSJEkVVmm3wN4DjIsxNgVOBOaEEDoD3YETY4wtgD+Xcg3aiw44AB56CCZPhoYN\n4ZJL4PTT4YMP0l1ZKcvLs/VVkiRJKmWlFmBDCPsBpwMPA8QYN8YYVwH/DfwhxrghtfyL0qpB6XPy\nyUmj5PDhyRyyJ50EgwbBV1+lu7JSsGhR8nAAJ0mSJKlUlWYL7FHAcuCREML7IYThIYSawHFApxDC\n5BDChBDCyUXtHEIYGEKYGkKYunz58lIsU6WlUiXo3z/pVnzFFfDgg3DccfDww5Cfn+7qSpD3v0qS\nJEl7RWkG2EzgJGBojLE1sA64IbV8f+AU4NfA30MIYfudY4wPxRjbxhjbHnDAAaVYpkpbvXpw330w\nbVpyX+yAAUlj5dSp6a6shOTlQdWq0KpVuiuRJEmSKrTSDLCLgEUxxsmp90+TBNpFwLMxMQXIBxqU\nYh0qI1q1gokT4bHH4PPPoV07GDgQVqxId2XfU24utGkDVaqkuxJJkiSpQiu1ABtjXAosDCE0SS06\nA5gNPA90BgghHAdUAcp7hFExhQB9+sBHH8E118Df/pZ0Kx46FLZsSXd138HGjUnTst2HJUmSpFJX\n2qMQDwIeDyHMAFoBdwB/A44OIcwCngD6xrhPTLSiQurUgb/8JRmduHXr5B7Ztm1h0qR0V7aHPvgA\nNmxwACdJkiRpLyjVABtjnJ66j/WEGOP5McavUqMR94kxtowxnhRjfLM0a1DZ1qIFvP46/P3vSVfi\n006Dvn1h6dJ0V1ZMubnJsy2wkiRJUqkr7RZYabdCgAsvhDlz4MYbYcyYZLCnu++GTZvSXd1u5OXB\n4YcnD0mSJEmlygCrMqNWLbjjDpg1C049NblH9qSTICcn3ZXtQm6ura+SJEnSXmKAVZlz3HHwyivw\n/POwdi107gwXXwyLFqW7su0sXQoLFhhgJUmSpL3EAKsyKQTo3h1mz4bf/jYJs02bwh//mAz8WyZM\nTs0Q5QBOkiRJ0l5hgFWZVr063HJLEmTPPBNuuAGOPx5eey3dlZF0H65cOennLEmSJKnUGWBVLhx1\nVNIK+8orECOcdRb8+MdJD960yctL5gCqVi2NRUiSJEn7DgOsypUf/hBmzkwGe3r1VWjWDG67Db79\ndi8XsnkzvPuu979KkiRJe5EBVuVO1arJdDtz50K3bvCb3yTzyb744l4sYuZMWL/eACtJkiTtRQZY\nlVtHHAFPPglvvJH04u3WDc49F+bP3wsfnpeXPDuAkyRJkrTXGGBV7nXpAtOnw1/+Am+/nbTG3nwz\nrFtXih+amwsHHQRHHlmKHyJJkiSpMAOsKoTKleGXv4SPPoKLLoLf/z65P/aZZ5JBn4qrcePGxdsw\nLy9pfQ2BFStWcNFFF9GlSxe6du0KQIyRq666ig4dOnDyySczZswYAEaMGMHtt9++y0OPHDmSdu3a\ncfrpp9OrVy82bNgAwIUXXsipp55K+/btGTFixDb7zJs3j8qVK/POO+8U/8tKkiRJ5YwBVhXKIYfA\nyJEwcSLUqwcXXABdu8KcOSX4IStWwMcfF9z/OnjwYH7zm9/w5ptv8lpqfp8PP/yQDz/8kNzcXN58\n801uvvnmYh++Y8eO5Obm8vbbb9OwYUNGjRoFwB133MGkSZOYMGECt99+O98WGrnqtttuIysrqwS/\npCRJklT2GGBVIXXsCNOmwf33w9SpcMIJ8Otfw9dfb7tdfn4+ffr0ISsri2uuuQZIWknPO+88zjvv\nPFq3bs3EiRMB6NevH5dffjnn/PCHnAJ80aQJW7ZsYdasWfzlL38hKyuLBx98EIBDDz2UKlWqsGnT\nJr7++mv233//gs+cPHnyDscu7OijjyYjIwOAqlWrkpmZCcCxxx4LQJUqVcjIyCCEUHC8gw8+mMMP\nP7zkfoCSJElSGWSAVYWVmQlXXpl0K+7bF/78Z2jSBEaP/k+34hdeeIGaNWsyYcIELrjgAjZv3gzA\npk2bePHFF3nuuecKgi1AixYtePnss+kWAn//5BO++OILZs6cyS9+8QvGjx/P6NGjmTNnDvXq1ePY\nY4/luOOOo1WrVtu0wO7s2NubO3cu48aN46KLLtpm+Z133kmvXr2oWrUqAL///e+54YYbSurHJkmS\nJJVZBlhVeAceCMOHw+TJcNhh0Ls3tGwJvXrB88/Po127dgC0b9++oFXz5JNPBqBRo0asXr264Fht\n2rSB3FwaNmzIyrVrqVevHoceeignnngiVapUITs7m5kzZzJ+/HgWL17M/PnzmTt3Lv/zP/9TcC/r\n9sdeu3Yt2dnZZGdnF9zDumjRIvr27csTTzxBtWrVCj5/5MiRzJgxg1tuuQWAl19+mbZt21K/fv1S\n/ilKkiRJ6ZeZ7gKkvaVduyTEXn01PPAAzJ4NlSsfyxdfjKd///68++67xFTT7LRp0wD4/PPPqVOn\nTsExQn4+TJkC7doRY6RatWocffTRLFy4kCOOOIJp06bx4x//mOXLl1OvXj0yMjKoXbs2GzduZMuW\nLUUeu1atWuTk5BR8xooVK+jZsyfDhg3jmGOOKVj+wgsvMHr0aMaOHUulSsnfnqZPn05OTg6TJk1i\n5syZzJ07lyeffJIjHR1ZkiRJFZABVvuUStv1Odi0qTuffPI0WVlZtG/fvuB+0xo1anDOOefw73//\nm7vuuus/OyxYkNxIW2i04nvuuYc+ffqwadMmunTpwkknncSWLVsYM2YMHTt2ZMOGDQwaNIgaNWrs\n+tgpv/3tb1m8eHFB9+JLLrmE/v3707t3b5o2bVow0vHjjz/OTTfdxE033QQk9+gOGDDA8CpJkqQK\nK8Q9mWMkTdq2bRunTp2a7jJUQYwdCxdfDOvXQ40aMGYMdOv2n/UjRoxg0aJFRY8c/Ne/wsCBMG8e\npAZVkiRJkvT9hBCmxRjb7m47W2C1z+nWLQmtr72WTLFTOLzuVl4e1K+/TQusJEmSpL3DFlhpTzRv\nDkcfDS+9lO5KJEmSpAqjuC2wjkIsFdeqVTBnDnTokO5KJEmSpH2SAVYqrsmTk+dTTklvHZIkSdI+\nygArFVdeHoQAqXlcJUmSJO1dBlipuPLyoGVLKDQvrCRJkqS9xwArFUd+fhJg7T4sSZIkpY0BViqO\nefOSQZwcwEmSJElKGwOsVBy5ucmzLbCSJElS2hhgpeLIy4O6daFJk3RXIkmSJO2zDLBSceTlQfv2\nUMn/ZSRJkqR08bdxaXe+/hpmzbL7sCRJkpRmBlhpd959NxmF2AGcJEmSpLQywEq7s3UAp3bt0luH\nJEmStI8zwEq7k5cHTZtCvXrprkSSJEnapxlgpV2JMQmwdh+WJEmS0s4AK+3KJ5/AihUO4CRJkiSV\nAQZYaVfy8pJnW2AlSZKktDPASruSmwu1a0Pz5umuRJIkSdrnGWClXcnLS0YfzshIdyWSJEnSPs8A\nK+3M+vXwwQfe/ypJkiSVEQZYaWemToUtWwywkiRJUhlhgJV2ZusATgZYSZIkqUwwwEo7k5sLjRtD\ngwbprkSSJEkSBlipaDEmLbBOnyNJkiSVGQZYqSiffw5Ll9p9WJIkSSpDDLBSUXJzk2cDrCRJklRm\nGGClouTlQfXqcMIJ6a5EkiRJUooBVipKbi6cfDJkZqa7EkmSJEkpBlhpe99+C++/7wBOkiRJUhlj\ngJW29/77sGmT979KkiRJZYwBVtqeAzhJkiRJZZIBVtpeXh40agQHH5zuSiRJkiQVYoCVtpeba+ur\nJEmSVAYZYKXCFi1KHg7gJEmSJJU5BlipsMmTk2dbYCVJkqQyxwArFZabC1WrQqtW6a5EkiRJ0nYM\nsFJheXnQpg1UqZLuSiRJkiRtxwArbbVxI0ydavdhSZIkqYwywEpbffABbNjgAE6SJElSGWWAlbbK\ny0uebYGVJEmSyiQDrLRVbi4cdhgcfni6K5EkSZJUBAOstFVent2HJUmSpDLMACsBLFsGn35q92FJ\nkiSpDDPASvCf+19tgZUkSZLKLAOsBEmArVwZWrdOdyWSJEmSdsIAK0EygFOrVlC9erorkSRJkrQT\nBtc+NXQAABEdSURBVNhybOnSpXTo0IHOnTuzYcMGevbsSXZ2NlOmTKF379473W/cuHE89thje/x5\n06dP5+233/5Ote5u35ycHAYMGLDD8m+//ZbevXvTqVMnevfuzbfffrvDNn/6059o3749p512GoMG\nDSLGyDfffMMPfvADOnbsyCmnnMI//vGPbfZ56623CCGwaNEi2LwZ3n3X7sOSJElSGWeALcfeeust\nunbtyltvvcWXX37JihUryMnJoV27djz++OM73e/ss8/mkksu2ePPK80AuzMjRoygadOmTJw4kSZN\nmjBixIgdtunRoweTJ0/mn//8J8uWLePNN98kMzOTv/71r7zzzju89NJLDB48uGD7GCNDhgyhbdu2\nyYKZM2H9egdwkiRJkso4A2w5cuONN5KVlUWHDh149NFHufXWWxk5ciQDBgxg4MCBzJgxg+zsbNau\nXUvjxo0B+Oqrr+jZsydZWVl07tyZpUuXMmLECG6//XYAJkyYQFZWFtnZ2fz85z8nxsiCBQto06YN\nffr04aSTTuLuu+8GYMiQITz88MNkZ2ezePFisrOzGTx4MF27duWMM85gw4YNANx333106tSJDh06\nMHz48CL3Lconn3xCjx49aNWqFU899VRBfeeeey4A5513HhMmTNhhv2OPPbbgddWqVcnMzKRy5co0\natQIgOrVq1Op0n9O9aeeeoqzzjqLmjVrJgscwEmSJEkqFzLTXYCKZ9y4cXz11VdMmDCB9evX06FD\nB66//noWL17MzTffzIIFCxgwYACvv/76NvvdeeeddO3alZ/97GcA5OfnF6yLMTJ48GBycnLYb7/9\nuOaaa3j55Zdp2bIlS5YsYeLEiVSqVIlmzZoxePBgfvnLX7Jo0SJuvvnmgmNkZ2dz9913M3DgQMaP\nH88xxxzDuHHjePvtt8nPz6dTp0706NGjyH23t3z5csaPH8/69etp27YtPXv2ZOXKldSrVw+AunXr\n8uWXX+50/wkTJrBkyRJOP/30bZZfc801XHfddQBs2rSJ4cOH89JLL/H0009D06awbl2y4VFHQe3a\nsGZNMf6LSJIkSdrbDLDlxMyZM5kwYQLZ2dkAbNiwgZUrV+52v1mzZnH55ZcXvC/cErlixQoWLFhA\n9+7dAVi7di1NmjShZcuWNGvWjBo1agCQkZGx0+O3adMGgIYNG7Jy5Uq++eYbZs+eTefOnQFYs2YN\nCxcuLNZ3bN26NZmZmdSpU4cDDzyQ5cuXs//++7Nq1SoAVq9ezf7778/8+fML7pcdPnw4jRs3ZsaM\nGdxwww28+OKLhBAKjnnbbbdRp04dfvrTnwLw0EMP0adPH6pUqZJssDW8bvX118WqVZIkSdLeZ4At\nJ1q0aEHXrl255557ANi4cSOjR49OBiHahZYtW5KTk1PQzbZwC2yDBg04+uijeemll6hVqxaQtFAu\nXrx4mxC4VZUqVdi8efM2ywpvF2OkWbNmtG7dmmeeeYYQAps2baJy5crMnj17h323N336dDZv3sw3\n33zDsmXLOOCAA8jKyuKVV16hVatWvPLKK2RlZdG4cWNycnIK9ps/fz6XXXYZzzzzDA0aNChYfv/9\n9/Pxxx/z6KOPFiybNWsWn3zyCaNHj2bGjBlcAvwDqLbLyiRJkiSVBd4DW0786Ec/onbt2mRnZ9O5\nc2f69+9frP1uvPHGguDXpUsXvvjii4J1IQSGDBlCt27d6Ny5M2eccQZz5szZ6bFOO+00XnvtNS64\n4AKWLl1a5DYtW7bkzDPPLLjntnv37mzevLlY+x566KFceOGFdOrUidtvv51KlSrRr18/Zs6cSadO\nnZg5cyb9+vXbYb/BgwezatUq+vbtS3Z2Ni+//DJffPEFv/jFL/jXv/5F586dyc7OZsuWLQwdOpTX\nXnuNcePGccIJJ/AYhldJkiSpvAgxxnTXsFtt27aNU6dOTXcZqojq1Nm227D3wEqSJEl7XQhhWoyx\n7e62swux9rrrrruOKVOmFLyvUqUKr732WnqKMaxKkiRJ5YYBVnvdn/70p3SXIEmSJKkc8h5YSZIk\nSVK5YICVJEmSJJULBlhJkiRJUrlggJUkSZIklQsGWEmSJElSuWCAlSRJkiSVCwZYSZIkSVK5YICV\nJEmSJJULBlhJkiRJUrlggJUkSZIklQsGWEmSJElSuWCAlSRJkiSVCwZYSZIkSVK5YICVJEmSJJUL\nBlhJkiRJUrlggJUkSZIklQsGWEmSJElSuWCAlSRJkiSVCwZYSZIkSVK5YICVJEmSJJULIcaY7hp2\nK4SwHPgs3XWUUw2AFekuQvskzz2lg+ed0sVzT+ngead0KK3z7sgY4wG726hcBFh9dyGEqTHGtumu\nQ/sezz2lg+ed0sVzT+ngead0SPd5ZxdiSZIkSVK5YICVJEmSJJULBtiK76F0F6B9luee0sHzTuni\nuad08LxTOqT1vPMeWEmSJElSuWALrCRJkiSpXDDASpIkSZLKBQNsORNCOCKE8FYIYXYI4cMQwi9S\ny/cPIYwPIXyceq6XWh5CCPeGEOaHEGaEEE4qdKy+qe0/DiH0Tdd3UvkSQsgIIbwfQngp9f6oEMLk\n1Dn2ZAihSmp51dT7+an1jQod48bU8o9CCGel55uovAgh1A0hPB1CmBtCmBNC6OA1T3tDCOGa1L+1\ns0IIY0II1bzmqaSFEP4WQvgihDCr0LISu8aFENqEEGam9rk3hBD27jdUWbWTc+9/U//ezgghPBdC\nqFtoXZHXshDC2all80MINxRaXuT18vsywJY/m4FrY4zNgVOAK0MIzYEbgDdijMcCb6TeA/wQODb1\nGAgMheTCCNwCtAfaAbdsvThKu/ELYE6h938E7ooxNga+AvqnlvcHvkotvyu1HanztRfQAjgbeDCE\nkLGXalf5dA8wLsbYFDiR5PzzmqdSFUI4DLgaaBtjbAlkkFy7vOappI0gOTcKK8lr3FDg8kL7bf9Z\n2neNYMfzYTzQMsZ4AjAPuBF2fi1LXc8eIDk3mwMXp7aFnV8vvxcDbDkTY1wSY3wv9fprkl/kDgO6\nA4+mNnsUOD/1ujswMibygLohhEOAs4DxMcYvY4xfkZysXtC0SyGEw4FzgOGp9wHoAjyd2mT7c2/r\nOfk0cEZq++7AEzHGDTHGT4H5JP/YSjsIIewHnA48DBBj3BhjXIXXPO0dmUD1EEImUANYgtc8lbAY\n49vAl9stLpFrXGpdnRhjXkxGbh1Z6FjaxxV17sUYX4sxbk69zQMOT73e2bWsHTA/xvivGONG4Amg\n+25+R/xeDLDlWKp7UmtgMnBQjHFJatVS4KDU68OAhYV2W5RatrPl0q7cDVwH5Kfe1wdWFbrQFT6P\nCs6x1PrVqe0997QnjgKWA4+EpOv68BBCTbzmqZTFGBcDfwY+Jwmuq4FpeM3T3lFS17jDUq+3Xy4V\nx2XAP1Kv9/Tc29XviN+LAbacCiHUAp4BBscY1xRel/oLm/MjqUSFEM4FvogxTkt3LdqnZAInAUNj\njK2BdfynKx3gNU+lI9X9sjvJH1EOBWpiq73SwGuc0iGEcBPJrYuPp7uW7Rlgy6EQQmWS8Pp4jPHZ\n1OJlqW4ipJ6/SC1fDBxRaPfDU8t2tlzamdOAbiGEBSTdQ7qQ3JtYN9W9DrY9jwrOsdT6/YCVeO5p\nzywCFsUYJ6feP00SaL3mqbSdCXwaY1weY9wEPEtyHfSap72hpK5xi/lPF9DCy6WdCiH0A84Feqf+\ngAJ7fu6tZOfXy+/FAFvOpPqTPwzMiTEOKbRqLLB1xLm+wAuFll+aGrXuFGB1qkvKq0DXEEK91F+Z\nu6aWSUWKMd4YYzw8xtiI5Cb+N2OMvYG3gAtSm21/7m09Jy9IbR9Ty3ulRuw8imRAiSl76WuonIkx\nLgUWhhCapBadAczGa55K3+fAKSGEGql/e7eee17ztDeUyDUutW5NCOGU1Hl8aaFjSTsIIZxNcrtY\ntxjj+kKrdnYtexc4NjXicBWS3xHHpq5/O7tefj8xRh/l6AF0JOlGMgOYnnr8iKSf+RvAx8DrwP6p\n7QPJyGCfADNJRlPceqzLSG7Ang/8NN3fzUf5eQDZwEup10enLmDzgaeAqqnl1VLv56fWH11o/5tS\n5+RHwA/T/X18lO0H0AqYmrruPQ/U85rnY288gFuBucAs4DGgqtc8HyX9AMaQ3Ge9iaTXSf+SvMYB\nbVPn8CfA/UBI93f2UTYeOzn35pPc07o1ZwwrtH2R17JUFpmXWndToeVFXi+/7yOkDi5JkiRJUplm\nF2JJkiRJUrlggJUkSZIklQsGWEmSJElSuWCAlSRJkiSVCwZYSZIkSVK5YICVJFV4IYSDQgijQwj/\nCiFMCyHkhhB6pNZlhxBWhxCmhxDmhBBuSS3vF0K4f7vj5IQQ2hZx/JwQwuepeRa3Lns+hLC2tL/b\ndxFCaB1CeDj1ul8IIYYQziy0/vzUsgtS73NCCB8V+hkNLLTt66l5JyVJKnUGWElShZYKlc8Db8cY\nj44xtiGZaP3wQptNjDG2IpkvsU8I4aTv8FGrgNNSn1kXOOT7Vb5nQgiZe7D5/wD3Fno/k+RnstXF\nwAfb7dM79TM6DfhjasJ6SOZHvWIPy5Uk6TsxwEqSKrouwMYY47CtC2KMn8UY79t+wxjjOmAa0Pg7\nfM4T/CcE/hh4tvDKEMKvQwjvhhBmhBBuTS1rFEKYG0IYEUKYF0J4PIRwZgjhnyGEj0MI7VLb7Z9q\n0Z0RQsgLIZyQWv7bEMJjIYR/Ao+FEN4OIbQq9JnvhBBO3K6O2sAJMcbCAXUi0C6EUDmEUCv1/afv\n5HvWAtYBW1Lvx5IEXkmSSp0BVpJU0bUA3ivOhiGE+sApwIff4XPeAE4PIWSQBNknCx23K3As0A5o\nBbQJIZyeWt0Y+AvQNPX4CdAR+BVJSynArcD7McYTUstGFvrc5sCZMcaLgYeBfqnPPA6otl1QhaSV\nedZ2yyLwOnAW0J0klG7v8RDCDOAj4LYY4xaAGONXQNXUz06SpFJlgJUk7VNCCA+EED4IIbxbaHGn\nEML7wGvAH2KMH5KEuqLsbPkW4B2S8Fo9xrig0Lquqcf7JGG6KUmgBfg0xjgzxphPEpzfiDFGkm69\njVLbdCTpqkuM8U2gfgihTmrd2BjjN6nXTwHnhhAqA5cBI4qo8xBgeRHLt7Yg9wLGFLG+dypANwR+\nFUI4stC6L4BDi9hHkqQStSf3y0iSVB59CPTc+ibGeGUIoQEwtdA2E2OM526330pg+8GJ9gdW7OKz\nngCeA3673fIA3Blj/L9tFobQCNhQaFF+off5FO/f6XVbX8QY14cQxpO0ov4X0KaI7b8Bqm2/MMY4\nJYRwPLA+xjiv0HhU22+3PITwHtAe+Cy1uFrquJIklSpbYCVJFd2bQLUQwn8XWlajGPu9C5wWQjgY\nIDX6cFVg4S72mQjcyY4tmK8Cl6XuLyWEcFgI4cBi1r/1uL1T+2YDK2KMa3ay7XCSAZreTXXv3d4c\ndn6P7w38p9tykUIINYDWwCep9wE4GFiwy28gSVIJsAVWklShxRhjCOF84K4QwnUk3WfXAdfvZr9l\nIYRfAK+EECoBa4GLU119d/pZwJ+LWP5aCKEZkJtq2VwL9OE/AyHtzm+Bv6XuQV0P9N1FDdNCCGuA\nR3ayfm4IYb8QQu0Y49fbrfvHLmp4PITwDUmIHxFjnJZa3gbIizFuLuZ3kSTpOwvJv7WSJKkiCCEc\nCuQATXcWtkMI1wBfxxiHl8Dn3UNyH+4b3/dYkiTtjl2IJUmqIEIIlwKTgZt21VIMDGXbe2+/j1mG\nV0nS3mILrCRJkiSpXLAFVpIkSZJULhhgJUmSJEnlggFWkiRJklQuGGAlSZIkSeWCAVaSJEmSVC78\nf8ob4+4BdmuoAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "vU-2SHss55jw", + "colab_type": "text" + }, + "source": [ + "# 1 on 1 Comparisons\n", + "A few model to model comparisons, pairing models that are a little more fair than the original paper when you consider all of accuracy, rate, and memory efficiency." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "SKA-MF-yShDW", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 187 + }, + "outputId": "83f55196-040a-4a2a-a49e-e6629c38ce83" + }, + "source": [ + "def compare_results(results, namea, nameb):\n", + " resa, resb = results[namea], results[nameb]\n", + " top1r = 100. * (resa['top1'] - resb['top1']) / resb['top1']\n", + " top5r = 100. * (resa['top5'] - resb['top5']) / resb['top5']\n", + " rater = 100. * (resa['rate'] - resb['rate']) / resb['rate']\n", + " memr = 100. * (resa['gpu_used'] - resb['gpu_used']) / resb['gpu_used']\n", + " print('{:22} vs {:28} top1: {:+4.2f}%, top5: {:+4.2f}%, rate: {:+4.2f}%, mem: {:+.2f}%'.format(\n", + " namea, nameb, top1r, top5r, rater, memr))\n", + " \n", + "#compare_results(results, 'efficientnet_b0-224', 'seresnext26_32x4d-224')\n", + "compare_results(results, 'efficientnet_b0-224', 'dpn68b-224')\n", + "compare_results(results, 'efficientnet_b1-240', 'resnet50-224')\n", + "compare_results(results, 'efficientnet_b1-240', 'resnet50-240-ttp')\n", + "compare_results(results, 'efficientnet_b2-260', 'gluon_seresnext50_32x4d-224')\n", + "compare_results(results, 'tf_efficientnet_b3-300', 'gluon_seresnext50_32x4d-224')\n", + "compare_results(results, 'tf_efficientnet_b3-300', 'gluon_seresnext101_32x4d-224')\n", + "compare_results(results, 'tf_efficientnet_b4-380', 'ig_resnext101_32x8d-224')\n", + "\n", + "print('\\nNote the cost of running with the SAME padding hack:')\n", + "compare_results(results, 'tf_efficientnet_b2-260', 'efficientnet_b2-260')" + ], + "execution_count": 34, + "outputs": [ + { + "output_type": "stream", + "text": [ + "efficientnet_b0-224 vs dpn68b-224 top1: -1.55%, top5: -0.06%, rate: +6.82%, mem: +1.10%\n", + "efficientnet_b1-240 vs resnet50-224 top1: +1.11%, top5: +0.33%, rate: -4.94%, mem: +120.26%\n", + "efficientnet_b1-240 vs resnet50-240-ttp top1: +0.79%, top5: +0.29%, rate: -1.76%, mem: +61.71%\n", + "efficientnet_b2-260 vs gluon_seresnext50_32x4d-224 top1: -1.27%, top5: -0.14%, rate: -4.14%, mem: +139.04%\n", + "tf_efficientnet_b3-300 vs gluon_seresnext50_32x4d-224 top1: -0.22%, top5: +0.43%, rate: -20.81%, mem: +417.25%\n", + "tf_efficientnet_b3-300 vs gluon_seresnext101_32x4d-224 top1: -2.13%, top5: -0.24%, rate: -9.45%, mem: +376.19%\n", + "tf_efficientnet_b4-380 vs ig_resnext101_32x8d-224 top1: -3.37%, top5: -2.35%, rate: -17.10%, mem: +247.55%\n", + "\n", + "Note the cost of running with the SAME padding hack:\n", + "tf_efficientnet_b2-260 vs efficientnet_b2-260 top1: -0.59%, top5: -0.70%, rate: -1.02%, mem: +17.48%\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aSibvBwp5-CX", + "colab_type": "text" + }, + "source": [ + "# How are we generalizing to ImageNet-V2?\n", + "\n", + "This is often an interesting comparison. The results for the IG ResNeXt are impressive, it's the lowest gap between ImageNet-1k and ImageNet-V2 validation scores that I've seen (http://people.csail.mit.edu/ludwigs/papers/imagenet.pdf)." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "aahwcXGnSOab", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 442 + }, + "outputId": "7a33b7ad-619e-4479-e585-ee9068a3bc13" + }, + "source": [ + "print('Results by absolute accuracy gap between ImageNet-V2 Matched-Frequency and original ImageNet top-1:')\n", + "no_ttp_keys = [k for k in results.keys() if 'ttp' not in k]\n", + "gaps = {x: (results[x]['top1'] - orig_top1[results[x]['model_name']]) for x in no_ttp_keys}\n", + "sorted_keys = list(sorted(no_ttp_keys, key=lambda x: gaps[x], reverse=True))\n", + "for m in sorted_keys:\n", + " print(' Model: {:34} {:4.2f}%'.format(m, gaps[m]))\n", + "print()\n", + "\n", + "print('Results by relative accuracy gap between ImageNet-V2 Matched-Frequency and original ImageNet top-1:')\n", + "gaps = {x: 100 * (results[x]['top1'] - orig_top1[results[x]['model_name']]) / orig_top1[results[x]['model_name']] for x in no_ttp_keys}\n", + "sorted_keys = list(sorted(no_ttp_keys, key=lambda x: gaps[x], reverse=True))\n", + "for m in sorted_keys:\n", + " print(' Model: {:34} {:4.2f}%'.format(m, gaps[m]))" + ], + "execution_count": 18, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Results by absolute accuracy gap between ImageNet-V2 Matched-Frequency and original ImageNet top-1:\n", + " Model: ig_resnext101_32x8d-224 -8.86%\n", + " Model: gluon_seresnext101_32x4d-224 -10.89%\n", + " Model: efficientnet_b1-240 -11.14%\n", + " Model: gluon_seresnext50_32x4d-224 -11.24%\n", + " Model: tf_efficientnet_b4-380 -11.26%\n", + " Model: resnet50-224 -11.68%\n", + " Model: dpn68b-224 -11.91%\n", + " Model: efficientnet_b2-260 -11.96%\n", + " Model: tf_efficientnet_b2-260 -12.21%\n", + " Model: efficientnet_b0-224 -12.33%\n", + " Model: tf_efficientnet_b3-300 -12.35%\n", + "\n", + "Results by relative accuracy gap between ImageNet-V2 Matched-Frequency and original ImageNet top-1:\n", + " Model: ig_resnext101_32x8d-224 -10.71%\n", + " Model: gluon_seresnext101_32x4d-224 -13.46%\n", + " Model: tf_efficientnet_b4-380 -13.64%\n", + " Model: gluon_seresnext50_32x4d-224 -14.07%\n", + " Model: efficientnet_b1-240 -14.16%\n", + " Model: resnet50-224 -14.88%\n", + " Model: efficientnet_b2-260 -14.99%\n", + " Model: tf_efficientnet_b3-300 -15.28%\n", + " Model: tf_efficientnet_b2-260 -15.33%\n", + " Model: dpn68b-224 -15.37%\n", + " Model: efficientnet_b0-224 -16.03%\n" + ], + "name": "stdout" + } + ] + } + ] +} \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/requirements-docs.txt b/testbed/huggingface__pytorch-image-models/requirements-docs.txt new file mode 100644 index 0000000000000000000000000000000000000000..716a3bf73c9e3db5ea77a53c40b34465989982b1 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/requirements-docs.txt @@ -0,0 +1,4 @@ +mkdocs +mkdocs-material +mdx_truly_sane_lists +mkdocs-awesome-pages-plugin \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/requirements-modelindex.txt b/testbed/huggingface__pytorch-image-models/requirements-modelindex.txt new file mode 100644 index 0000000000000000000000000000000000000000..d0a1470cce391024f753e6b6cf9ed088600803c0 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/requirements-modelindex.txt @@ -0,0 +1,2 @@ +model-index==0.1.10 +jinja2==2.11.3 diff --git a/testbed/huggingface__pytorch-image-models/requirements.txt b/testbed/huggingface__pytorch-image-models/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..2d29a27ced7a4e86323fada114fb2dfa7157dd2a --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/requirements.txt @@ -0,0 +1,3 @@ +torch>=1.4.0 +torchvision>=0.5.0 +pyyaml diff --git a/testbed/huggingface__pytorch-image-models/results/README.md b/testbed/huggingface__pytorch-image-models/results/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b3fcec07088bf26a5b42860e2fbff9f40e1f296e --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/results/README.md @@ -0,0 +1,59 @@ +# Validation Results + +This folder contains validation results for the models in this collection having pretrained weights. Since the focus for this repository is currently ImageNet-1k classification, all of the results are based on datasets compatible with ImageNet-1k classes. + +## Datasets + +There are currently results for the ImageNet validation set and 5 additional test / label sets. + +The test set results include rank and top-1/top-5 differences from clean validation. For the "Real Labels", ImageNetV2, and Sketch test sets, the differences were calculated against the full 1000 class ImageNet-1k validation set. For both the Adversarial and Rendition sets, the differences were calculated against 'clean' runs on the ImageNet-1k validation set with the same 200 classes used in each test set respectively. + +### ImageNet Validation - [`results-imagenet.csv`](results-imagenet.csv) + +The standard 50,000 image ImageNet-1k validation set. Model selection during training utilizes this validation set, so it is not a true test set. Question: Does anyone have the official ImageNet-1k test set classification labels now that challenges are done? + +* Source: http://image-net.org/challenges/LSVRC/2012/index +* Paper: "ImageNet Large Scale Visual Recognition Challenge" - https://arxiv.org/abs/1409.0575 + +### ImageNet-"Real Labels" - [`results-imagenet-real.csv`](results-imagenet-real.csv) + +The usual ImageNet-1k validation set with a fresh new set of labels intended to improve on mistakes in the original annotation process. + +* Source: https://github.com/google-research/reassessed-imagenet +* Paper: "Are we done with ImageNet?" - https://arxiv.org/abs/2006.07159 + +### ImageNetV2 Matched Frequency - [`results-imagenetv2-matched-frequency.csv`](results-imagenetv2-matched-frequency.csv) + +An ImageNet test set of 10,000 images sampled from new images roughly 10 years after the original. Care was taken to replicate the original ImageNet curation/sampling process. + +* Source: https://github.com/modestyachts/ImageNetV2 +* Paper: "Do ImageNet Classifiers Generalize to ImageNet?" - https://arxiv.org/abs/1902.10811 + +### ImageNet-Sketch - [`results-sketch.csv`](results-sketch.csv) + +50,000 non photographic (or photos of such) images (sketches, doodles, mostly monochromatic) covering all 1000 ImageNet classes. + +* Source: https://github.com/HaohanWang/ImageNet-Sketch +* Paper: "Learning Robust Global Representations by Penalizing Local Predictive Power" - https://arxiv.org/abs/1905.13549 + +### ImageNet-Adversarial - [`results-imagenet-a.csv`](results-imagenet-a.csv) + +A collection of 7500 images covering 200 of the 1000 ImageNet classes. Images are naturally occuring adversarial examples that confuse typical ImageNet classifiers. This is a challenging dataset, your typical ResNet-50 will score 0% top-1. + +For clean validation with same 200 classes, see [`results-imagenet-a-clean.csv`](results-imagenet-a-clean.csv) + +* Source: https://github.com/hendrycks/natural-adv-examples +* Paper: "Natural Adversarial Examples" - https://arxiv.org/abs/1907.07174 + + +### ImageNet-Rendition - [`results-imagenet-r.csv`](results-imagenet-r.csv) + +Renditions of 200 ImageNet classes resulting in 30,000 images for testing robustness. + +For clean validation with same 200 classes, see [`results-imagenet-r-clean.csv`](results-imagenet-r-clean.csv) + +* Source: https://github.com/hendrycks/imagenet-r +* Paper: "The Many Faces of Robustness" - https://arxiv.org/abs/2006.16241 + +## TODO +* Explore adding a reduced version of ImageNet-C (Corruptions) and ImageNet-P (Perturbations) from https://github.com/hendrycks/robustness. The originals are huge and image size specific. diff --git a/testbed/huggingface__pytorch-image-models/results/generate_csv_results.py b/testbed/huggingface__pytorch-image-models/results/generate_csv_results.py new file mode 100644 index 0000000000000000000000000000000000000000..04cf710ad77162e32904ed11053bf29b9bb8fcbf --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/results/generate_csv_results.py @@ -0,0 +1,75 @@ +import numpy as np +import pandas as pd + + +results = { + 'results-imagenet.csv': [ + 'results-imagenet-real.csv', + 'results-imagenetv2-matched-frequency.csv', + 'results-sketch.csv' + ], + 'results-imagenet-a-clean.csv': [ + 'results-imagenet-a.csv', + ], + 'results-imagenet-r-clean.csv': [ + 'results-imagenet-r.csv', + ], +} + + +def diff(base_df, test_csv): + base_models = base_df['model'].values + test_df = pd.read_csv(test_csv) + test_models = test_df['model'].values + + rank_diff = np.zeros_like(test_models, dtype='object') + top1_diff = np.zeros_like(test_models, dtype='object') + top5_diff = np.zeros_like(test_models, dtype='object') + + for rank, model in enumerate(test_models): + if model in base_models: + base_rank = int(np.where(base_models == model)[0]) + top1_d = test_df['top1'][rank] - base_df['top1'][base_rank] + top5_d = test_df['top5'][rank] - base_df['top5'][base_rank] + + # rank_diff + if rank == base_rank: + rank_diff[rank] = f'0' + elif rank > base_rank: + rank_diff[rank] = f'-{rank - base_rank}' + else: + rank_diff[rank] = f'+{base_rank - rank}' + + # top1_diff + if top1_d >= .0: + top1_diff[rank] = f'+{top1_d:.3f}' + else: + top1_diff[rank] = f'-{abs(top1_d):.3f}' + + # top5_diff + if top5_d >= .0: + top5_diff[rank] = f'+{top5_d:.3f}' + else: + top5_diff[rank] = f'-{abs(top5_d):.3f}' + + else: + rank_diff[rank] = '' + top1_diff[rank] = '' + top5_diff[rank] = '' + + test_df['top1_diff'] = top1_diff + test_df['top5_diff'] = top5_diff + test_df['rank_diff'] = rank_diff + + test_df['param_count'] = test_df['param_count'].map('{:,.2f}'.format) + test_df.sort_values('top1', ascending=False, inplace=True) + test_df.to_csv(test_csv, index=False, float_format='%.3f') + + +for base_results, test_results in results.items(): + base_df = pd.read_csv(base_results) + base_df.sort_values('top1', ascending=False, inplace=True) + for test_csv in test_results: + diff(base_df, test_csv) + base_df['param_count'] = base_df['param_count'].map('{:,.2f}'.format) + base_df.to_csv(base_results, index=False, float_format='%.3f') diff --git a/testbed/huggingface__pytorch-image-models/results/imagenet21k_goog_synsets.txt b/testbed/huggingface__pytorch-image-models/results/imagenet21k_goog_synsets.txt new file mode 100644 index 0000000000000000000000000000000000000000..e276a97bdd01bd9ce46033cb190a0d56ac1548c9 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/results/imagenet21k_goog_synsets.txt @@ -0,0 +1,21843 @@ +n00004475 +n00005787 +n00006024 +n00006484 +n00007846 +n00015388 +n00017222 +n00021265 +n00021939 +n00120010 +n00141669 +n00288000 +n00288190 +n00288384 +n00324978 +n00326094 +n00433458 +n00433661 +n00433802 +n00434075 +n00439826 +n00440039 +n00440218 +n00440382 +n00440509 +n00440643 +n00440747 +n00440941 +n00441073 +n00441824 +n00442115 +n00442437 +n00442847 +n00442981 +n00443231 +n00443375 +n00443517 +n00443692 +n00443803 +n00443917 +n00444142 +n00444340 +n00444490 +n00444651 +n00444846 +n00444937 +n00445055 +n00445226 +n00445351 +n00445685 +n00445802 +n00446311 +n00446411 +n00446493 +n00446632 +n00446804 +n00446980 +n00447073 +n00447221 +n00447361 +n00447463 +n00447540 +n00447957 +n00448126 +n00448232 +n00448466 +n00448640 +n00448748 +n00448872 +n00448958 +n00449054 +n00449168 +n00449295 +n00449517 +n00449695 +n00449796 +n00449892 +n00449977 +n00450070 +n00450335 +n00450700 +n00450866 +n00450998 +n00451186 +n00451370 +n00451563 +n00451635 +n00451768 +n00451866 +n00452034 +n00452152 +n00452293 +n00452734 +n00452864 +n00453126 +n00453313 +n00453396 +n00453478 +n00453631 +n00453935 +n00454237 +n00454395 +n00454493 +n00454624 +n00454855 +n00454983 +n00455076 +n00455173 +n00456465 +n00463246 +n00463543 +n00464277 +n00464478 +n00464651 +n00464894 +n00466273 +n00466377 +n00466524 +n00466630 +n00466712 +n00466880 +n00467320 +n00467536 +n00467719 +n00467995 +n00468299 +n00468480 +n00469651 +n00470554 +n00470682 +n00470830 +n00470966 +n00471437 +n00471613 +n00474568 +n00474657 +n00474769 +n00474881 +n00475014 +n00475142 +n00475273 +n00475403 +n00475535 +n00475661 +n00475787 +n00476140 +n00476235 +n00476389 +n00477392 +n00477639 +n00477827 +n00478262 +n00479076 +n00479440 +n00479616 +n00479734 +n00479887 +n00480211 +n00480366 +n00480508 +n00480885 +n00480993 +n00481803 +n00481938 +n00482122 +n00482298 +n00483205 +n00483313 +n00483409 +n00483508 +n00483605 +n00483705 +n00483848 +n00523513 +n00812526 +n00825773 +n00887544 +n01035504 +n01035667 +n01055165 +n01314388 +n01314663 +n01314781 +n01314910 +n01315213 +n01315330 +n01315581 +n01315805 +n01316422 +n01316579 +n01316734 +n01316949 +n01317089 +n01317294 +n01317391 +n01317541 +n01317813 +n01317916 +n01318053 +n01318279 +n01318381 +n01318478 +n01318660 +n01318894 +n01319001 +n01319187 +n01319467 +n01319685 +n01320872 +n01321123 +n01321230 +n01321456 +n01321579 +n01321770 +n01321854 +n01322221 +n01322343 +n01322508 +n01322604 +n01322685 +n01322898 +n01322983 +n01323068 +n01323155 +n01323261 +n01323355 +n01323493 +n01323599 +n01323781 +n01324305 +n01324431 +n01324610 +n01324799 +n01324916 +n01325060 +n01326291 +n01327909 +n01329186 +n01330126 +n01330497 +n01332181 +n01333082 +n01333483 +n01333610 +n01334217 +n01334690 +n01335218 +n01337191 +n01337734 +n01338685 +n01339083 +n01339336 +n01339471 +n01339801 +n01340014 +n01340522 +n01340785 +n01340935 +n01341090 +n01342269 +n01347583 +n01349735 +n01350226 +n01350701 +n01351170 +n01351315 +n01357328 +n01357507 +n01358572 +n01359762 +n01362336 +n01363719 +n01365474 +n01365885 +n01366700 +n01367772 +n01368672 +n01369358 +n01369484 +n01374703 +n01374846 +n01375204 +n01376237 +n01376437 +n01376543 +n01377278 +n01377510 +n01377694 +n01378545 +n01379389 +n01380610 +n01380754 +n01381044 +n01382033 +n01384084 +n01384164 +n01384687 +n01385017 +n01385330 +n01386007 +n01386182 +n01386354 +n01387065 +n01389507 +n01390123 +n01390763 +n01392275 +n01392380 +n01393486 +n01394040 +n01394492 +n01394771 +n01395254 +n01396048 +n01396617 +n01397114 +n01397690 +n01397871 +n01400247 +n01400391 +n01402600 +n01403457 +n01404365 +n01404495 +n01405007 +n01405616 +n01407798 +n01410457 +n01411450 +n01412694 +n01413457 +n01414216 +n01415626 +n01415920 +n01416213 +n01418498 +n01418620 +n01419332 +n01419573 +n01419888 +n01421333 +n01421807 +n01422185 +n01422335 +n01422450 +n01423302 +n01423617 +n01424420 +n01425223 +n01427399 +n01429172 +n01438208 +n01438581 +n01439121 +n01439514 +n01439808 +n01440160 +n01440242 +n01440467 +n01440764 +n01441117 +n01441272 +n01441425 +n01441910 +n01442450 +n01442710 +n01442972 +n01443243 +n01443537 +n01443831 +n01444339 +n01444783 +n01445429 +n01445593 +n01445857 +n01446152 +n01446589 +n01446760 +n01447139 +n01447331 +n01447658 +n01447946 +n01448291 +n01448594 +n01448951 +n01449374 +n01449712 +n01449980 +n01450661 +n01450950 +n01451115 +n01451295 +n01451426 +n01451863 +n01452345 +n01453087 +n01453475 +n01453742 +n01454545 +n01454856 +n01455317 +n01455461 +n01455778 +n01456137 +n01456454 +n01456756 +n01457082 +n01457407 +n01457852 +n01458746 +n01458842 +n01459791 +n01460303 +n01461315 +n01461646 +n01462042 +n01462544 +n01462803 +n01464844 +n01466257 +n01467336 +n01467804 +n01468238 +n01468712 +n01469103 +n01469723 +n01470145 +n01470479 +n01470733 +n01470895 +n01471682 +n01472303 +n01472502 +n01473806 +n01474283 +n01474864 +n01475232 +n01475940 +n01476418 +n01477080 +n01477525 +n01477875 +n01478511 +n01478969 +n01479213 +n01479820 +n01480106 +n01480516 +n01480880 +n01481331 +n01481498 +n01482071 +n01482330 +n01483021 +n01483522 +n01483830 +n01484097 +n01484285 +n01484447 +n01484562 +n01484850 +n01485479 +n01486010 +n01486540 +n01486838 +n01487506 +n01488038 +n01488918 +n01489501 +n01489709 +n01489920 +n01490112 +n01490360 +n01490670 +n01491006 +n01491361 +n01491661 +n01491874 +n01492357 +n01492569 +n01492708 +n01492860 +n01493146 +n01493541 +n01493829 +n01494041 +n01494475 +n01494757 +n01494882 +n01495006 +n01495493 +n01495701 +n01496331 +n01497118 +n01497413 +n01497738 +n01498041 +n01498406 +n01498699 +n01498989 +n01499396 +n01499732 +n01500091 +n01500476 +n01500854 +n01501160 +n01501641 +n01501777 +n01501948 +n01502101 +n01503061 +n01503976 +n01504179 +n01504344 +n01514668 +n01514752 +n01514859 +n01514926 +n01515078 +n01515217 +n01515303 +n01516212 +n01517389 +n01517565 +n01517966 +n01518878 +n01519563 +n01519873 +n01520576 +n01521399 +n01521756 +n01522450 +n01523105 +n01524359 +n01524761 +n01525720 +n01526521 +n01526766 +n01527194 +n01527347 +n01527617 +n01527917 +n01528396 +n01528654 +n01528845 +n01529672 +n01530439 +n01530575 +n01531178 +n01531344 +n01531512 +n01531639 +n01531811 +n01531971 +n01532325 +n01532511 +n01532829 +n01533000 +n01533339 +n01533481 +n01533651 +n01533893 +n01534155 +n01534433 +n01534582 +n01534762 +n01535140 +n01535469 +n01535690 +n01536035 +n01536186 +n01536334 +n01536644 +n01536780 +n01537134 +n01537544 +n01537895 +n01538059 +n01538200 +n01538362 +n01538630 +n01538955 +n01539272 +n01539573 +n01539925 +n01540090 +n01540233 +n01540566 +n01540832 +n01541102 +n01541386 +n01541760 +n01541922 +n01542168 +n01542433 +n01542786 +n01543175 +n01543383 +n01543632 +n01543936 +n01544208 +n01544389 +n01544704 +n01545574 +n01546039 +n01546506 +n01546921 +n01547832 +n01548301 +n01548492 +n01548694 +n01548865 +n01549053 +n01549430 +n01549641 +n01549886 +n01550172 +n01550761 +n01551080 +n01551300 +n01551711 +n01552034 +n01552333 +n01552813 +n01553142 +n01553527 +n01553762 +n01554017 +n01554448 +n01555004 +n01555305 +n01555809 +n01556182 +n01556514 +n01557185 +n01557962 +n01558149 +n01558307 +n01558461 +n01558594 +n01558765 +n01558993 +n01559160 +n01559477 +n01559639 +n01559804 +n01560105 +n01560280 +n01560419 +n01560636 +n01560793 +n01560935 +n01561181 +n01561452 +n01561732 +n01562014 +n01562265 +n01562451 +n01563128 +n01563449 +n01563746 +n01563945 +n01564101 +n01564217 +n01564394 +n01564773 +n01564914 +n01565078 +n01565345 +n01565599 +n01565930 +n01566207 +n01566645 +n01567133 +n01567678 +n01567879 +n01568132 +n01568294 +n01568720 +n01568892 +n01569060 +n01569262 +n01569423 +n01569566 +n01569836 +n01569971 +n01570267 +n01570421 +n01570676 +n01570839 +n01571410 +n01571904 +n01572328 +n01572489 +n01572654 +n01572782 +n01573074 +n01573240 +n01573360 +n01573627 +n01573898 +n01574045 +n01574390 +n01574560 +n01574801 +n01575117 +n01575401 +n01575745 +n01576076 +n01576358 +n01576695 +n01577035 +n01577458 +n01577659 +n01577941 +n01578180 +n01578575 +n01579028 +n01579149 +n01579260 +n01579410 +n01579578 +n01579729 +n01580077 +n01580379 +n01580490 +n01580772 +n01580870 +n01581166 +n01581434 +n01581730 +n01581874 +n01581984 +n01582220 +n01582398 +n01582498 +n01582856 +n01583209 +n01583495 +n01583828 +n01584225 +n01584695 +n01584853 +n01585121 +n01585287 +n01585422 +n01585715 +n01586020 +n01586374 +n01586941 +n01587278 +n01587526 +n01587834 +n01588002 +n01588431 +n01588725 +n01588996 +n01589286 +n01589718 +n01589893 +n01590220 +n01591005 +n01591123 +n01591301 +n01591697 +n01592084 +n01592257 +n01592387 +n01592540 +n01592694 +n01593028 +n01593282 +n01593553 +n01594004 +n01594372 +n01594787 +n01594968 +n01595168 +n01595450 +n01595624 +n01595974 +n01596273 +n01596608 +n01597022 +n01597336 +n01597737 +n01597906 +n01598074 +n01598271 +n01598588 +n01598988 +n01599159 +n01599269 +n01599388 +n01599556 +n01599741 +n01600085 +n01600341 +n01600657 +n01601068 +n01601410 +n01601694 +n01602080 +n01602209 +n01602630 +n01602832 +n01603000 +n01603152 +n01603600 +n01603812 +n01603953 +n01604330 +n01604968 +n01605630 +n01606097 +n01606177 +n01606522 +n01606672 +n01606809 +n01606978 +n01607309 +n01607429 +n01607600 +n01607812 +n01607962 +n01608265 +n01608432 +n01608814 +n01609062 +n01609391 +n01609751 +n01609956 +n01610100 +n01610226 +n01610552 +n01610955 +n01611472 +n01611674 +n01611800 +n01611969 +n01612122 +n01612275 +n01612476 +n01612628 +n01612955 +n01613177 +n01613294 +n01613615 +n01613807 +n01614038 +n01614343 +n01614556 +n01614925 +n01615121 +n01615303 +n01615458 +n01615703 +n01616086 +n01616318 +n01616551 +n01616764 +n01617095 +n01617443 +n01617766 +n01618082 +n01618503 +n01618922 +n01619310 +n01619536 +n01619835 +n01620135 +n01620414 +n01620735 +n01621127 +n01621635 +n01622120 +n01622352 +n01622483 +n01622779 +n01622959 +n01623110 +n01623425 +n01623615 +n01623706 +n01623880 +n01624115 +n01624212 +n01624305 +n01624537 +n01624833 +n01625121 +n01625562 +n01627424 +n01628331 +n01628770 +n01629276 +n01629819 +n01629962 +n01630148 +n01630284 +n01630670 +n01630901 +n01631175 +n01631354 +n01631512 +n01631663 +n01632047 +n01632308 +n01632458 +n01632601 +n01632777 +n01632952 +n01633406 +n01633781 +n01634227 +n01634522 +n01635027 +n01635176 +n01635480 +n01636127 +n01636352 +n01636510 +n01636829 +n01637112 +n01637338 +n01637615 +n01637932 +n01638194 +n01638329 +n01638722 +n01639187 +n01639765 +n01640846 +n01641206 +n01641391 +n01641577 +n01641739 +n01641930 +n01642097 +n01642257 +n01642391 +n01642539 +n01642943 +n01643255 +n01643507 +n01643896 +n01644373 +n01644900 +n01645466 +n01645776 +n01646292 +n01646388 +n01646555 +n01646648 +n01646802 +n01646902 +n01647033 +n01647180 +n01647303 +n01647466 +n01647640 +n01648139 +n01648356 +n01648620 +n01649170 +n01649412 +n01649556 +n01649726 +n01650167 +n01650690 +n01650901 +n01651059 +n01651285 +n01651487 +n01651641 +n01651778 +n01652026 +n01652297 +n01653026 +n01653223 +n01653509 +n01653773 +n01654083 +n01654637 +n01654863 +n01655344 +n01661091 +n01661592 +n01661818 +n01662060 +n01662622 +n01662784 +n01663401 +n01663782 +n01664065 +n01664369 +n01664492 +n01664674 +n01664990 +n01665541 +n01665932 +n01666228 +n01666585 +n01667114 +n01667432 +n01667778 +n01668091 +n01668436 +n01668665 +n01668892 +n01669191 +n01669372 +n01669654 +n01670092 +n01670535 +n01670802 +n01671125 +n01671479 +n01671705 +n01672032 +n01672432 +n01672611 +n01673282 +n01674216 +n01674464 +n01674990 +n01675352 +n01675722 +n01676755 +n01677366 +n01677747 +n01678043 +n01678343 +n01678657 +n01679005 +n01679307 +n01679626 +n01679962 +n01680264 +n01680478 +n01680655 +n01680813 +n01680983 +n01681328 +n01681653 +n01681940 +n01682172 +n01682435 +n01682714 +n01683201 +n01683558 +n01684133 +n01684578 +n01684741 +n01685439 +n01685808 +n01686044 +n01686220 +n01686403 +n01686609 +n01686808 +n01687128 +n01687290 +n01687665 +n01687978 +n01688243 +n01688961 +n01689081 +n01689411 +n01689811 +n01690149 +n01690466 +n01691217 +n01691652 +n01691951 +n01692333 +n01692523 +n01692864 +n01693175 +n01693334 +n01693783 +n01694178 +n01694311 +n01694709 +n01694955 +n01695060 +n01696633 +n01697178 +n01697457 +n01697611 +n01697749 +n01697978 +n01698434 +n01698640 +n01698782 +n01699040 +n01699254 +n01699675 +n01701551 +n01701859 +n01702256 +n01702479 +n01703011 +n01703161 +n01703569 +n01704103 +n01704323 +n01704626 +n01705010 +n01705591 +n01705934 +n01707294 +n01708106 +n01708998 +n01709484 +n01709876 +n01710177 +n01711160 +n01712008 +n01712752 +n01713170 +n01713764 +n01714231 +n01715888 +n01717016 +n01717229 +n01717467 +n01718096 +n01718414 +n01719403 +n01721174 +n01721898 +n01722670 +n01722998 +n01723579 +n01724231 +n01724840 +n01725086 +n01725713 +n01726203 +n01726692 +n01727646 +n01728266 +n01728572 +n01728920 +n01729322 +n01729672 +n01729977 +n01730185 +n01730307 +n01730563 +n01730812 +n01730960 +n01731137 +n01731277 +n01731545 +n01731764 +n01731941 +n01732093 +n01732244 +n01732614 +n01732789 +n01732989 +n01733214 +n01733466 +n01733757 +n01733957 +n01734104 +n01734418 +n01734637 +n01734808 +n01735189 +n01735439 +n01735577 +n01735728 +n01736032 +n01736375 +n01736796 +n01737021 +n01737472 +n01737728 +n01737875 +n01738065 +n01738306 +n01738601 +n01738731 +n01739094 +n01739381 +n01739647 +n01739871 +n01740131 +n01740551 +n01740885 +n01741232 +n01741442 +n01741562 +n01741943 +n01742172 +n01742447 +n01742821 +n01743086 +n01743605 +n01743936 +n01744100 +n01744270 +n01744401 +n01744555 +n01745125 +n01745484 +n01745902 +n01746191 +n01746359 +n01746952 +n01747285 +n01747589 +n01747885 +n01748264 +n01748389 +n01748686 +n01748906 +n01749244 +n01749582 +n01749742 +n01749939 +n01750167 +n01750437 +n01750743 +n01751036 +n01751215 +n01751472 +n01751748 +n01752165 +n01752585 +n01752736 +n01753032 +n01753180 +n01753488 +n01753959 +n01754370 +n01754533 +n01754876 +n01755581 +n01755740 +n01755952 +n01756089 +n01756291 +n01756508 +n01756733 +n01756916 +n01757115 +n01757343 +n01757677 +n01757901 +n01758141 +n01758757 +n01758895 +n01767661 +n01768244 +n01769347 +n01770081 +n01770393 +n01770795 +n01771100 +n01771417 +n01771766 +n01772222 +n01772664 +n01773157 +n01773549 +n01773797 +n01774097 +n01774384 +n01774750 +n01775062 +n01775370 +n01775730 +n01776192 +n01776313 +n01776705 +n01777304 +n01777467 +n01777649 +n01777909 +n01778217 +n01778487 +n01778621 +n01778801 +n01779148 +n01779463 +n01779629 +n01779939 +n01780142 +n01780426 +n01780696 +n01781071 +n01781570 +n01781698 +n01781875 +n01782209 +n01782516 +n01783017 +n01783706 +n01784293 +n01784675 +n01785667 +n01786646 +n01787006 +n01787191 +n01787835 +n01788291 +n01788579 +n01788864 +n01789386 +n01789740 +n01790171 +n01790304 +n01790398 +n01790557 +n01790711 +n01790812 +n01791107 +n01791314 +n01791388 +n01791463 +n01791625 +n01791954 +n01792042 +n01792158 +n01792429 +n01792530 +n01792640 +n01792808 +n01792955 +n01793085 +n01793159 +n01793249 +n01793340 +n01793435 +n01793565 +n01793715 +n01794158 +n01794344 +n01794651 +n01795088 +n01795545 +n01795735 +n01795900 +n01796019 +n01796105 +n01796340 +n01796519 +n01796729 +n01797020 +n01797307 +n01797601 +n01797886 +n01798168 +n01798484 +n01798706 +n01798839 +n01798979 +n01799302 +n01799679 +n01800195 +n01800424 +n01800633 +n01801088 +n01801479 +n01801672 +n01801876 +n01802159 +n01802721 +n01803078 +n01803362 +n01803641 +n01803893 +n01804163 +n01804478 +n01804653 +n01804921 +n01805070 +n01805321 +n01805801 +n01806061 +n01806143 +n01806297 +n01806364 +n01806467 +n01806567 +n01806847 +n01807105 +n01807496 +n01807828 +n01808140 +n01808291 +n01808596 +n01809106 +n01809371 +n01809752 +n01810268 +n01810700 +n01811243 +n01811909 +n01812187 +n01812337 +n01812662 +n01812866 +n01813088 +n01813385 +n01813532 +n01813658 +n01813948 +n01814217 +n01814370 +n01814549 +n01814620 +n01814755 +n01814921 +n01815036 +n01815270 +n01815601 +n01816017 +n01816140 +n01816474 +n01816887 +n01817263 +n01817346 +n01817953 +n01818299 +n01818515 +n01818832 +n01819115 +n01819313 +n01819465 +n01819734 +n01820052 +n01820348 +n01820546 +n01820801 +n01821076 +n01821203 +n01821554 +n01821869 +n01822300 +n01822602 +n01823013 +n01823414 +n01823740 +n01824035 +n01824344 +n01824575 +n01824749 +n01825278 +n01825930 +n01826364 +n01826680 +n01826844 +n01827403 +n01827793 +n01828096 +n01828556 +n01828970 +n01829413 +n01829869 +n01830042 +n01830479 +n01830915 +n01831360 +n01831712 +n01832167 +n01832493 +n01832813 +n01833112 +n01833415 +n01833805 +n01834177 +n01834540 +n01835276 +n01835769 +n01835918 +n01836087 +n01836673 +n01837072 +n01837526 +n01838038 +n01838598 +n01839086 +n01839330 +n01839598 +n01839750 +n01839949 +n01840120 +n01840412 +n01840775 +n01841102 +n01841288 +n01841441 +n01841679 +n01841943 +n01842235 +n01842504 +n01842788 +n01843065 +n01843383 +n01843719 +n01844231 +n01844551 +n01844746 +n01844917 +n01845132 +n01845477 +n01846331 +n01847000 +n01847089 +n01847170 +n01847253 +n01847407 +n01847806 +n01847978 +n01848123 +n01848323 +n01848453 +n01848555 +n01848648 +n01848840 +n01848976 +n01849157 +n01849466 +n01849676 +n01849863 +n01850192 +n01850373 +n01850553 +n01850873 +n01851038 +n01851207 +n01851375 +n01851573 +n01851731 +n01851895 +n01852142 +n01852329 +n01852400 +n01852671 +n01852861 +n01853195 +n01853498 +n01853666 +n01853870 +n01854415 +n01854700 +n01854838 +n01855032 +n01855188 +n01855476 +n01855672 +n01856072 +n01856155 +n01856380 +n01856553 +n01856890 +n01857079 +n01857325 +n01857512 +n01857632 +n01857851 +n01858281 +n01858441 +n01858780 +n01858845 +n01858906 +n01859190 +n01859325 +n01859496 +n01859689 +n01859852 +n01860002 +n01860187 +n01860497 +n01860864 +n01861148 +n01861330 +n01861778 +n01862399 +n01871265 +n01871543 +n01871875 +n01872401 +n01872772 +n01873310 +n01874434 +n01874928 +n01875313 +n01875610 +n01876034 +n01876326 +n01876667 +n01877134 +n01877606 +n01877812 +n01878061 +n01878335 +n01878639 +n01878929 +n01879217 +n01879509 +n01879837 +n01880152 +n01880473 +n01880716 +n01880813 +n01881171 +n01881564 +n01881857 +n01882125 +n01882714 +n01883070 +n01883513 +n01883920 +n01884104 +n01884203 +n01884476 +n01884834 +n01885158 +n01885498 +n01886045 +n01886756 +n01887474 +n01887623 +n01887787 +n01887896 +n01888045 +n01888181 +n01888264 +n01888411 +n01889074 +n01889520 +n01889849 +n01890144 +n01890564 +n01890860 +n01891013 +n01891274 +n01891633 +n01892030 +n01892145 +n01892385 +n01892551 +n01892744 +n01893021 +n01893164 +n01893399 +n01893825 +n01894207 +n01894522 +n01894956 +n01896844 +n01897257 +n01897426 +n01897536 +n01897667 +n01898593 +n01899894 +n01900150 +n01903234 +n01903346 +n01903498 +n01904029 +n01904806 +n01904886 +n01905321 +n01905661 +n01906749 +n01907287 +n01907738 +n01908042 +n01908958 +n01909422 +n01909788 +n01909906 +n01910252 +n01910747 +n01911063 +n01911403 +n01911839 +n01912152 +n01912454 +n01912809 +n01913166 +n01913346 +n01913440 +n01914163 +n01914609 +n01914830 +n01915700 +n01915811 +n01916187 +n01916388 +n01916481 +n01916588 +n01916925 +n01917289 +n01917611 +n01917882 +n01918744 +n01919385 +n01920051 +n01920438 +n01921059 +n01922303 +n01922717 +n01922948 +n01923025 +n01923404 +n01923890 +n01924800 +n01924916 +n01925270 +n01925695 +n01925916 +n01926379 +n01926689 +n01927159 +n01927456 +n01927928 +n01928215 +n01928517 +n01928865 +n01929186 +n01930112 +n01930852 +n01931140 +n01931520 +n01931714 +n01932151 +n01932936 +n01933151 +n01933478 +n01933988 +n01934440 +n01934844 +n01935176 +n01935395 +n01936391 +n01936671 +n01936858 +n01937579 +n01937909 +n01938454 +n01938735 +n01940736 +n01941223 +n01941340 +n01942177 +n01942869 +n01943087 +n01943541 +n01943899 +n01944118 +n01944390 +n01944812 +n01944955 +n01945143 +n01945340 +n01945685 +n01945845 +n01946277 +n01946630 +n01946827 +n01947139 +n01947396 +n01947997 +n01948446 +n01948573 +n01949085 +n01949499 +n01949973 +n01950731 +n01951274 +n01951613 +n01952029 +n01952712 +n01953361 +n01953594 +n01953762 +n01954516 +n01955084 +n01955933 +n01956344 +n01956481 +n01956764 +n01957335 +n01958038 +n01958346 +n01958435 +n01958531 +n01959029 +n01959492 +n01959985 +n01960177 +n01960459 +n01961234 +n01961600 +n01961985 +n01962506 +n01962788 +n01963317 +n01963479 +n01963571 +n01964049 +n01964271 +n01964441 +n01964957 +n01965252 +n01965529 +n01965889 +n01966377 +n01966586 +n01967094 +n01967308 +n01967963 +n01968315 +n01968897 +n01969726 +n01970164 +n01970667 +n01971094 +n01971280 +n01971620 +n01971850 +n01972131 +n01972541 +n01973148 +n01974773 +n01975687 +n01976146 +n01976868 +n01976957 +n01977485 +n01978010 +n01978136 +n01978287 +n01978455 +n01978587 +n01978930 +n01979269 +n01979526 +n01979874 +n01980166 +n01980655 +n01981276 +n01981702 +n01982068 +n01982347 +n01982650 +n01983048 +n01983481 +n01983674 +n01983829 +n01984245 +n01984695 +n01985128 +n01985493 +n01985797 +n01986214 +n01986806 +n01987076 +n01987545 +n01987727 +n01988203 +n01988701 +n01988869 +n01989516 +n01989869 +n01990007 +n01990516 +n01990800 +n01991028 +n01991520 +n01992262 +n01992423 +n01992773 +n01993525 +n01993830 +n01994910 +n01995514 +n01995686 +n01996280 +n01996585 +n01997119 +n01997825 +n01998183 +n01998741 +n01999186 +n01999767 +n02000954 +n02002075 +n02002556 +n02002724 +n02003037 +n02003204 +n02003577 +n02003839 +n02004131 +n02004492 +n02004855 +n02005399 +n02005790 +n02006063 +n02006364 +n02006656 +n02006985 +n02007284 +n02007558 +n02008041 +n02008497 +n02008643 +n02008796 +n02009229 +n02009380 +n02009508 +n02009750 +n02009912 +n02010272 +n02010453 +n02010728 +n02011016 +n02011281 +n02011460 +n02011805 +n02011943 +n02012185 +n02012849 +n02013177 +n02013567 +n02013706 +n02014237 +n02014524 +n02014941 +n02015357 +n02015554 +n02015797 +n02016066 +n02016358 +n02016659 +n02016816 +n02016956 +n02017213 +n02017475 +n02017725 +n02018027 +n02018207 +n02018368 +n02018795 +n02019190 +n02019438 +n02019929 +n02020219 +n02020578 +n02021050 +n02021281 +n02021795 +n02022684 +n02023341 +n02023855 +n02023992 +n02024185 +n02024479 +n02024763 +n02025043 +n02025239 +n02025389 +n02026059 +n02026629 +n02026948 +n02027075 +n02027357 +n02027492 +n02027897 +n02028035 +n02028175 +n02028342 +n02028451 +n02028727 +n02028900 +n02029087 +n02029378 +n02029706 +n02030035 +n02030224 +n02030287 +n02030568 +n02030837 +n02030996 +n02031298 +n02031585 +n02031934 +n02032222 +n02032355 +n02032480 +n02032769 +n02033041 +n02033208 +n02033324 +n02033561 +n02033779 +n02033882 +n02034129 +n02034295 +n02034661 +n02034971 +n02035210 +n02035402 +n02035656 +n02036053 +n02036228 +n02036711 +n02037110 +n02037464 +n02037869 +n02038141 +n02038466 +n02038993 +n02039171 +n02039497 +n02039780 +n02040266 +n02040505 +n02041085 +n02041246 +n02041678 +n02041875 +n02042046 +n02042180 +n02042472 +n02042759 +n02043063 +n02043333 +n02043808 +n02044178 +n02044517 +n02044778 +n02044908 +n02045369 +n02045596 +n02045864 +n02046171 +n02046759 +n02046939 +n02047045 +n02047260 +n02047411 +n02047517 +n02047614 +n02047975 +n02048115 +n02048353 +n02048698 +n02049088 +n02049532 +n02050004 +n02050313 +n02050442 +n02050586 +n02050809 +n02051059 +n02051474 +n02051845 +n02052204 +n02052365 +n02052775 +n02053083 +n02053425 +n02053584 +n02054036 +n02054502 +n02054711 +n02055107 +n02055658 +n02055803 +n02056228 +n02056570 +n02056728 +n02057035 +n02057330 +n02057731 +n02057898 +n02058221 +n02058594 +n02058747 +n02059162 +n02059541 +n02059852 +n02060133 +n02060411 +n02060569 +n02060889 +n02061217 +n02061560 +n02061853 +n02062017 +n02062430 +n02062744 +n02063224 +n02063662 +n02064000 +n02064338 +n02064816 +n02065026 +n02065263 +n02065407 +n02065726 +n02066245 +n02066707 +n02067240 +n02067603 +n02067768 +n02068206 +n02068541 +n02068974 +n02069412 +n02069701 +n02069974 +n02070174 +n02070430 +n02070624 +n02070776 +n02071028 +n02071294 +n02071636 +n02072040 +n02072493 +n02072798 +n02073250 +n02073831 +n02074367 +n02074726 +n02075296 +n02075612 +n02075927 +n02076196 +n02076402 +n02076779 +n02077152 +n02077384 +n02077658 +n02077787 +n02077923 +n02078292 +n02078574 +n02078738 +n02079005 +n02079389 +n02079851 +n02080146 +n02080415 +n02080713 +n02081060 +n02081571 +n02081798 +n02081927 +n02082056 +n02082190 +n02082791 +n02083346 +n02083672 +n02083780 +n02084071 +n02084732 +n02084861 +n02085019 +n02085118 +n02085272 +n02085374 +n02085620 +n02085782 +n02085936 +n02086079 +n02086240 +n02086346 +n02086478 +n02086646 +n02086753 +n02086910 +n02087046 +n02087122 +n02087314 +n02087394 +n02087551 +n02088094 +n02088238 +n02088364 +n02088466 +n02088632 +n02088745 +n02088839 +n02088992 +n02089078 +n02089232 +n02089468 +n02089555 +n02089725 +n02089867 +n02089973 +n02090129 +n02090253 +n02090379 +n02090475 +n02090622 +n02090721 +n02090827 +n02091032 +n02091134 +n02091244 +n02091467 +n02091635 +n02091831 +n02092002 +n02092173 +n02092339 +n02092468 +n02093056 +n02093256 +n02093428 +n02093647 +n02093754 +n02093859 +n02093991 +n02094114 +n02094258 +n02094433 +n02094562 +n02094721 +n02094931 +n02095050 +n02095212 +n02095314 +n02095412 +n02095570 +n02095727 +n02095889 +n02096051 +n02096177 +n02096294 +n02096437 +n02096585 +n02096756 +n02097047 +n02097130 +n02097209 +n02097298 +n02097474 +n02097658 +n02097786 +n02097967 +n02098105 +n02098286 +n02098413 +n02098550 +n02098806 +n02098906 +n02099029 +n02099267 +n02099429 +n02099601 +n02099712 +n02099849 +n02099997 +n02100236 +n02100399 +n02100583 +n02100735 +n02100877 +n02101006 +n02101108 +n02101388 +n02101556 +n02101670 +n02101861 +n02102040 +n02102177 +n02102318 +n02102480 +n02102605 +n02102806 +n02102973 +n02103181 +n02103406 +n02103841 +n02104029 +n02104184 +n02104280 +n02104365 +n02104523 +n02104882 +n02105056 +n02105162 +n02105251 +n02105412 +n02105505 +n02105641 +n02105855 +n02106030 +n02106166 +n02106382 +n02106550 +n02106662 +n02106854 +n02106966 +n02107142 +n02107312 +n02107420 +n02107574 +n02107683 +n02107908 +n02108000 +n02108089 +n02108254 +n02108422 +n02108551 +n02108672 +n02108915 +n02109047 +n02109150 +n02109256 +n02109391 +n02109525 +n02109687 +n02109811 +n02109961 +n02110063 +n02110185 +n02110341 +n02110532 +n02110627 +n02110806 +n02110958 +n02111129 +n02111277 +n02111500 +n02111626 +n02111889 +n02112018 +n02112137 +n02112350 +n02112497 +n02112706 +n02112826 +n02113023 +n02113186 +n02113335 +n02113624 +n02113712 +n02113799 +n02113892 +n02113978 +n02114100 +n02114367 +n02114548 +n02114712 +n02114855 +n02115012 +n02115096 +n02115335 +n02115641 +n02115913 +n02116185 +n02116450 +n02116738 +n02117135 +n02117512 +n02117646 +n02117900 +n02118176 +n02118333 +n02118643 +n02118707 +n02119022 +n02119247 +n02119359 +n02119477 +n02119634 +n02119789 +n02120079 +n02120278 +n02120505 +n02120997 +n02121620 +n02121808 +n02122298 +n02122430 +n02122510 +n02122580 +n02122725 +n02122810 +n02122878 +n02122948 +n02123045 +n02123159 +n02123242 +n02123394 +n02123478 +n02123597 +n02123785 +n02123917 +n02124075 +n02124157 +n02124313 +n02124484 +n02124623 +n02125010 +n02125081 +n02125311 +n02125494 +n02125689 +n02125872 +n02126028 +n02126139 +n02126317 +n02126640 +n02126787 +n02127052 +n02127292 +n02127381 +n02127482 +n02127586 +n02127678 +n02127808 +n02128385 +n02128598 +n02128669 +n02128757 +n02128925 +n02129165 +n02129463 +n02129530 +n02129604 +n02129837 +n02129923 +n02129991 +n02130086 +n02130308 +n02130545 +n02130925 +n02131653 +n02132136 +n02132320 +n02132466 +n02132580 +n02132788 +n02133161 +n02133400 +n02133704 +n02134084 +n02134418 +n02134971 +n02135220 +n02135610 +n02135844 +n02136103 +n02136285 +n02136452 +n02136794 +n02137015 +n02137302 +n02137549 +n02137722 +n02137888 +n02138169 +n02138441 +n02138647 +n02138777 +n02139199 +n02139671 +n02140049 +n02140179 +n02140268 +n02140491 +n02140858 +n02141306 +n02141611 +n02141713 +n02142407 +n02142734 +n02142898 +n02143142 +n02143439 +n02143891 +n02144251 +n02144593 +n02144936 +n02145424 +n02145910 +n02146201 +n02146371 +n02146700 +n02146879 +n02147173 +n02147328 +n02147591 +n02147947 +n02148088 +n02148512 +n02148835 +n02148991 +n02149420 +n02149653 +n02149861 +n02150134 +n02150482 +n02150885 +n02151230 +n02152740 +n02152881 +n02152991 +n02153109 +n02153203 +n02153809 +n02156732 +n02156871 +n02157206 +n02157285 +n02159955 +n02160947 +n02161225 +n02161338 +n02161457 +n02161588 +n02162561 +n02163008 +n02163297 +n02164464 +n02165105 +n02165456 +n02165877 +n02166229 +n02166567 +n02166826 +n02167151 +n02167505 +n02167820 +n02167944 +n02168245 +n02168427 +n02168699 +n02169023 +n02169218 +n02169497 +n02169705 +n02169974 +n02170400 +n02170599 +n02170738 +n02170993 +n02171164 +n02171453 +n02171869 +n02172182 +n02172518 +n02172678 +n02172761 +n02172870 +n02173113 +n02173373 +n02173784 +n02174001 +n02174355 +n02174659 +n02175014 +n02175569 +n02175916 +n02176261 +n02176439 +n02176747 +n02176916 +n02177196 +n02177506 +n02177775 +n02177972 +n02178411 +n02178717 +n02179012 +n02179192 +n02179340 +n02179891 +n02180233 +n02180427 +n02180875 +n02181235 +n02181477 +n02181724 +n02182045 +n02182355 +n02182642 +n02182930 +n02183096 +n02183507 +n02183857 +n02184473 +n02184589 +n02184720 +n02185167 +n02185481 +n02186153 +n02186717 +n02187150 +n02187279 +n02187554 +n02187900 +n02188699 +n02189363 +n02189670 +n02190166 +n02190790 +n02191273 +n02191773 +n02191979 +n02192252 +n02192513 +n02192814 +n02193009 +n02193163 +n02194249 +n02194750 +n02195091 +n02195526 +n02195819 +n02196119 +n02196344 +n02196896 +n02197185 +n02197689 +n02197877 +n02198129 +n02198532 +n02198859 +n02199170 +n02199502 +n02200198 +n02200509 +n02200630 +n02200850 +n02201000 +n02201497 +n02201626 +n02202006 +n02202124 +n02202287 +n02202678 +n02203152 +n02203592 +n02203978 +n02204249 +n02204722 +n02204907 +n02205219 +n02205673 +n02206270 +n02206856 +n02207179 +n02207345 +n02207449 +n02207647 +n02207805 +n02208280 +n02208498 +n02208848 +n02208979 +n02209111 +n02209354 +n02209624 +n02209964 +n02210427 +n02210921 +n02211444 +n02211627 +n02211896 +n02212062 +n02212602 +n02212958 +n02213107 +n02213239 +n02213543 +n02213663 +n02213788 +n02214096 +n02214341 +n02214499 +n02214660 +n02214773 +n02215161 +n02215621 +n02215770 +n02216211 +n02216365 +n02216740 +n02217563 +n02217839 +n02218134 +n02218371 +n02218713 +n02219015 +n02219486 +n02220055 +n02220225 +n02220518 +n02220804 +n02221083 +n02221414 +n02221571 +n02221715 +n02221820 +n02222035 +n02222321 +n02222582 +n02223266 +n02223520 +n02224023 +n02224713 +n02225081 +n02225798 +n02226183 +n02226429 +n02226821 +n02226970 +n02227247 +n02227604 +n02227966 +n02228341 +n02228697 +n02229156 +n02229544 +n02229765 +n02230023 +n02230187 +n02230480 +n02230634 +n02231052 +n02231487 +n02231803 +n02232223 +n02233338 +n02233943 +n02234355 +n02234570 +n02234848 +n02235205 +n02236044 +n02236241 +n02236355 +n02236896 +n02237424 +n02237581 +n02237868 +n02238235 +n02238358 +n02238594 +n02238887 +n02239192 +n02239528 +n02239774 +n02240068 +n02240517 +n02241008 +n02241426 +n02241569 +n02241799 +n02242137 +n02242455 +n02243209 +n02243562 +n02243878 +n02244173 +n02244515 +n02244797 +n02245111 +n02245443 +n02246011 +n02246628 +n02246941 +n02247216 +n02247511 +n02247655 +n02248062 +n02248368 +n02248510 +n02248887 +n02249134 +n02249515 +n02249809 +n02250280 +n02250822 +n02251067 +n02251233 +n02251593 +n02251775 +n02252226 +n02252799 +n02252972 +n02253127 +n02253264 +n02253494 +n02253715 +n02253913 +n02254246 +n02254697 +n02254901 +n02255023 +n02255391 +n02256172 +n02256656 +n02257003 +n02257284 +n02257715 +n02257985 +n02258198 +n02258508 +n02258629 +n02259212 +n02259377 +n02259708 +n02259987 +n02260421 +n02260863 +n02261063 +n02261419 +n02261757 +n02262178 +n02262449 +n02262803 +n02263378 +n02264021 +n02264232 +n02264363 +n02264591 +n02264885 +n02265330 +n02266050 +n02266269 +n02266421 +n02266864 +n02267208 +n02267483 +n02268148 +n02268443 +n02268853 +n02269196 +n02269340 +n02269522 +n02269657 +n02270011 +n02270200 +n02270623 +n02270945 +n02271222 +n02271570 +n02271897 +n02272286 +n02272552 +n02272871 +n02273392 +n02274024 +n02274259 +n02274822 +n02275560 +n02275773 +n02276078 +n02276258 +n02276355 +n02276749 +n02276902 +n02277094 +n02277268 +n02277422 +n02277742 +n02278024 +n02278210 +n02278463 +n02278839 +n02278980 +n02279257 +n02279637 +n02279972 +n02280458 +n02280649 +n02281015 +n02281136 +n02281267 +n02281406 +n02281787 +n02282257 +n02282385 +n02282553 +n02282903 +n02283077 +n02283201 +n02283617 +n02283951 +n02284224 +n02284611 +n02284884 +n02285179 +n02285548 +n02285801 +n02286089 +n02286425 +n02286654 +n02287004 +n02287352 +n02287622 +n02287799 +n02287987 +n02288122 +n02288268 +n02288789 +n02289307 +n02289610 +n02289988 +n02290340 +n02290664 +n02290870 +n02291220 +n02291572 +n02291748 +n02292085 +n02292401 +n02292692 +n02293352 +n02293868 +n02294097 +n02294407 +n02294577 +n02295064 +n02295390 +n02295870 +n02296021 +n02296276 +n02296612 +n02296912 +n02297294 +n02297442 +n02297819 +n02297938 +n02298095 +n02298218 +n02298541 +n02299039 +n02299157 +n02299378 +n02299505 +n02299846 +n02300173 +n02300554 +n02300797 +n02301452 +n02301935 +n02302244 +n02302459 +n02302620 +n02302969 +n02303284 +n02303585 +n02303777 +n02304036 +n02304432 +n02304657 +n02304797 +n02305085 +n02305407 +n02305636 +n02305929 +n02306433 +n02306825 +n02307176 +n02307325 +n02307515 +n02307681 +n02307910 +n02308033 +n02308139 +n02308471 +n02308618 +n02308735 +n02309120 +n02309242 +n02309337 +n02309841 +n02310000 +n02310149 +n02310334 +n02310585 +n02310717 +n02310941 +n02311060 +n02311617 +n02311748 +n02312006 +n02312175 +n02312325 +n02312427 +n02312640 +n02312912 +n02313008 +n02313360 +n02313709 +n02315487 +n02315821 +n02316707 +n02317335 +n02317781 +n02318167 +n02318687 +n02319095 +n02319308 +n02319555 +n02319829 +n02320127 +n02320465 +n02321170 +n02321529 +n02322047 +n02322992 +n02323449 +n02323902 +n02324045 +n02324431 +n02324514 +n02324587 +n02324850 +n02325366 +n02325722 +n02325884 +n02326074 +n02326432 +n02326763 +n02326862 +n02327028 +n02327175 +n02327435 +n02327656 +n02327842 +n02328009 +n02328150 +n02328429 +n02328820 +n02328942 +n02329401 +n02330245 +n02331046 +n02331309 +n02331842 +n02332156 +n02332447 +n02332755 +n02332954 +n02333190 +n02333546 +n02333733 +n02333819 +n02333909 +n02334201 +n02334460 +n02334728 +n02335127 +n02335231 +n02336011 +n02336275 +n02336641 +n02336826 +n02337001 +n02337171 +n02337332 +n02337598 +n02337902 +n02338145 +n02338449 +n02338722 +n02338901 +n02339282 +n02339376 +n02339922 +n02340186 +n02340358 +n02340640 +n02340930 +n02341288 +n02341475 +n02341616 +n02341974 +n02342250 +n02342534 +n02342885 +n02343058 +n02343320 +n02343772 +n02344175 +n02344270 +n02344408 +n02344528 +n02344918 +n02345078 +n02345340 +n02345600 +n02345774 +n02345997 +n02346170 +n02346627 +n02346998 +n02347274 +n02347573 +n02347744 +n02348173 +n02348788 +n02349205 +n02349390 +n02349557 +n02349847 +n02350105 +n02350357 +n02350670 +n02350989 +n02351343 +n02351870 +n02352002 +n02352290 +n02352591 +n02352932 +n02353172 +n02353411 +n02353861 +n02354162 +n02354320 +n02354621 +n02354781 +n02355227 +n02355477 +n02356381 +n02356612 +n02356798 +n02356977 +n02357111 +n02357401 +n02357585 +n02357911 +n02358091 +n02358390 +n02358584 +n02358712 +n02358890 +n02359047 +n02359324 +n02359556 +n02359667 +n02359915 +n02360282 +n02360480 +n02360781 +n02360933 +n02361090 +n02361337 +n02361587 +n02361706 +n02361850 +n02362194 +n02363005 +n02363245 +n02363351 +n02363996 +n02364520 +n02364673 +n02364840 +n02365108 +n02365480 +n02366002 +n02366301 +n02366579 +n02366959 +n02367492 +n02367812 +n02368116 +n02368399 +n02368821 +n02369293 +n02369555 +n02369680 +n02369935 +n02370137 +n02370525 +n02370806 +n02371344 +n02372140 +n02372584 +n02372952 +n02373336 +n02374149 +n02374451 +n02375302 +n02375438 +n02375757 +n02375862 +n02376542 +n02376679 +n02376791 +n02376918 +n02377063 +n02377181 +n02377291 +n02377388 +n02377480 +n02377603 +n02377703 +n02378149 +n02378299 +n02378415 +n02378541 +n02378625 +n02378755 +n02378870 +n02378969 +n02379081 +n02379183 +n02379329 +n02379430 +n02379630 +n02379743 +n02379908 +n02380052 +n02380335 +n02380464 +n02380583 +n02380745 +n02380875 +n02381004 +n02381119 +n02381261 +n02381364 +n02381460 +n02381609 +n02381831 +n02382039 +n02382132 +n02382204 +n02382338 +n02382437 +n02382635 +n02382750 +n02382850 +n02382948 +n02383231 +n02384741 +n02384858 +n02385002 +n02385098 +n02385214 +n02385580 +n02385676 +n02385776 +n02385898 +n02386014 +n02386141 +n02386224 +n02386310 +n02386496 +n02386746 +n02386853 +n02386968 +n02387093 +n02387254 +n02387346 +n02387452 +n02387722 +n02387887 +n02387983 +n02388143 +n02388276 +n02388453 +n02388588 +n02388735 +n02388832 +n02388917 +n02389026 +n02389128 +n02389261 +n02389346 +n02389559 +n02389779 +n02389865 +n02389943 +n02390015 +n02390101 +n02390258 +n02390454 +n02390640 +n02390738 +n02390834 +n02390938 +n02391049 +n02391234 +n02391373 +n02391508 +n02391617 +n02391994 +n02392434 +n02392555 +n02392824 +n02393161 +n02393580 +n02393807 +n02393940 +n02394477 +n02395003 +n02395406 +n02395694 +n02395855 +n02395931 +n02396014 +n02396088 +n02396157 +n02396427 +n02396796 +n02397096 +n02397529 +n02397744 +n02397987 +n02398521 +n02399000 +n02401031 +n02402010 +n02402175 +n02402425 +n02403003 +n02403153 +n02403231 +n02403325 +n02403454 +n02403740 +n02403820 +n02403920 +n02404028 +n02404186 +n02404432 +n02404573 +n02404906 +n02405101 +n02405302 +n02405440 +n02405577 +n02405692 +n02405799 +n02405929 +n02406046 +n02406174 +n02406432 +n02406533 +n02406647 +n02406749 +n02406859 +n02406952 +n02407071 +n02407172 +n02407276 +n02407390 +n02407521 +n02407625 +n02407763 +n02407959 +n02408429 +n02408660 +n02408817 +n02409038 +n02409202 +n02409508 +n02409870 +n02410011 +n02410141 +n02410509 +n02410702 +n02410900 +n02411206 +n02411705 +n02411999 +n02412080 +n02412210 +n02412440 +n02412629 +n02412700 +n02412787 +n02412909 +n02412977 +n02413050 +n02413131 +n02413484 +n02413593 +n02413717 +n02413824 +n02413917 +n02414043 +n02414209 +n02414290 +n02414442 +n02414578 +n02414763 +n02414904 +n02415130 +n02415253 +n02415435 +n02415577 +n02415829 +n02416104 +n02416519 +n02416820 +n02416880 +n02416964 +n02417070 +n02417242 +n02417387 +n02417534 +n02417663 +n02417785 +n02417914 +n02418064 +n02418465 +n02418770 +n02419056 +n02419336 +n02419634 +n02419796 +n02420509 +n02420828 +n02421136 +n02421449 +n02421792 +n02422106 +n02422391 +n02422699 +n02423022 +n02423218 +n02423362 +n02423589 +n02424085 +n02424305 +n02424486 +n02424589 +n02424695 +n02424909 +n02425086 +n02425228 +n02425532 +n02425887 +n02426176 +n02426481 +n02426813 +n02427032 +n02427183 +n02427470 +n02427576 +n02427724 +n02428089 +n02428349 +n02428508 +n02428842 +n02429456 +n02430045 +n02430559 +n02430643 +n02430748 +n02430830 +n02431122 +n02431337 +n02431441 +n02431542 +n02431628 +n02431785 +n02431976 +n02432291 +n02432511 +n02432704 +n02432983 +n02433318 +n02433546 +n02433729 +n02433925 +n02434190 +n02434415 +n02434712 +n02434954 +n02435216 +n02435517 +n02435853 +n02436224 +n02436353 +n02436645 +n02437136 +n02437312 +n02437482 +n02437616 +n02437971 +n02438173 +n02438272 +n02438580 +n02439033 +n02439398 +n02441326 +n02441942 +n02442172 +n02442336 +n02442446 +n02442572 +n02442668 +n02442845 +n02443015 +n02443114 +n02443346 +n02443484 +n02443808 +n02443959 +n02444251 +n02444819 +n02445004 +n02445171 +n02445394 +n02445715 +n02446206 +n02446352 +n02446645 +n02447021 +n02447366 +n02447762 +n02448060 +n02448318 +n02448633 +n02448885 +n02449183 +n02449350 +n02449699 +n02450034 +n02450295 +n02450426 +n02450561 +n02450677 +n02450829 +n02451125 +n02451415 +n02451575 +n02453108 +n02453611 +n02454379 +n02454794 +n02455135 +n02455428 +n02455720 +n02456008 +n02456275 +n02456962 +n02457408 +n02457945 +n02458135 +n02458517 +n02459190 +n02460009 +n02460451 +n02460817 +n02461128 +n02461830 +n02462213 +n02469248 +n02469472 +n02469914 +n02470238 +n02470325 +n02470709 +n02470899 +n02471300 +n02471762 +n02472293 +n02472987 +n02473307 +n02473554 +n02473720 +n02473857 +n02473983 +n02474110 +n02474282 +n02474605 +n02474777 +n02475078 +n02475358 +n02475669 +n02476219 +n02476567 +n02476870 +n02477028 +n02477187 +n02477329 +n02477516 +n02477782 +n02478239 +n02478875 +n02479332 +n02480153 +n02480495 +n02480855 +n02481103 +n02481235 +n02481366 +n02481500 +n02481823 +n02482060 +n02482286 +n02482474 +n02482650 +n02483092 +n02483362 +n02483708 +n02484322 +n02484473 +n02484975 +n02485225 +n02485371 +n02485536 +n02485688 +n02485988 +n02486261 +n02486410 +n02486657 +n02486908 +n02487079 +n02487347 +n02487547 +n02487675 +n02487847 +n02488003 +n02488291 +n02488415 +n02488702 +n02488894 +n02489166 +n02489589 +n02490219 +n02490597 +n02490811 +n02491107 +n02491329 +n02491474 +n02492035 +n02492356 +n02492660 +n02492948 +n02493224 +n02493509 +n02493793 +n02494079 +n02494383 +n02495242 +n02496052 +n02496913 +n02497673 +n02498153 +n02498743 +n02499022 +n02499316 +n02499568 +n02499808 +n02500267 +n02500596 +n02501583 +n02501923 +n02502006 +n02502514 +n02502807 +n02503127 +n02503517 +n02503756 +n02504013 +n02504458 +n02504770 +n02505063 +n02505238 +n02505485 +n02505998 +n02506947 +n02507148 +n02507649 +n02508021 +n02508213 +n02508346 +n02508742 +n02509197 +n02509515 +n02509815 +n02510455 +n02511730 +n02512053 +n02512752 +n02512830 +n02512938 +n02513248 +n02513355 +n02513560 +n02513727 +n02513805 +n02513939 +n02514041 +n02515214 +n02515713 +n02516188 +n02516776 +n02517442 +n02517938 +n02518324 +n02518622 +n02519148 +n02519340 +n02519472 +n02519686 +n02519862 +n02520147 +n02520525 +n02520810 +n02521646 +n02522399 +n02522637 +n02522722 +n02522866 +n02523110 +n02523427 +n02523877 +n02524202 +n02524524 +n02524659 +n02524928 +n02525382 +n02525703 +n02526121 +n02526425 +n02526818 +n02527057 +n02527271 +n02527622 +n02528163 +n02529293 +n02529772 +n02530052 +n02530188 +n02530421 +n02530637 +n02530831 +n02530999 +n02531114 +n02531625 +n02532028 +n02532272 +n02532451 +n02532602 +n02532786 +n02532918 +n02533209 +n02533545 +n02533834 +n02534165 +n02534559 +n02534734 +n02535080 +n02535163 +n02535258 +n02535537 +n02535759 +n02536165 +n02536456 +n02536864 +n02537085 +n02537319 +n02537525 +n02537716 +n02538010 +n02538216 +n02538406 +n02538562 +n02538985 +n02539424 +n02539573 +n02539894 +n02540412 +n02540983 +n02541257 +n02541687 +n02542017 +n02542432 +n02542958 +n02543255 +n02543565 +n02544274 +n02545841 +n02546028 +n02546331 +n02546627 +n02547014 +n02547733 +n02548247 +n02548689 +n02548884 +n02549248 +n02549376 +n02549989 +n02550203 +n02550460 +n02550655 +n02551134 +n02551668 +n02552171 +n02553028 +n02554730 +n02555863 +n02556373 +n02556846 +n02557182 +n02557318 +n02557591 +n02557749 +n02557909 +n02558206 +n02558860 +n02559144 +n02559383 +n02559862 +n02560110 +n02561108 +n02561381 +n02561514 +n02561661 +n02561803 +n02561937 +n02562315 +n02562796 +n02562971 +n02563079 +n02563182 +n02563648 +n02563792 +n02563949 +n02564270 +n02564403 +n02564720 +n02564935 +n02565072 +n02565324 +n02565573 +n02566109 +n02566489 +n02566665 +n02567334 +n02567633 +n02568087 +n02568447 +n02568959 +n02569484 +n02569631 +n02569905 +n02570164 +n02570484 +n02570838 +n02571167 +n02571652 +n02571810 +n02572196 +n02572484 +n02573249 +n02573704 +n02574271 +n02574910 +n02575325 +n02575590 +n02576223 +n02576575 +n02576906 +n02577041 +n02577164 +n02577403 +n02577662 +n02577952 +n02578233 +n02578454 +n02578771 +n02578928 +n02579303 +n02579557 +n02579762 +n02579928 +n02580336 +n02580679 +n02580830 +n02581108 +n02581482 +n02581642 +n02581957 +n02582220 +n02582349 +n02582721 +n02583567 +n02583890 +n02584145 +n02584449 +n02585872 +n02586238 +n02586543 +n02587051 +n02587300 +n02587479 +n02587618 +n02587877 +n02588286 +n02588794 +n02588945 +n02589062 +n02589196 +n02589316 +n02589623 +n02589796 +n02590094 +n02590495 +n02590702 +n02590987 +n02591330 +n02591613 +n02591911 +n02592055 +n02592371 +n02592734 +n02593019 +n02593191 +n02593453 +n02593679 +n02594250 +n02594942 +n02595056 +n02595339 +n02595702 +n02596067 +n02596252 +n02596381 +n02596720 +n02597004 +n02597367 +n02597608 +n02597818 +n02597972 +n02598134 +n02598573 +n02598878 +n02599052 +n02599347 +n02599557 +n02599958 +n02600298 +n02600503 +n02600798 +n02601344 +n02601767 +n02601921 +n02602059 +n02602405 +n02602760 +n02603317 +n02603540 +n02603862 +n02604157 +n02604480 +n02604954 +n02605316 +n02605703 +n02605936 +n02606052 +n02606384 +n02606751 +n02607072 +n02607201 +n02607470 +n02607862 +n02608284 +n02608547 +n02608860 +n02608996 +n02609302 +n02609823 +n02610066 +n02610373 +n02610664 +n02610980 +n02611561 +n02611898 +n02612167 +n02613181 +n02613572 +n02613820 +n02614140 +n02614482 +n02614653 +n02614978 +n02615298 +n02616128 +n02616397 +n02616851 +n02617537 +n02618094 +n02618513 +n02618827 +n02619165 +n02619550 +n02619861 +n02620167 +n02620578 +n02621258 +n02621908 +n02622249 +n02622547 +n02622712 +n02622955 +n02623445 +n02624167 +n02624551 +n02624807 +n02624987 +n02625258 +n02625612 +n02625851 +n02626089 +n02626265 +n02626471 +n02626762 +n02627037 +n02627292 +n02627532 +n02627835 +n02628062 +n02628259 +n02628600 +n02629230 +n02629716 +n02630281 +n02630615 +n02630739 +n02631041 +n02631330 +n02631475 +n02631628 +n02631775 +n02632039 +n02632494 +n02633422 +n02633677 +n02633977 +n02634545 +n02635154 +n02635580 +n02636170 +n02636405 +n02636550 +n02636854 +n02637179 +n02637475 +n02637977 +n02638596 +n02639087 +n02639605 +n02639922 +n02640242 +n02640626 +n02640857 +n02641379 +n02642107 +n02642644 +n02643112 +n02643316 +n02643566 +n02643836 +n02644113 +n02644360 +n02644501 +n02644665 +n02644817 +n02645538 +n02645691 +n02645953 +n02646667 +n02646892 +n02648035 +n02648625 +n02648916 +n02649218 +n02649546 +n02650050 +n02650413 +n02650541 +n02651060 +n02652132 +n02652668 +n02653145 +n02653497 +n02653786 +n02654112 +n02654425 +n02654745 +n02655020 +n02655523 +n02655848 +n02656032 +n02656301 +n02656670 +n02656969 +n02657368 +n02657694 +n02658079 +n02658531 +n02658811 +n02659176 +n02659478 +n02659808 +n02660091 +n02660208 +n02660519 +n02660640 +n02661017 +n02661473 +n02661618 +n02662239 +n02662397 +n02662559 +n02662825 +n02662993 +n02663211 +n02663485 +n02663849 +n02664285 +n02664642 +n02665250 +n02665985 +n02666196 +n02666501 +n02666624 +n02666943 +n02667093 +n02667244 +n02667379 +n02667478 +n02667576 +n02667693 +n02668393 +n02668613 +n02669295 +n02669442 +n02669534 +n02669723 +n02670186 +n02670382 +n02670683 +n02670935 +n02671780 +n02672152 +n02672371 +n02672831 +n02675077 +n02675219 +n02675522 +n02676097 +n02676261 +n02676566 +n02676670 +n02676938 +n02677028 +n02677136 +n02677436 +n02677718 +n02678010 +n02678384 +n02678897 +n02679142 +n02679257 +n02679961 +n02680110 +n02680512 +n02680638 +n02680754 +n02681392 +n02682311 +n02682407 +n02682569 +n02682811 +n02682922 +n02683183 +n02683323 +n02683454 +n02683558 +n02683791 +n02684248 +n02684356 +n02684515 +n02684649 +n02684962 +n02685082 +n02685253 +n02685365 +n02685701 +n02685995 +n02686121 +n02686227 +n02686379 +n02686568 +n02687172 +n02687423 +n02687682 +n02687821 +n02687992 +n02688273 +n02688443 +n02689144 +n02689274 +n02689434 +n02689748 +n02689819 +n02690373 +n02690715 +n02691156 +n02692086 +n02692232 +n02692513 +n02692680 +n02692877 +n02693246 +n02693413 +n02693540 +n02694045 +n02694279 +n02694426 +n02694662 +n02694966 +n02695627 +n02695762 +n02696165 +n02696246 +n02696569 +n02696843 +n02697022 +n02697221 +n02697576 +n02697675 +n02697876 +n02698244 +n02698473 +n02698634 +n02699494 +n02699629 +n02699770 +n02699915 +n02700064 +n02700258 +n02700895 +n02701002 +n02701260 +n02701730 +n02702989 +n02703124 +n02703275 +n02704645 +n02704792 +n02704949 +n02705201 +n02705429 +n02705944 +n02706221 +n02706806 +n02708093 +n02708224 +n02708433 +n02708555 +n02708711 +n02708885 +n02709101 +n02709367 +n02709637 +n02709763 +n02709908 +n02710044 +n02710201 +n02710324 +n02710429 +n02710600 +n02711237 +n02711780 +n02712545 +n02712643 +n02713003 +n02713218 +n02713364 +n02713496 +n02714315 +n02714535 +n02714751 +n02715229 +n02715513 +n02715712 +n02716626 +n02720048 +n02720576 +n02721813 +n02723165 +n02724722 +n02725872 +n02726017 +n02726210 +n02726305 +n02726681 +n02727016 +n02727141 +n02727426 +n02727825 +n02728440 +n02729222 +n02729837 +n02729965 +n02730265 +n02730568 +n02730930 +n02731251 +n02731398 +n02731629 +n02731900 +n02732072 +n02732572 +n02732827 +n02733213 +n02733524 +n02734725 +n02734835 +n02735268 +n02735361 +n02735538 +n02735688 +n02736396 +n02736798 +n02737351 +n02737660 +n02738031 +n02738271 +n02738449 +n02738535 +n02738741 +n02738859 +n02738978 +n02739123 +n02739427 +n02739550 +n02739668 +n02739889 +n02740061 +n02740300 +n02740533 +n02740764 +n02741367 +n02741475 +n02742070 +n02742194 +n02742322 +n02742468 +n02742753 +n02743426 +n02744323 +n02744844 +n02744961 +n02745492 +n02745611 +n02745816 +n02746008 +n02746225 +n02746365 +n02746595 +n02746683 +n02746978 +n02747063 +n02747177 +n02747672 +n02747802 +n02748183 +n02748359 +n02748491 +n02749169 +n02749292 +n02749479 +n02749670 +n02749790 +n02749953 +n02750070 +n02750169 +n02750320 +n02750652 +n02751067 +n02751215 +n02751295 +n02751490 +n02752199 +n02752496 +n02752615 +n02752810 +n02752917 +n02753044 +n02753394 +n02753710 +n02754103 +n02754656 +n02755140 +n02755352 +n02755529 +n02755675 +n02755823 +n02755984 +n02756098 +n02756854 +n02756977 +n02757061 +n02757337 +n02757462 +n02757714 +n02757810 +n02757927 +n02758134 +n02758490 +n02758863 +n02758960 +n02759257 +n02759387 +n02759700 +n02759963 +n02760099 +n02760199 +n02760298 +n02760429 +n02760658 +n02760855 +n02761034 +n02761206 +n02761392 +n02761557 +n02761696 +n02761834 +n02762169 +n02762371 +n02762508 +n02762725 +n02762909 +n02763083 +n02763198 +n02763306 +n02763604 +n02763714 +n02763901 +n02764044 +n02764398 +n02764505 +n02764614 +n02764779 +n02764935 +n02765028 +n02766168 +n02766320 +n02766534 +n02766792 +n02767038 +n02767147 +n02767433 +n02767665 +n02767956 +n02768114 +n02768226 +n02768433 +n02768655 +n02768973 +n02769075 +n02769290 +n02769669 +n02769748 +n02769963 +n02770078 +n02770211 +n02770585 +n02770721 +n02770830 +n02771004 +n02771166 +n02771286 +n02771547 +n02771750 +n02772101 +n02772435 +n02772554 +n02772700 +n02773037 +n02773838 +n02774152 +n02774630 +n02774921 +n02775039 +n02775178 +n02775483 +n02775689 +n02775813 +n02775897 +n02776007 +n02776205 +n02776505 +n02776631 +n02776825 +n02776978 +n02777100 +n02777292 +n02777402 +n02777638 +n02777734 +n02777927 +n02778131 +n02778294 +n02778456 +n02778588 +n02778669 +n02779435 +n02779609 +n02779719 +n02779971 +n02780315 +n02780445 +n02780588 +n02780704 +n02780815 +n02781121 +n02781213 +n02781338 +n02781517 +n02781764 +n02782093 +n02782432 +n02782602 +n02782681 +n02782778 +n02783035 +n02783161 +n02783324 +n02783459 +n02783900 +n02783994 +n02784124 +n02784998 +n02785648 +n02786058 +n02786198 +n02786331 +n02786463 +n02786611 +n02786736 +n02786837 +n02787120 +n02787269 +n02787435 +n02787622 +n02788021 +n02788148 +n02788386 +n02788462 +n02788572 +n02788689 +n02789487 +n02790669 +n02790823 +n02790996 +n02791124 +n02791270 +n02791532 +n02791665 +n02791795 +n02792409 +n02792552 +n02792948 +n02793089 +n02793199 +n02793296 +n02793414 +n02793495 +n02793684 +n02793842 +n02793930 +n02794008 +n02794156 +n02794368 +n02794474 +n02794664 +n02794779 +n02794972 +n02795169 +n02795528 +n02795670 +n02795783 +n02795978 +n02796207 +n02796318 +n02796412 +n02796623 +n02796995 +n02797295 +n02797535 +n02797692 +n02797881 +n02799071 +n02799175 +n02799323 +n02799897 +n02800213 +n02800497 +n02800675 +n02800940 +n02801047 +n02801184 +n02801450 +n02801525 +n02801823 +n02801938 +n02802215 +n02802426 +n02802544 +n02802721 +n02802990 +n02803349 +n02803539 +n02803666 +n02803809 +n02803934 +n02804123 +n02804252 +n02804414 +n02804515 +n02804610 +n02805283 +n02805845 +n02805983 +n02806088 +n02806379 +n02806530 +n02806762 +n02806875 +n02806992 +n02807133 +n02807523 +n02807616 +n02807731 +n02808185 +n02808304 +n02808440 +n02808829 +n02808968 +n02809105 +n02809241 +n02809364 +n02809491 +n02809605 +n02809736 +n02810139 +n02810270 +n02810471 +n02810782 +n02811059 +n02811204 +n02811350 +n02811468 +n02811618 +n02811719 +n02811936 +n02812201 +n02812342 +n02812631 +n02812785 +n02812949 +n02813252 +n02813399 +n02813544 +n02813645 +n02813752 +n02813981 +n02814116 +n02814338 +n02814428 +n02814533 +n02814774 +n02814860 +n02815478 +n02815749 +n02815834 +n02815950 +n02816494 +n02816656 +n02816768 +n02817031 +n02817251 +n02817386 +n02817516 +n02817650 +n02817799 +n02818135 +n02818254 +n02818687 +n02818832 +n02819697 +n02820085 +n02820210 +n02820556 +n02820675 +n02821202 +n02821415 +n02821543 +n02821627 +n02821943 +n02822064 +n02822220 +n02822399 +n02822579 +n02822762 +n02822865 +n02823124 +n02823335 +n02823428 +n02823510 +n02823586 +n02823750 +n02823848 +n02823964 +n02824058 +n02824152 +n02824319 +n02824448 +n02825153 +n02825240 +n02825442 +n02825657 +n02825872 +n02825961 +n02826068 +n02826259 +n02826459 +n02826589 +n02826683 +n02826812 +n02826886 +n02827148 +n02827606 +n02828115 +n02828299 +n02828427 +n02828884 +n02829246 +n02829353 +n02829510 +n02829596 +n02830157 +n02831237 +n02831335 +n02831595 +n02831724 +n02831894 +n02831998 +n02833040 +n02833140 +n02833275 +n02833403 +n02833793 +n02834027 +n02834397 +n02834506 +n02834642 +n02834778 +n02835271 +n02835412 +n02835551 +n02835724 +n02835829 +n02835915 +n02836035 +n02836174 +n02836268 +n02836392 +n02836513 +n02836607 +n02836900 +n02837134 +n02837567 +n02837789 +n02837887 +n02838014 +n02838178 +n02838345 +n02838577 +n02838728 +n02838958 +n02839110 +n02839351 +n02839592 +n02839910 +n02840134 +n02840245 +n02840515 +n02840619 +n02841063 +n02841187 +n02841315 +n02841506 +n02841641 +n02841847 +n02842133 +n02842573 +n02842809 +n02843029 +n02843158 +n02843276 +n02843465 +n02843553 +n02843684 +n02843777 +n02843909 +n02844056 +n02844214 +n02844307 +n02844714 +n02845130 +n02845293 +n02845985 +n02846141 +n02846260 +n02846511 +n02846619 +n02846733 +n02846874 +n02847461 +n02847631 +n02847852 +n02848118 +n02848216 +n02848523 +n02848806 +n02848921 +n02849154 +n02849885 +n02850060 +n02850358 +n02850732 +n02850950 +n02851099 +n02851795 +n02851939 +n02852043 +n02852173 +n02852360 +n02853016 +n02853218 +n02853336 +n02853745 +n02853870 +n02854378 +n02854532 +n02854630 +n02854739 +n02854926 +n02855089 +n02855390 +n02855701 +n02855793 +n02855925 +n02856013 +n02856237 +n02856362 +n02857365 +n02857477 +n02857644 +n02857907 +n02858304 +n02859184 +n02859343 +n02859443 +n02859557 +n02859729 +n02859955 +n02860415 +n02860640 +n02860847 +n02861022 +n02861147 +n02861286 +n02861387 +n02861509 +n02861658 +n02861777 +n02861886 +n02862048 +n02862916 +n02863014 +n02863176 +n02863340 +n02863426 +n02863536 +n02863638 +n02863750 +n02864122 +n02864504 +n02864593 +n02864987 +n02865351 +n02865665 +n02865931 +n02866106 +n02866386 +n02866578 +n02867401 +n02867592 +n02867715 +n02867966 +n02868240 +n02868429 +n02868546 +n02868638 +n02868975 +n02869155 +n02869249 +n02869563 +n02869737 +n02869837 +n02870526 +n02870676 +n02870772 +n02870880 +n02871005 +n02871147 +n02871314 +n02871439 +n02871525 +n02871631 +n02871824 +n02871963 +n02872333 +n02872529 +n02872752 +n02873520 +n02873623 +n02873733 +n02873839 +n02874086 +n02874214 +n02874336 +n02874442 +n02874537 +n02874642 +n02874750 +n02875436 +n02875626 +n02875948 +n02876084 +n02876326 +n02876457 +n02876657 +n02877266 +n02877513 +n02877642 +n02877765 +n02877962 +n02878107 +n02878222 +n02878425 +n02878534 +n02878628 +n02878796 +n02879087 +n02879309 +n02879422 +n02879517 +n02879718 +n02880189 +n02880393 +n02880546 +n02880842 +n02880940 +n02881193 +n02881546 +n02881757 +n02881906 +n02882190 +n02882301 +n02882483 +n02882647 +n02882894 +n02883004 +n02883101 +n02883205 +n02883344 +n02884225 +n02884450 +n02884859 +n02884994 +n02885108 +n02885233 +n02885338 +n02885462 +n02885882 +n02886321 +n02886434 +n02886599 +n02887079 +n02887209 +n02887489 +n02887832 +n02887970 +n02888270 +n02888429 +n02888569 +n02888898 +n02889425 +n02889646 +n02889856 +n02889996 +n02890188 +n02890351 +n02890513 +n02890662 +n02890804 +n02890940 +n02891188 +n02891788 +n02892201 +n02892304 +n02892392 +n02892499 +n02892626 +n02892767 +n02892948 +n02893269 +n02893418 +n02893608 +n02893692 +n02893941 +n02894024 +n02894158 +n02894337 +n02894605 +n02894847 +n02895008 +n02895154 +n02895328 +n02895438 +n02896074 +n02896294 +n02896442 +n02896694 +n02896856 +n02896949 +n02897097 +n02897389 +n02897820 +n02898093 +n02898173 +n02898269 +n02898369 +n02898585 +n02898711 +n02899439 +n02900160 +n02900459 +n02900594 +n02900705 +n02900857 +n02900987 +n02901114 +n02901259 +n02901377 +n02901481 +n02901620 +n02901793 +n02901901 +n02902079 +n02902687 +n02902816 +n02902916 +n02903006 +n02903126 +n02903204 +n02903727 +n02903852 +n02904109 +n02904233 +n02904505 +n02904640 +n02904803 +n02904927 +n02905036 +n02905152 +n02905886 +n02906734 +n02906963 +n02907082 +n02907296 +n02907391 +n02907656 +n02907873 +n02908123 +n02908217 +n02908773 +n02908951 +n02909053 +n02909165 +n02909285 +n02909706 +n02909870 +n02910145 +n02910241 +n02910353 +n02910542 +n02910701 +n02910864 +n02910964 +n02911332 +n02911485 +n02912065 +n02912319 +n02912557 +n02912894 +n02913152 +n02914991 +n02915904 +n02916065 +n02916179 +n02916350 +n02916936 +n02917067 +n02917377 +n02917521 +n02917607 +n02917742 +n02917964 +n02918112 +n02918330 +n02918455 +n02918595 +n02918831 +n02918964 +n02919148 +n02919308 +n02919414 +n02919648 +n02919792 +n02919890 +n02919976 +n02920083 +n02920164 +n02920259 +n02920369 +n02920503 +n02920658 +n02921029 +n02921195 +n02921292 +n02921406 +n02921592 +n02921756 +n02921884 +n02922159 +n02922292 +n02922461 +n02922578 +n02922798 +n02922877 +n02923129 +n02923535 +n02923682 +n02923915 +n02924116 +n02925009 +n02925107 +n02925385 +n02925519 +n02925666 +n02926426 +n02926591 +n02927053 +n02927161 +n02927764 +n02927887 +n02928049 +n02928299 +n02928413 +n02928608 +n02929184 +n02929289 +n02929462 +n02929582 +n02929923 +n02930080 +n02930214 +n02930339 +n02930645 +n02930766 +n02931013 +n02931148 +n02931294 +n02931417 +n02931836 +n02932019 +n02932400 +n02932523 +n02932693 +n02932891 +n02933112 +n02933340 +n02933462 +n02933649 +n02933750 +n02933990 +n02934168 +n02934451 +n02935017 +n02935387 +n02935490 +n02935658 +n02935891 +n02936176 +n02936281 +n02936402 +n02936570 +n02936714 +n02936921 +n02937010 +n02937336 +n02937958 +n02938218 +n02938321 +n02938886 +n02939185 +n02939763 +n02939866 +n02940289 +n02940385 +n02940570 +n02940706 +n02941095 +n02941228 +n02941845 +n02942015 +n02942147 +n02942349 +n02942460 +n02942699 +n02943241 +n02943465 +n02943686 +n02943871 +n02943964 +n02944075 +n02944146 +n02944256 +n02944459 +n02944579 +n02944826 +n02945161 +n02945813 +n02945964 +n02946127 +n02946270 +n02946348 +n02946509 +n02946753 +n02946824 +n02946921 +n02947212 +n02947660 +n02947818 +n02947977 +n02948072 +n02948293 +n02948403 +n02948557 +n02948834 +n02948942 +n02949084 +n02949202 +n02949356 +n02949542 +n02950018 +n02950120 +n02950186 +n02950256 +n02950482 +n02950632 +n02950826 +n02950943 +n02951358 +n02951585 +n02951703 +n02951843 +n02952109 +n02952237 +n02952374 +n02952485 +n02952585 +n02952674 +n02952798 +n02952935 +n02953056 +n02953197 +n02953455 +n02953552 +n02953673 +n02953850 +n02954163 +n02954340 +n02954938 +n02955065 +n02955247 +n02955540 +n02955767 +n02956393 +n02956699 +n02956795 +n02956883 +n02957008 +n02957135 +n02957252 +n02957427 +n02957755 +n02957862 +n02958343 +n02959942 +n02960352 +n02960690 +n02960903 +n02961035 +n02961225 +n02961451 +n02961544 +n02961947 +n02962061 +n02962200 +n02962414 +n02962843 +n02962938 +n02963159 +n02963302 +n02963503 +n02963692 +n02963821 +n02963987 +n02964075 +n02964196 +n02964295 +n02964634 +n02964843 +n02964934 +n02965024 +n02965122 +n02965216 +n02965300 +n02965529 +n02965783 +n02966068 +n02966193 +n02966545 +n02966687 +n02966786 +n02966942 +n02967081 +n02967170 +n02967294 +n02967407 +n02967540 +n02967626 +n02967782 +n02967991 +n02968074 +n02968210 +n02968333 +n02968473 +n02969010 +n02969163 +n02969323 +n02969527 +n02969634 +n02969886 +n02970408 +n02970534 +n02970685 +n02970849 +n02971167 +n02971356 +n02971473 +n02971579 +n02971691 +n02971940 +n02972397 +n02972714 +n02972934 +n02973017 +n02973236 +n02973805 +n02973904 +n02974003 +n02974348 +n02974454 +n02974565 +n02974697 +n02975212 +n02975589 +n02975994 +n02976123 +n02976249 +n02976350 +n02976455 +n02976552 +n02976641 +n02976815 +n02976939 +n02977058 +n02977330 +n02977438 +n02977619 +n02977936 +n02978055 +n02978205 +n02978367 +n02978478 +n02978753 +n02978881 +n02979074 +n02979186 +n02979290 +n02979399 +n02979516 +n02979836 +n02980036 +n02980203 +n02980441 +n02980625 +n02981024 +n02981198 +n02981321 +n02981565 +n02981792 +n02981911 +n02982232 +n02982416 +n02982515 +n02982599 +n02983072 +n02983189 +n02983357 +n02983507 +n02983904 +n02984061 +n02984203 +n02984469 +n02984699 +n02985137 +n02985606 +n02985828 +n02985963 +n02986066 +n02986160 +n02986348 +n02987047 +n02987379 +n02987492 +n02987706 +n02987823 +n02987950 +n02988066 +n02988156 +n02988304 +n02988486 +n02988679 +n02988963 +n02989099 +n02990373 +n02990758 +n02991048 +n02991302 +n02991847 +n02992032 +n02992211 +n02992368 +n02992529 +n02992795 +n02993194 +n02993368 +n02993546 +n02994573 +n02994743 +n02995345 +n02995871 +n02995998 +n02997391 +n02997607 +n02997910 +n02998003 +n02998107 +n02998563 +n02998696 +n02998841 +n02999138 +n02999410 +n02999936 +n03000134 +n03000247 +n03000530 +n03000684 +n03001115 +n03001282 +n03001540 +n03001627 +n03002096 +n03002210 +n03002341 +n03002555 +n03002711 +n03002816 +n03002948 +n03003091 +n03003633 +n03004275 +n03004409 +n03004531 +n03004620 +n03004713 +n03004824 +n03005033 +n03005147 +n03005285 +n03005515 +n03005619 +n03006626 +n03006788 +n03006903 +n03007130 +n03007297 +n03007444 +n03007591 +n03008177 +n03008817 +n03008976 +n03009111 +n03009269 +n03009794 +n03010473 +n03010656 +n03010795 +n03010915 +n03011018 +n03011355 +n03011741 +n03012013 +n03012159 +n03012373 +n03012499 +n03012644 +n03012734 +n03012897 +n03013006 +n03013438 +n03013580 +n03013850 +n03014440 +n03014705 +n03015149 +n03015254 +n03015478 +n03015631 +n03015851 +n03016209 +n03016389 +n03016609 +n03016737 +n03016868 +n03016953 +n03017070 +n03017168 +n03017698 +n03017835 +n03018209 +n03018349 +n03018614 +n03018712 +n03018848 +n03019198 +n03019304 +n03019434 +n03019685 +n03019806 +n03019938 +n03020034 +n03020416 +n03020692 +n03021228 +n03024064 +n03024233 +n03024333 +n03024518 +n03025070 +n03025165 +n03025250 +n03025886 +n03026506 +n03026907 +n03027001 +n03027108 +n03027250 +n03027505 +n03027625 +n03028079 +n03028596 +n03028785 +n03029066 +n03029197 +n03029296 +n03029445 +n03029925 +n03030262 +n03030353 +n03030557 +n03030880 +n03031012 +n03031152 +n03031422 +n03031756 +n03032252 +n03032453 +n03032811 +n03033267 +n03033362 +n03033986 +n03034244 +n03034405 +n03034516 +n03034663 +n03035252 +n03035510 +n03035715 +n03035832 +n03036022 +n03036149 +n03036244 +n03036341 +n03036469 +n03036701 +n03036866 +n03037108 +n03037228 +n03037404 +n03037590 +n03037709 +n03038041 +n03038281 +n03038480 +n03038685 +n03038870 +n03039015 +n03039259 +n03039353 +n03039493 +n03039827 +n03039947 +n03040229 +n03040376 +n03040836 +n03041114 +n03041265 +n03041449 +n03041632 +n03041810 +n03042139 +n03042384 +n03042490 +n03042697 +n03042829 +n03042984 +n03043173 +n03043274 +n03043423 +n03043693 +n03043798 +n03043958 +n03044671 +n03044801 +n03044934 +n03045074 +n03045228 +n03045337 +n03045698 +n03045800 +n03046029 +n03046133 +n03046257 +n03046802 +n03046921 +n03047052 +n03047171 +n03047690 +n03047799 +n03047941 +n03048883 +n03049066 +n03049326 +n03049457 +n03049782 +n03049924 +n03050026 +n03050453 +n03050546 +n03050655 +n03050864 +n03051041 +n03051249 +n03051396 +n03051540 +n03052464 +n03052917 +n03053047 +n03053976 +n03054491 +n03054605 +n03054901 +n03055159 +n03055418 +n03055670 +n03055857 +n03056097 +n03056215 +n03056288 +n03056493 +n03056583 +n03056873 +n03057021 +n03057541 +n03057636 +n03057724 +n03057841 +n03057920 +n03058107 +n03058603 +n03058949 +n03059103 +n03059236 +n03059366 +n03059685 +n03059934 +n03060728 +n03061050 +n03061211 +n03061345 +n03061505 +n03061674 +n03061819 +n03061893 +n03062015 +n03062122 +n03062245 +n03062336 +n03062651 +n03062798 +n03062985 +n03063073 +n03063199 +n03063338 +n03063485 +n03063599 +n03063689 +n03063834 +n03063968 +n03064250 +n03064350 +n03064562 +n03064758 +n03064935 +n03065243 +n03065424 +n03065708 +n03066232 +n03066359 +n03066464 +n03066849 +n03067093 +n03067212 +n03067339 +n03067518 +n03068181 +n03068998 +n03069752 +n03070059 +n03070193 +n03070396 +n03070587 +n03070854 +n03071021 +n03071160 +n03071288 +n03071552 +n03072056 +n03072201 +n03072440 +n03072682 +n03073296 +n03073384 +n03073545 +n03073694 +n03073977 +n03074380 +n03074855 +n03075097 +n03075248 +n03075370 +n03075500 +n03075634 +n03075768 +n03075946 +n03076411 +n03076623 +n03076708 +n03077442 +n03077616 +n03077741 +n03078287 +n03078506 +n03078670 +n03078802 +n03078995 +n03079136 +n03079230 +n03079494 +n03079616 +n03079741 +n03080309 +n03080497 +n03080633 +n03080731 +n03080904 +n03081859 +n03081986 +n03082127 +n03082280 +n03082450 +n03082656 +n03082807 +n03082979 +n03084420 +n03084834 +n03085013 +n03085219 +n03085333 +n03085602 +n03085781 +n03085915 +n03086183 +n03086457 +n03086580 +n03086670 +n03086868 +n03087069 +n03087245 +n03087366 +n03087521 +n03087643 +n03087816 +n03088389 +n03088580 +n03088707 +n03089477 +n03089624 +n03089753 +n03089879 +n03090000 +n03090172 +n03090437 +n03090710 +n03090856 +n03091044 +n03091223 +n03091374 +n03091907 +n03092053 +n03092166 +n03092314 +n03092476 +n03092656 +n03092883 +n03093427 +n03093792 +n03094159 +n03094503 +n03095699 +n03095965 +n03096439 +n03096960 +n03097362 +n03097535 +n03097673 +n03098140 +n03098515 +n03098688 +n03098806 +n03098959 +n03099147 +n03099274 +n03099454 +n03099622 +n03099771 +n03099945 +n03100240 +n03100346 +n03100490 +n03100897 +n03101156 +n03101302 +n03101375 +n03101517 +n03101664 +n03101796 +n03101986 +n03102371 +n03102516 +n03102654 +n03102859 +n03103128 +n03103396 +n03103563 +n03103904 +n03104019 +n03104512 +n03105088 +n03105214 +n03105306 +n03105467 +n03105645 +n03105810 +n03105974 +n03106722 +n03106898 +n03107046 +n03107488 +n03107716 +n03108455 +n03108624 +n03108759 +n03108853 +n03109033 +n03109150 +n03109253 +n03109693 +n03109881 +n03110202 +n03110669 +n03111041 +n03111177 +n03111296 +n03111690 +n03112240 +n03112719 +n03112869 +n03113152 +n03113505 +n03113657 +n03113835 +n03114041 +n03114236 +n03114379 +n03114504 +n03114743 +n03114839 +n03115014 +n03115180 +n03115400 +n03115663 +n03115762 +n03115897 +n03116008 +n03116163 +n03116530 +n03116767 +n03117199 +n03117642 +n03118346 +n03118969 +n03119203 +n03119396 +n03119510 +n03120198 +n03120491 +n03120778 +n03121040 +n03121190 +n03121298 +n03121431 +n03121897 +n03122073 +n03122202 +n03122295 +n03122748 +n03123553 +n03123666 +n03123809 +n03123917 +n03124043 +n03124170 +n03124313 +n03124474 +n03124590 +n03125057 +n03125588 +n03125729 +n03125870 +n03126090 +n03126385 +n03126580 +n03126707 +n03126927 +n03127024 +n03127203 +n03127408 +n03127531 +n03127747 +n03127925 +n03128085 +n03128248 +n03128427 +n03128519 +n03129001 +n03129471 +n03129636 +n03129753 +n03129848 +n03130066 +n03130233 +n03130563 +n03130761 +n03130866 +n03131193 +n03131574 +n03131669 +n03131967 +n03132076 +n03132261 +n03132438 +n03132666 +n03132776 +n03133050 +n03133415 +n03133878 +n03134118 +n03134232 +n03134394 +n03134739 +n03134853 +n03135030 +n03135532 +n03135656 +n03135788 +n03135917 +n03136051 +n03136254 +n03136369 +n03136504 +n03137473 +n03137579 +n03138128 +n03138217 +n03138344 +n03138669 +n03139089 +n03139464 +n03139640 +n03139998 +n03140126 +n03140292 +n03140431 +n03140546 +n03140652 +n03140771 +n03140900 +n03141065 +n03141327 +n03141455 +n03141612 +n03141702 +n03141823 +n03142099 +n03142205 +n03142325 +n03142431 +n03142679 +n03143400 +n03143572 +n03143754 +n03144156 +n03144873 +n03144982 +n03145147 +n03145277 +n03145384 +n03145522 +n03145719 +n03145843 +n03146219 +n03146342 +n03146449 +n03146560 +n03146687 +n03146777 +n03146846 +n03147084 +n03147156 +n03147280 +n03147509 +n03148324 +n03148518 +n03148727 +n03148808 +n03149135 +n03149401 +n03149686 +n03149810 +n03150232 +n03150511 +n03150661 +n03150795 +n03151077 +n03152303 +n03152951 +n03153246 +n03153585 +n03153948 +n03154073 +n03154316 +n03154446 +n03154616 +n03154745 +n03154895 +n03155178 +n03155502 +n03155915 +n03156071 +n03156279 +n03156405 +n03156767 +n03157348 +n03158186 +n03158414 +n03158668 +n03158796 +n03158885 +n03159535 +n03159640 +n03160001 +n03160186 +n03160309 +n03160740 +n03161016 +n03161450 +n03161893 +n03162297 +n03162460 +n03162556 +n03162714 +n03162818 +n03163222 +n03163381 +n03163488 +n03163798 +n03163973 +n03164192 +n03164344 +n03164605 +n03164722 +n03164929 +n03165096 +n03165211 +n03165466 +n03165616 +n03165823 +n03165955 +n03166120 +n03166514 +n03166600 +n03166685 +n03166809 +n03166951 +n03167153 +n03167978 +n03168107 +n03168217 +n03168543 +n03168663 +n03168774 +n03168933 +n03169063 +n03169176 +n03170292 +n03170459 +n03170635 +n03170872 +n03171228 +n03171356 +n03171635 +n03171910 +n03172038 +n03172738 +n03172965 +n03173270 +n03173387 +n03173929 +n03174079 +n03174450 +n03174731 +n03175081 +n03175189 +n03175301 +n03175457 +n03175604 +n03175843 +n03175983 +n03176238 +n03176386 +n03176594 +n03176763 +n03177059 +n03177165 +n03177708 +n03178000 +n03178173 +n03178430 +n03178538 +n03178674 +n03179701 +n03179910 +n03180011 +n03180384 +n03180504 +n03180732 +n03180865 +n03180969 +n03181293 +n03181667 +n03182140 +n03182232 +n03182912 +n03183080 +n03185868 +n03186199 +n03186285 +n03186818 +n03187037 +n03187153 +n03187268 +n03187595 +n03187751 +n03188290 +n03188531 +n03188725 +n03188871 +n03189083 +n03189311 +n03189818 +n03190458 +n03191286 +n03191451 +n03191561 +n03191776 +n03192543 +n03192907 +n03193107 +n03193260 +n03193423 +n03193597 +n03193754 +n03194170 +n03194297 +n03194812 +n03194992 +n03195332 +n03195485 +n03195799 +n03195959 +n03196062 +n03196217 +n03196324 +n03196598 +n03196990 +n03197201 +n03197337 +n03197446 +n03198223 +n03198500 +n03199358 +n03199488 +n03199647 +n03199775 +n03199901 +n03200231 +n03200357 +n03200539 +n03200701 +n03200906 +n03201035 +n03201208 +n03201529 +n03201638 +n03201776 +n03201895 +n03201996 +n03202354 +n03202481 +n03202760 +n03202940 +n03203089 +n03203806 +n03204134 +n03204306 +n03204436 +n03204558 +n03204955 +n03205143 +n03205304 +n03205458 +n03205574 +n03205669 +n03205903 +n03206023 +n03206158 +n03206282 +n03206405 +n03206602 +n03206718 +n03206908 +n03207305 +n03207548 +n03207630 +n03207743 +n03207835 +n03207941 +n03208556 +n03208938 +n03209359 +n03209477 +n03209666 +n03209910 +n03210245 +n03210372 +n03210552 +n03210683 +n03211117 +n03211413 +n03211616 +n03211789 +n03212114 +n03212247 +n03212406 +n03212811 +n03213014 +n03213361 +n03213538 +n03213715 +n03213826 +n03214253 +n03214450 +n03214582 +n03214966 +n03215076 +n03215191 +n03215337 +n03215508 +n03215749 +n03215930 +n03216199 +n03216402 +n03216562 +n03216710 +n03216828 +n03217653 +n03217739 +n03217889 +n03218198 +n03218446 +n03219010 +n03219135 +n03219483 +n03219612 +n03219859 +n03219966 +n03220095 +n03220237 +n03220513 +n03220692 +n03221059 +n03221351 +n03221540 +n03221720 +n03222176 +n03222318 +n03222516 +n03222722 +n03222857 +n03223162 +n03223299 +n03223441 +n03223553 +n03223686 +n03223923 +n03224490 +n03224603 +n03224753 +n03224893 +n03225108 +n03225458 +n03225616 +n03225777 +n03225988 +n03226090 +n03226254 +n03226375 +n03226538 +n03226880 +n03227010 +n03227184 +n03227317 +n03227721 +n03227856 +n03228016 +n03228254 +n03228365 +n03228533 +n03228692 +n03228796 +n03228967 +n03229115 +n03229244 +n03229526 +n03231160 +n03231368 +n03231819 +n03232309 +n03232417 +n03232543 +n03232815 +n03232923 +n03233123 +n03233624 +n03233744 +n03233905 +n03234164 +n03234952 +n03235042 +n03235180 +n03235327 +n03235796 +n03235979 +n03236093 +n03236217 +n03236423 +n03236580 +n03236735 +n03237212 +n03237340 +n03237416 +n03237639 +n03237839 +n03237992 +n03238131 +n03238286 +n03238586 +n03238762 +n03238879 +n03239054 +n03239259 +n03239607 +n03239726 +n03240140 +n03240683 +n03240892 +n03241093 +n03241335 +n03241496 +n03241903 +n03242120 +n03242264 +n03242390 +n03242506 +n03242995 +n03243218 +n03243625 +n03244047 +n03244231 +n03244388 +n03244775 +n03244919 +n03245271 +n03245421 +n03245724 +n03245889 +n03246197 +n03246312 +n03246454 +n03246653 +n03246933 +n03247083 +n03247351 +n03247495 +n03248835 +n03249342 +n03249569 +n03249956 +n03250089 +n03250279 +n03250405 +n03250588 +n03250847 +n03250952 +n03251100 +n03251280 +n03251533 +n03251766 +n03251932 +n03252231 +n03252324 +n03252422 +n03252637 +n03252787 +n03253071 +n03253187 +n03253279 +n03253714 +n03253796 +n03253886 +n03254046 +n03254189 +n03254374 +n03254625 +n03254737 +n03254862 +n03255030 +n03255167 +n03255322 +n03255488 +n03255899 +n03256032 +n03256166 +n03256472 +n03256631 +n03256788 +n03256928 +n03257065 +n03257210 +n03257586 +n03258192 +n03258330 +n03258456 +n03258577 +n03258905 +n03259009 +n03259280 +n03259401 +n03259505 +n03260206 +n03260504 +n03260733 +n03260849 +n03261019 +n03261263 +n03261395 +n03261603 +n03261776 +n03262072 +n03262248 +n03262519 +n03262717 +n03262809 +n03262932 +n03263076 +n03263338 +n03263640 +n03263758 +n03264906 +n03265032 +n03265754 +n03266195 +n03266371 +n03266620 +n03266749 +n03267113 +n03267468 +n03267696 +n03267821 +n03268142 +n03268311 +n03268645 +n03268790 +n03268918 +n03269073 +n03269203 +n03269401 +n03270165 +n03270695 +n03270854 +n03271030 +n03271260 +n03271376 +n03271574 +n03271765 +n03271865 +n03272010 +n03272125 +n03272239 +n03272383 +n03272562 +n03272810 +n03272940 +n03273061 +n03273551 +n03273740 +n03273913 +n03274265 +n03274435 +n03274561 +n03274796 +n03275125 +n03275311 +n03275566 +n03275681 +n03275864 +n03276179 +n03276696 +n03276839 +n03277004 +n03277149 +n03277459 +n03277602 +n03277771 +n03278248 +n03278914 +n03279153 +n03279364 +n03279508 +n03279804 +n03279918 +n03280216 +n03280394 +n03280644 +n03281145 +n03281524 +n03281673 +n03282060 +n03282295 +n03282401 +n03283221 +n03283413 +n03283827 +n03284308 +n03284482 +n03284743 +n03284886 +n03284981 +n03285578 +n03285730 +n03285912 +n03286572 +n03287351 +n03287733 +n03288003 +n03288500 +n03288643 +n03288742 +n03288886 +n03289660 +n03289985 +n03290096 +n03290195 +n03290653 +n03291413 +n03291551 +n03291741 +n03291819 +n03291963 +n03292085 +n03292362 +n03292475 +n03292603 +n03292736 +n03292960 +n03293095 +n03293741 +n03293863 +n03294048 +n03294604 +n03294833 +n03295012 +n03295140 +n03295246 +n03295928 +n03296081 +n03296217 +n03296328 +n03296478 +n03296963 +n03297103 +n03297226 +n03297495 +n03297644 +n03297735 +n03298089 +n03298352 +n03298716 +n03298858 +n03299406 +n03300216 +n03300443 +n03301175 +n03301291 +n03301389 +n03301568 +n03301833 +n03301940 +n03302671 +n03302790 +n03302938 +n03303217 +n03303669 +n03303831 +n03304197 +n03304323 +n03304465 +n03305300 +n03305522 +n03305953 +n03306385 +n03306869 +n03307037 +n03307573 +n03307792 +n03308152 +n03308481 +n03308614 +n03309110 +n03309356 +n03309465 +n03309687 +n03309808 +n03313333 +n03314227 +n03314378 +n03314608 +n03314780 +n03314884 +n03315644 +n03315805 +n03315990 +n03316105 +n03316406 +n03316873 +n03317233 +n03317510 +n03317673 +n03317788 +n03317889 +n03318136 +n03318294 +n03318865 +n03318983 +n03319167 +n03319457 +n03319576 +n03319745 +n03320046 +n03320262 +n03320421 +n03320519 +n03320845 +n03320959 +n03321103 +n03321419 +n03321563 +n03321843 +n03321954 +n03322570 +n03322704 +n03322836 +n03322940 +n03323096 +n03323211 +n03323319 +n03323703 +n03324629 +n03324814 +n03324928 +n03325088 +n03325288 +n03325403 +n03325584 +n03325691 +n03325941 +n03326073 +n03326371 +n03326475 +n03326660 +n03326795 +n03326948 +n03327133 +n03327234 +n03327553 +n03327691 +n03327841 +n03328201 +n03329302 +n03329536 +n03329663 +n03330002 +n03330665 +n03330792 +n03330947 +n03331077 +n03331244 +n03331599 +n03332005 +n03332173 +n03332271 +n03332393 +n03332591 +n03332784 +n03332989 +n03333129 +n03333252 +n03333349 +n03333610 +n03333711 +n03333851 +n03334017 +n03334291 +n03334382 +n03334492 +n03334912 +n03335030 +n03335333 +n03335461 +n03335846 +n03336168 +n03336282 +n03336575 +n03336742 +n03336839 +n03337140 +n03337383 +n03337494 +n03337822 +n03338287 +n03338821 +n03339296 +n03339529 +n03339643 +n03340009 +n03340723 +n03340923 +n03341035 +n03341153 +n03341297 +n03341606 +n03342015 +n03342127 +n03342262 +n03342432 +n03342657 +n03342863 +n03342961 +n03343047 +n03343234 +n03343354 +n03343560 +n03343737 +n03343853 +n03344305 +n03344393 +n03344509 +n03344642 +n03344784 +n03344935 +n03345487 +n03345837 +n03346135 +n03346289 +n03346455 +n03347037 +n03347472 +n03347617 +n03348142 +n03348868 +n03349020 +n03349296 +n03349367 +n03349469 +n03349599 +n03349771 +n03349892 +n03350204 +n03350352 +n03350456 +n03350602 +n03351151 +n03351262 +n03351434 +n03351979 +n03352232 +n03352366 +n03352628 +n03352961 +n03353281 +n03353951 +n03354207 +n03354903 +n03355468 +n03355768 +n03355925 +n03356038 +n03356279 +n03356446 +n03356559 +n03356858 +n03356982 +n03357081 +n03357267 +n03357716 +n03358172 +n03358380 +n03358726 +n03358841 +n03359137 +n03359285 +n03359436 +n03359566 +n03360133 +n03360300 +n03360431 +n03360622 +n03360731 +n03361109 +n03361297 +n03361380 +n03361550 +n03361683 +n03362639 +n03362771 +n03362890 +n03363363 +n03363549 +n03363749 +n03364008 +n03364156 +n03364599 +n03364937 +n03365231 +n03365374 +n03365592 +n03365991 +n03366464 +n03366721 +n03366823 +n03366974 +n03367059 +n03367321 +n03367410 +n03367545 +n03367875 +n03367969 +n03368048 +n03368352 +n03369276 +n03369407 +n03369512 +n03369866 +n03370387 +n03370646 +n03371875 +n03372029 +n03372549 +n03372822 +n03372933 +n03373237 +n03373611 +n03373943 +n03374102 +n03374282 +n03374372 +n03374473 +n03374570 +n03374649 +n03374838 +n03375171 +n03375329 +n03375575 +n03376159 +n03376279 +n03376595 +n03376771 +n03376938 +n03378005 +n03378174 +n03378342 +n03378442 +n03378593 +n03378765 +n03379051 +n03379204 +n03379343 +n03379719 +n03379828 +n03379989 +n03380301 +n03380647 +n03380724 +n03380867 +n03381126 +n03381231 +n03381450 +n03381565 +n03381776 +n03382104 +n03382292 +n03382413 +n03382533 +n03382708 +n03382856 +n03382969 +n03383099 +n03383211 +n03383378 +n03383468 +n03383562 +n03383821 +n03384167 +n03384352 +n03384891 +n03385295 +n03385557 +n03386011 +n03386343 +n03386544 +n03386726 +n03386870 +n03387323 +n03387653 +n03388043 +n03388183 +n03388323 +n03388549 +n03388711 +n03388990 +n03389611 +n03389761 +n03389889 +n03389983 +n03390075 +n03390327 +n03390673 +n03390786 +n03390983 +n03391301 +n03391613 +n03391770 +n03392648 +n03392741 +n03393017 +n03393199 +n03393324 +n03393761 +n03393912 +n03394149 +n03394272 +n03394480 +n03394649 +n03394916 +n03395256 +n03395401 +n03395514 +n03395859 +n03396074 +n03396580 +n03396654 +n03396997 +n03397087 +n03397266 +n03397412 +n03397532 +n03397947 +n03398153 +n03398228 +n03399579 +n03399677 +n03399761 +n03399971 +n03400231 +n03400972 +n03401129 +n03401279 +n03401721 +n03402188 +n03402369 +n03402511 +n03402785 +n03402941 +n03403643 +n03404012 +n03404149 +n03404251 +n03404360 +n03404449 +n03404900 +n03405111 +n03405265 +n03405595 +n03405725 +n03406759 +n03406966 +n03407369 +n03407865 +n03408054 +n03408264 +n03408340 +n03408444 +n03409297 +n03409393 +n03409591 +n03409920 +n03410022 +n03410147 +n03410303 +n03410423 +n03410571 +n03410740 +n03410938 +n03411079 +n03411208 +n03411339 +n03411927 +n03412058 +n03412220 +n03412387 +n03412511 +n03412906 +n03413124 +n03413264 +n03413428 +n03413684 +n03413828 +n03414029 +n03414162 +n03414676 +n03415252 +n03415486 +n03415626 +n03415749 +n03415868 +n03416094 +n03416489 +n03416640 +n03416775 +n03416900 +n03417042 +n03417202 +n03417345 +n03417749 +n03417970 +n03418158 +n03418242 +n03418402 +n03418618 +n03418749 +n03418915 +n03419014 +n03420345 +n03420801 +n03420935 +n03421117 +n03421324 +n03421485 +n03421669 +n03421768 +n03421960 +n03422072 +n03422484 +n03422589 +n03422771 +n03423099 +n03423224 +n03423306 +n03423479 +n03423568 +n03423719 +n03423877 +n03424204 +n03424325 +n03424489 +n03424630 +n03424862 +n03425241 +n03425325 +n03425413 +n03425595 +n03425769 +n03426134 +n03426285 +n03426462 +n03426574 +n03426871 +n03427202 +n03427296 +n03428090 +n03428226 +n03428349 +n03429003 +n03429137 +n03429288 +n03429682 +n03429771 +n03429914 +n03430091 +n03430313 +n03430418 +n03430551 +n03430959 +n03431243 +n03431570 +n03431745 +n03432061 +n03432129 +n03432360 +n03432509 +n03433247 +n03433637 +n03433877 +n03434188 +n03434285 +n03434830 +n03435593 +n03435743 +n03435991 +n03436075 +n03436182 +n03436417 +n03436549 +n03436656 +n03436772 +n03436891 +n03436990 +n03437184 +n03437295 +n03437430 +n03437581 +n03437741 +n03437829 +n03437941 +n03438071 +n03438257 +n03438661 +n03438780 +n03438863 +n03439348 +n03439631 +n03439814 +n03440216 +n03440682 +n03440876 +n03441112 +n03441345 +n03441465 +n03441582 +n03442288 +n03442487 +n03442597 +n03442756 +n03443005 +n03443149 +n03443371 +n03443543 +n03443912 +n03444034 +n03445326 +n03445617 +n03445777 +n03445924 +n03446070 +n03446268 +n03446832 +n03447075 +n03447358 +n03447447 +n03447721 +n03447894 +n03448031 +n03448590 +n03448696 +n03448956 +n03449217 +n03449309 +n03449451 +n03449564 +n03449858 +n03450230 +n03450516 +n03450734 +n03450881 +n03450974 +n03451120 +n03451253 +n03451365 +n03451711 +n03451798 +n03452267 +n03452449 +n03452594 +n03452741 +n03453231 +n03453320 +n03453443 +n03454110 +n03454211 +n03454442 +n03454536 +n03454707 +n03454885 +n03455355 +n03455488 +n03455642 +n03455802 +n03456024 +n03456186 +n03456299 +n03456447 +n03456548 +n03456665 +n03457008 +n03457451 +n03457686 +n03457902 +n03458271 +n03458422 +n03459328 +n03459591 +n03459775 +n03459914 +n03460040 +n03460147 +n03460297 +n03460455 +n03460899 +n03461288 +n03461385 +n03461651 +n03461882 +n03461988 +n03462110 +n03462315 +n03462747 +n03462972 +n03463185 +n03463381 +n03463666 +n03464053 +n03464467 +n03464628 +n03464952 +n03465040 +n03465151 +n03465320 +n03465426 +n03465500 +n03465605 +n03465718 +n03465818 +n03466162 +n03466493 +n03466600 +n03466839 +n03466947 +n03467068 +n03467254 +n03467380 +n03467517 +n03467796 +n03467887 +n03467984 +n03468570 +n03468696 +n03468821 +n03469031 +n03469175 +n03469493 +n03469832 +n03469903 +n03470005 +n03470222 +n03470387 +n03470629 +n03470948 +n03471030 +n03471190 +n03471347 +n03471779 +n03472232 +n03472535 +n03472672 +n03472796 +n03472937 +n03473078 +n03473227 +n03473465 +n03473817 +n03473966 +n03474167 +n03474352 +n03474779 +n03474896 +n03475581 +n03475674 +n03475823 +n03475961 +n03476083 +n03476313 +n03476542 +n03476684 +n03476991 +n03477143 +n03477303 +n03477410 +n03477512 +n03477773 +n03477902 +n03478589 +n03478756 +n03478907 +n03479121 +n03479266 +n03479397 +n03479502 +n03480579 +n03480719 +n03480973 +n03481172 +n03481521 +n03482001 +n03482128 +n03482252 +n03482405 +n03482523 +n03482877 +n03483086 +n03483230 +n03483316 +n03483531 +n03483637 +n03483823 +n03483971 +n03484083 +n03484487 +n03484576 +n03484809 +n03484931 +n03485198 +n03485309 +n03485407 +n03485575 +n03485794 +n03487090 +n03487331 +n03487444 +n03487533 +n03487642 +n03487774 +n03487886 +n03488111 +n03488188 +n03488438 +n03488603 +n03488784 +n03488887 +n03489048 +n03489162 +n03490006 +n03490119 +n03490324 +n03490449 +n03490649 +n03490784 +n03490884 +n03491032 +n03491724 +n03491988 +n03492087 +n03492250 +n03492542 +n03492922 +n03493219 +n03493792 +n03493911 +n03494278 +n03494537 +n03494706 +n03495039 +n03495258 +n03495570 +n03495671 +n03495941 +n03496183 +n03496296 +n03496486 +n03496612 +n03496892 +n03497100 +n03497352 +n03497657 +n03498441 +n03498536 +n03498662 +n03498781 +n03498866 +n03498962 +n03499354 +n03499468 +n03499907 +n03500090 +n03500209 +n03500295 +n03500389 +n03500457 +n03500557 +n03500699 +n03500838 +n03500971 +n03501152 +n03501288 +n03501520 +n03501614 +n03502200 +n03502331 +n03502509 +n03502777 +n03502897 +n03503097 +n03503233 +n03503358 +n03503477 +n03503567 +n03503718 +n03503997 +n03504205 +n03504293 +n03504723 +n03505015 +n03505133 +n03505383 +n03505504 +n03505667 +n03505764 +n03506028 +n03506184 +n03506370 +n03506560 +n03506727 +n03506880 +n03507241 +n03507458 +n03507658 +n03507963 +n03508101 +n03508485 +n03508881 +n03509394 +n03509608 +n03509843 +n03510072 +n03510244 +n03510384 +n03510487 +n03510583 +n03510866 +n03510987 +n03511175 +n03511333 +n03512030 +n03512147 +n03512452 +n03512624 +n03512911 +n03513137 +n03513376 +n03514129 +n03514340 +n03514451 +n03514693 +n03514894 +n03515338 +n03515934 +n03516266 +n03516367 +n03516647 +n03516844 +n03516996 +n03517509 +n03517647 +n03517760 +n03517899 +n03517982 +n03518135 +n03518230 +n03518305 +n03518445 +n03518631 +n03518829 +n03518943 +n03519081 +n03519226 +n03519387 +n03519674 +n03519848 +n03520493 +n03521076 +n03521431 +n03521544 +n03521675 +n03521771 +n03521899 +n03522003 +n03522100 +n03522634 +n03522863 +n03522990 +n03523134 +n03523398 +n03523506 +n03523987 +n03524150 +n03524287 +n03524425 +n03524574 +n03524745 +n03524976 +n03525074 +n03525252 +n03525454 +n03525693 +n03525827 +n03526062 +n03527149 +n03527444 +n03527565 +n03527675 +n03528100 +n03528263 +n03528523 +n03528901 +n03529175 +n03529444 +n03529629 +n03529860 +n03530189 +n03530511 +n03530642 +n03530910 +n03531281 +n03531447 +n03531546 +n03531691 +n03531982 +n03532342 +n03532672 +n03532919 +n03533014 +n03533392 +n03533486 +n03533654 +n03533845 +n03534580 +n03534695 +n03534776 +n03535024 +n03535284 +n03535647 +n03535780 +n03536122 +n03536568 +n03536761 +n03537085 +n03537241 +n03537412 +n03537550 +n03538037 +n03538179 +n03538300 +n03538406 +n03538542 +n03538634 +n03538817 +n03538957 +n03539103 +n03539293 +n03539433 +n03539546 +n03539678 +n03539754 +n03540090 +n03540267 +n03540476 +n03540595 +n03540914 +n03541091 +n03541269 +n03541393 +n03541537 +n03541696 +n03541923 +n03542333 +n03542605 +n03542727 +n03542860 +n03543012 +n03543112 +n03543254 +n03543394 +n03543511 +n03543603 +n03543735 +n03543945 +n03544143 +n03544238 +n03544360 +n03545150 +n03545470 +n03545585 +n03545756 +n03545961 +n03546112 +n03546235 +n03546340 +n03547054 +n03547229 +n03547397 +n03547530 +n03547861 +n03548086 +n03548195 +n03548320 +n03548402 +n03548533 +n03548626 +n03548930 +n03549199 +n03549350 +n03549473 +n03549589 +n03549732 +n03549897 +n03550153 +n03550289 +n03550420 +n03551084 +n03551395 +n03551582 +n03551790 +n03552001 +n03552449 +n03552749 +n03553019 +n03553248 +n03553486 +n03554375 +n03554460 +n03554645 +n03555006 +n03555217 +n03555426 +n03555564 +n03555662 +n03555862 +n03555996 +n03556173 +n03556679 +n03556811 +n03556992 +n03557270 +n03557360 +n03557590 +n03557692 +n03557840 +n03558007 +n03558176 +n03558404 +n03558633 +n03558739 +n03559373 +n03559531 +n03559999 +n03560430 +n03560860 +n03561047 +n03561169 +n03561573 +n03562565 +n03563200 +n03563460 +n03563710 +n03563967 +n03564849 +n03565288 +n03565565 +n03565710 +n03565830 +n03565991 +n03566193 +n03566329 +n03566555 +n03566730 +n03566860 +n03567066 +n03567635 +n03567788 +n03567912 +n03568117 +n03568818 +n03569014 +n03569174 +n03569293 +n03569494 +n03571280 +n03571439 +n03571625 +n03571853 +n03571942 +n03572107 +n03572205 +n03572321 +n03572631 +n03573574 +n03573848 +n03574243 +n03574416 +n03574555 +n03574816 +n03575958 +n03576215 +n03576443 +n03576955 +n03577090 +n03577312 +n03577474 +n03577672 +n03577818 +n03578055 +n03578251 +n03578656 +n03578981 +n03579538 +n03579982 +n03580518 +n03580615 +n03580845 +n03580990 +n03581125 +n03581531 +n03581897 +n03582508 +n03582959 +n03583419 +n03583621 +n03584254 +n03584400 +n03584829 +n03585073 +n03585337 +n03585438 +n03585551 +n03585682 +n03585778 +n03585875 +n03586219 +n03586631 +n03586911 +n03587205 +n03588216 +n03588841 +n03588951 +n03589313 +n03589513 +n03589672 +n03589791 +n03590306 +n03590475 +n03590588 +n03590841 +n03590932 +n03591116 +n03591313 +n03591592 +n03591798 +n03591901 +n03592245 +n03592669 +n03592773 +n03592931 +n03593122 +n03593222 +n03593526 +n03593862 +n03594010 +n03594148 +n03594277 +n03594523 +n03594734 +n03594945 +n03595055 +n03595264 +n03595409 +n03595523 +n03595614 +n03595860 +n03596099 +n03596285 +n03596543 +n03597147 +n03597317 +n03597916 +n03598151 +n03598299 +n03598385 +n03598515 +n03598646 +n03598783 +n03598930 +n03599486 +n03599964 +n03600285 +n03600475 +n03600722 +n03600977 +n03601442 +n03601638 +n03601840 +n03602081 +n03602194 +n03602365 +n03602686 +n03602790 +n03602883 +n03603442 +n03603594 +n03603722 +n03604156 +n03604311 +n03604400 +n03604536 +n03604629 +n03604763 +n03604843 +n03605417 +n03605504 +n03605598 +n03605722 +n03605915 +n03606106 +n03606251 +n03606347 +n03606465 +n03607029 +n03607186 +n03607527 +n03607659 +n03607923 +n03608504 +n03609147 +n03609235 +n03609397 +n03609542 +n03609786 +n03609959 +n03610098 +n03610418 +n03610524 +n03610682 +n03610836 +n03610992 +n03612010 +n03612814 +n03612965 +n03613294 +n03613592 +n03614007 +n03614383 +n03614532 +n03614782 +n03614887 +n03615300 +n03615406 +n03615563 +n03615655 +n03615790 +n03616091 +n03616225 +n03616428 +n03616763 +n03616979 +n03617095 +n03617312 +n03617480 +n03617594 +n03617834 +n03618101 +n03618339 +n03618546 +n03618678 +n03618797 +n03618982 +n03619050 +n03619196 +n03619275 +n03619396 +n03619650 +n03619793 +n03619890 +n03620052 +n03620353 +n03620967 +n03621049 +n03621377 +n03621694 +n03622058 +n03622401 +n03622526 +n03622839 +n03622931 +n03623198 +n03623338 +n03623556 +n03624134 +n03624400 +n03624767 +n03625355 +n03625539 +n03625646 +n03625943 +n03626115 +n03626272 +n03626418 +n03626502 +n03626760 +n03627232 +n03627954 +n03628071 +n03628215 +n03628421 +n03628511 +n03628728 +n03628831 +n03628984 +n03629100 +n03629231 +n03629520 +n03629643 +n03630262 +n03630383 +n03631177 +n03631811 +n03631922 +n03632100 +n03632577 +n03632729 +n03632852 +n03632963 +n03633091 +n03633341 +n03633632 +n03633886 +n03634034 +n03634899 +n03635032 +n03635108 +n03635330 +n03635516 +n03635668 +n03635932 +n03636248 +n03636649 +n03637027 +n03637181 +n03637318 +n03637480 +n03637787 +n03637898 +n03638014 +n03638180 +n03638623 +n03638743 +n03638883 +n03639077 +n03639230 +n03639497 +n03639675 +n03639880 +n03640850 +n03640988 +n03641569 +n03641947 +n03642144 +n03642341 +n03642444 +n03642573 +n03642806 +n03643149 +n03643253 +n03643491 +n03643737 +n03643907 +n03644073 +n03644378 +n03644858 +n03645011 +n03645168 +n03645290 +n03645577 +n03646020 +n03646148 +n03646296 +n03646809 +n03646916 +n03647423 +n03647520 +n03648219 +n03648431 +n03648667 +n03649003 +n03649161 +n03649288 +n03649674 +n03649797 +n03649909 +n03650551 +n03651388 +n03651605 +n03651843 +n03652100 +n03652389 +n03652729 +n03652826 +n03652932 +n03653110 +n03653220 +n03653454 +n03653583 +n03653740 +n03653833 +n03653975 +n03654576 +n03654826 +n03655072 +n03655470 +n03655720 +n03656484 +n03656957 +n03657121 +n03657239 +n03657511 +n03658102 +n03658185 +n03658635 +n03658858 +n03659292 +n03659686 +n03659809 +n03659950 +n03660124 +n03660562 +n03660909 +n03661043 +n03661340 +n03662301 +n03662452 +n03662601 +n03662719 +n03662887 +n03663433 +n03663531 +n03663910 +n03664159 +n03664675 +n03664840 +n03664943 +n03665232 +n03665366 +n03665851 +n03665924 +n03666238 +n03666362 +n03666591 +n03666917 +n03667060 +n03667235 +n03667552 +n03667664 +n03667829 +n03668067 +n03668279 +n03668488 +n03668803 +n03669245 +n03669534 +n03669886 +n03670208 +n03671914 +n03672521 +n03672827 +n03673027 +n03673270 +n03673450 +n03673767 +n03674270 +n03674440 +n03674731 +n03674842 +n03675076 +n03675235 +n03675445 +n03675558 +n03675907 +n03676087 +n03676483 +n03676623 +n03676759 +n03677115 +n03677682 +n03677766 +n03678558 +n03678729 +n03678879 +n03679384 +n03679712 +n03680248 +n03680355 +n03680512 +n03680734 +n03680858 +n03680942 +n03681477 +n03681813 +n03682380 +n03682487 +n03682877 +n03683079 +n03683341 +n03683457 +n03683606 +n03683708 +n03683995 +n03684143 +n03684224 +n03684489 +n03684611 +n03684740 +n03684823 +n03685307 +n03685486 +n03685640 +n03685820 +n03686130 +n03686363 +n03686470 +n03686924 +n03687137 +n03687928 +n03688066 +n03688192 +n03688405 +n03688504 +n03688605 +n03688707 +n03688832 +n03688943 +n03689157 +n03689570 +n03690168 +n03690279 +n03690473 +n03690851 +n03690938 +n03691459 +n03691817 +n03692004 +n03692136 +n03692272 +n03692379 +n03692522 +n03692842 +n03693293 +n03693474 +n03693707 +n03693860 +n03694196 +n03694356 +n03694639 +n03694761 +n03694949 +n03695122 +n03695452 +n03695616 +n03695753 +n03695857 +n03695957 +n03696065 +n03696301 +n03696445 +n03696568 +n03696746 +n03696909 +n03697007 +n03697366 +n03697552 +n03697812 +n03697913 +n03698123 +n03698226 +n03698360 +n03698604 +n03698723 +n03698815 +n03699280 +n03699591 +n03699754 +n03699975 +n03700963 +n03701191 +n03701391 +n03701640 +n03701790 +n03702248 +n03702440 +n03702582 +n03703075 +n03703203 +n03703463 +n03703590 +n03703730 +n03703862 +n03703945 +n03704549 +n03704834 +n03705379 +n03705808 +n03706229 +n03706415 +n03706653 +n03706939 +n03707171 +n03707372 +n03707597 +n03707766 +n03708036 +n03708425 +n03708843 +n03708962 +n03709206 +n03709363 +n03709545 +n03709644 +n03709823 +n03709960 +n03710079 +n03710193 +n03710294 +n03710421 +n03710528 +n03710637 +n03710721 +n03710937 +n03711044 +n03711711 +n03711999 +n03712111 +n03712337 +n03712444 +n03712887 +n03712981 +n03713069 +n03713151 +n03713436 +n03714235 +n03715114 +n03715275 +n03715386 +n03715669 +n03715892 +n03716228 +n03716887 +n03716966 +n03717131 +n03717285 +n03717447 +n03717622 +n03718212 +n03718335 +n03718458 +n03718581 +n03718699 +n03718789 +n03718935 +n03719053 +n03719343 +n03719560 +n03719743 +n03720005 +n03720163 +n03720665 +n03720891 +n03721047 +n03721252 +n03721384 +n03721590 +n03722007 +n03722288 +n03722646 +n03722944 +n03723153 +n03723267 +n03723439 +n03723781 +n03723885 +n03724066 +n03724176 +n03724417 +n03724538 +n03724623 +n03724756 +n03724870 +n03725035 +n03725506 +n03725600 +n03725717 +n03725869 +n03726116 +n03726233 +n03726371 +n03726516 +n03726760 +n03726993 +n03727067 +n03727465 +n03727605 +n03727837 +n03727946 +n03728437 +n03728982 +n03729131 +n03729308 +n03729402 +n03729482 +n03729647 +n03729826 +n03729951 +n03730153 +n03730334 +n03730494 +n03730655 +n03730788 +n03730893 +n03731019 +n03731483 +n03731695 +n03731882 +n03732020 +n03732114 +n03732458 +n03732543 +n03732658 +n03733131 +n03733281 +n03733465 +n03733547 +n03733644 +n03733805 +n03733925 +n03735637 +n03735963 +n03736064 +n03736147 +n03736269 +n03736372 +n03736470 +n03736970 +n03738066 +n03738241 +n03738472 +n03739518 +n03739693 +n03742019 +n03742115 +n03742238 +n03743016 +n03743279 +n03743902 +n03744276 +n03744684 +n03744840 +n03745146 +n03745487 +n03745571 +n03746005 +n03746155 +n03746330 +n03746486 +n03748162 +n03749504 +n03749634 +n03749807 +n03750206 +n03750437 +n03750614 +n03751065 +n03751269 +n03751458 +n03751590 +n03751757 +n03752071 +n03752185 +n03752398 +n03752922 +n03753077 +n03753514 +n03757604 +n03758089 +n03758220 +n03758894 +n03758992 +n03759243 +n03759432 +n03759661 +n03759954 +n03760310 +n03760671 +n03760944 +n03761084 +n03761588 +n03761731 +n03762238 +n03762332 +n03762434 +n03762602 +n03762982 +n03763727 +n03763968 +n03764276 +n03764606 +n03764736 +n03764822 +n03764995 +n03765128 +n03765467 +n03765561 +n03765934 +n03766044 +n03766218 +n03766322 +n03766508 +n03766600 +n03766697 +n03766935 +n03767112 +n03767203 +n03767459 +n03767745 +n03767966 +n03768132 +n03768683 +n03768823 +n03768916 +n03769610 +n03769722 +n03769881 +n03770085 +n03770224 +n03770316 +n03770439 +n03770520 +n03770679 +n03770834 +n03770954 +n03772077 +n03772269 +n03772584 +n03772674 +n03773035 +n03773504 +n03773835 +n03774327 +n03774461 +n03775071 +n03775199 +n03775388 +n03775546 +n03775636 +n03775747 +n03775847 +n03776167 +n03776460 +n03776877 +n03776997 +n03777126 +n03777568 +n03777754 +n03778459 +n03778817 +n03779000 +n03779128 +n03779246 +n03779370 +n03779884 +n03780047 +n03780799 +n03781055 +n03781244 +n03781467 +n03781594 +n03781683 +n03781787 +n03782006 +n03782190 +n03782794 +n03782929 +n03783304 +n03783430 +n03783575 +n03783873 +n03784139 +n03784270 +n03784793 +n03784896 +n03785016 +n03785142 +n03785237 +n03785499 +n03785721 +n03786096 +n03786194 +n03786313 +n03786621 +n03786715 +n03786901 +n03787032 +n03787523 +n03788047 +n03788195 +n03788365 +n03788498 +n03788601 +n03788914 +n03789171 +n03789400 +n03789603 +n03789794 +n03789946 +n03790230 +n03790512 +n03790755 +n03790953 +n03791053 +n03791235 +n03792048 +n03792334 +n03792526 +n03792782 +n03792972 +n03793489 +n03793850 +n03794056 +n03794136 +n03794798 +n03795123 +n03795269 +n03795758 +n03795976 +n03796181 +n03796401 +n03796522 +n03796605 +n03796848 +n03796974 +n03797062 +n03797182 +n03797264 +n03797390 +n03797896 +n03798061 +n03798442 +n03798610 +n03798982 +n03799113 +n03799240 +n03799375 +n03799610 +n03799876 +n03800371 +n03800485 +n03800563 +n03800772 +n03800933 +n03801353 +n03801533 +n03801671 +n03801760 +n03801880 +n03802007 +n03802228 +n03802393 +n03802643 +n03802800 +n03802973 +n03803116 +n03803284 +n03803780 +n03804211 +n03804744 +n03805180 +n03805280 +n03805374 +n03805503 +n03805725 +n03805933 +n03807334 +n03809211 +n03809312 +n03809603 +n03809686 +n03809802 +n03810412 +n03810952 +n03811295 +n03811444 +n03811847 +n03811965 +n03812263 +n03812382 +n03812789 +n03812924 +n03813078 +n03813176 +n03813946 +n03814528 +n03814639 +n03814727 +n03814817 +n03814906 +n03815149 +n03815278 +n03815482 +n03815615 +n03816005 +n03816136 +n03816394 +n03816530 +n03816849 +n03817191 +n03817331 +n03817522 +n03817647 +n03818001 +n03818343 +n03819047 +n03819336 +n03819448 +n03819595 +n03819994 +n03820154 +n03820318 +n03820728 +n03820950 +n03821145 +n03821424 +n03821518 +n03822171 +n03822361 +n03822504 +n03822656 +n03822767 +n03823111 +n03823216 +n03823312 +n03823673 +n03823906 +n03824197 +n03824284 +n03824381 +n03824589 +n03824713 +n03824999 +n03825080 +n03825271 +n03825442 +n03825673 +n03825788 +n03825913 +n03826039 +n03826186 +n03827420 +n03827536 +n03828020 +n03829340 +n03829857 +n03829954 +n03831203 +n03831382 +n03831757 +n03832144 +n03832673 +n03833907 +n03834040 +n03834472 +n03834604 +n03835197 +n03835729 +n03835941 +n03836062 +n03836451 +n03836602 +n03836906 +n03836976 +n03837422 +n03837606 +n03837698 +n03837869 +n03838024 +n03838298 +n03838748 +n03838899 +n03839172 +n03839276 +n03839424 +n03839671 +n03839795 +n03840327 +n03840681 +n03840823 +n03841011 +n03841143 +n03841290 +n03841666 +n03842012 +n03842156 +n03842276 +n03842377 +n03842585 +n03842754 +n03842986 +n03843092 +n03843316 +n03843438 +n03843555 +n03843883 +n03844045 +n03844233 +n03844550 +n03844673 +n03844815 +n03844965 +n03845107 +n03845190 +n03845990 +n03846100 +n03846234 +n03846431 +n03846677 +n03846772 +n03846970 +n03847471 +n03847823 +n03848033 +n03848168 +n03848348 +n03848537 +n03849275 +n03849412 +n03849679 +n03849814 +n03849943 +n03850053 +n03850245 +n03850492 +n03850613 +n03851341 +n03851787 +n03852280 +n03852544 +n03852688 +n03853291 +n03853924 +n03854065 +n03854421 +n03854506 +n03854722 +n03854815 +n03855214 +n03855333 +n03855464 +n03855604 +n03855756 +n03855908 +n03856012 +n03856335 +n03856465 +n03856728 +n03857026 +n03857156 +n03857291 +n03857687 +n03857828 +n03858085 +n03858183 +n03858418 +n03858533 +n03858837 +n03859000 +n03859170 +n03859280 +n03859495 +n03859608 +n03859958 +n03860234 +n03860404 +n03861048 +n03861271 +n03861430 +n03861596 +n03861842 +n03862379 +n03862676 +n03862862 +n03863108 +n03863262 +n03863657 +n03863783 +n03863923 +n03864139 +n03864356 +n03864692 +n03865288 +n03865371 +n03865557 +n03865820 +n03865949 +n03866082 +n03867854 +n03868044 +n03868242 +n03868324 +n03868406 +n03868643 +n03868763 +n03868863 +n03869838 +n03869976 +n03870105 +n03870290 +n03870546 +n03870672 +n03870980 +n03871083 +n03871371 +n03871524 +n03871628 +n03871724 +n03871860 +n03872016 +n03872167 +n03872273 +n03873416 +n03873699 +n03873848 +n03873996 +n03874138 +n03874293 +n03874487 +n03874599 +n03874823 +n03875218 +n03875806 +n03875955 +n03876111 +n03876231 +n03877351 +n03877472 +n03877674 +n03877845 +n03878066 +n03878211 +n03878294 +n03878418 +n03878511 +n03878674 +n03878828 +n03878963 +n03879456 +n03879705 +n03880032 +n03880129 +n03880323 +n03880531 +n03881305 +n03881404 +n03881534 +n03882611 +n03882960 +n03883054 +n03883385 +n03883524 +n03883664 +n03883773 +n03883944 +n03884397 +n03884554 +n03884639 +n03884778 +n03884926 +n03885028 +n03885194 +n03885293 +n03885410 +n03885535 +n03885669 +n03885788 +n03885904 +n03886053 +n03886641 +n03886762 +n03886940 +n03887185 +n03887330 +n03887512 +n03887697 +n03887899 +n03888022 +n03888257 +n03888605 +n03888808 +n03888998 +n03889397 +n03889503 +n03889626 +n03889726 +n03889871 +n03890093 +n03890233 +n03890358 +n03890514 +n03891051 +n03891251 +n03891332 +n03891538 +n03892178 +n03892425 +n03892557 +n03892728 +n03893935 +n03894051 +n03894379 +n03894677 +n03894933 +n03895038 +n03895170 +n03895866 +n03896103 +n03896233 +n03896419 +n03896526 +n03896628 +n03896984 +n03897130 +n03897634 +n03897943 +n03898129 +n03898271 +n03898395 +n03898633 +n03898787 +n03899100 +n03899612 +n03899768 +n03899933 +n03900028 +n03900194 +n03900301 +n03900393 +n03900979 +n03901229 +n03901338 +n03901750 +n03901974 +n03902125 +n03902220 +n03902482 +n03902756 +n03903133 +n03903290 +n03903424 +n03903733 +n03903868 +n03904060 +n03904183 +n03904433 +n03904657 +n03904782 +n03904909 +n03905361 +n03905540 +n03905730 +n03905947 +n03906106 +n03906224 +n03906463 +n03906590 +n03906789 +n03906894 +n03906997 +n03907475 +n03907654 +n03907908 +n03908111 +n03908204 +n03908456 +n03908618 +n03908714 +n03909020 +n03909160 +n03909406 +n03909516 +n03909658 +n03911406 +n03911513 +n03911658 +n03911767 +n03911866 +n03912218 +n03912821 +n03913343 +n03913930 +n03914106 +n03914337 +n03914438 +n03914583 +n03914831 +n03915118 +n03915320 +n03915437 +n03915900 +n03916031 +n03916289 +n03916385 +n03916470 +n03916720 +n03917048 +n03917198 +n03917327 +n03917814 +n03918074 +n03918480 +n03918737 +n03919096 +n03919289 +n03919430 +n03919808 +n03920288 +n03920384 +n03920641 +n03920737 +n03920867 +n03923379 +n03923564 +n03923692 +n03923918 +n03924069 +n03924407 +n03924532 +n03924679 +n03926148 +n03926412 +n03926876 +n03927091 +n03927299 +n03927539 +n03927792 +n03928116 +n03928589 +n03928814 +n03928994 +n03929091 +n03929202 +n03929443 +n03929660 +n03929855 +n03930229 +n03930313 +n03930431 +n03930515 +n03930630 +n03931044 +n03931765 +n03931885 +n03931980 +n03932080 +n03932670 +n03933391 +n03933933 +n03934042 +n03934229 +n03934311 +n03934565 +n03934656 +n03934890 +n03935116 +n03935234 +n03935335 +n03935883 +n03936269 +n03936466 +n03937543 +n03937835 +n03937931 +n03938037 +n03938244 +n03938401 +n03938522 +n03938725 +n03939062 +n03939178 +n03939281 +n03939440 +n03939565 +n03939677 +n03939844 +n03940256 +n03940894 +n03941013 +n03941231 +n03941417 +n03941586 +n03941684 +n03941887 +n03942028 +n03942600 +n03942813 +n03942920 +n03943115 +n03943266 +n03943623 +n03943714 +n03943833 +n03943920 +n03944024 +n03944138 +n03944341 +n03945459 +n03945615 +n03945817 +n03945928 +n03946076 +n03946162 +n03947111 +n03947343 +n03947466 +n03947798 +n03947888 +n03948242 +n03948459 +n03948830 +n03948950 +n03949145 +n03949317 +n03949761 +n03950228 +n03950359 +n03950537 +n03950647 +n03950899 +n03951068 +n03951213 +n03951453 +n03951800 +n03951971 +n03952150 +n03952576 +n03953020 +n03953416 +n03953901 +n03954393 +n03954731 +n03955296 +n03955489 +n03955809 +n03955941 +n03956157 +n03956331 +n03956531 +n03956623 +n03956785 +n03956922 +n03957315 +n03957420 +n03957762 +n03957991 +n03958227 +n03958338 +n03958630 +n03958752 +n03959014 +n03959123 +n03959227 +n03959701 +n03960374 +n03960490 +n03961394 +n03961630 +n03961711 +n03961828 +n03961939 +n03962525 +n03962685 +n03962852 +n03962932 +n03963028 +n03963198 +n03963294 +n03963483 +n03963645 +n03964495 +n03964611 +n03965456 +n03965907 +n03966206 +n03966325 +n03966582 +n03966751 +n03966976 +n03967270 +n03967396 +n03967562 +n03967942 +n03968293 +n03968479 +n03968581 +n03968728 +n03969510 +n03970156 +n03970363 +n03970546 +n03971218 +n03971321 +n03971960 +n03972146 +n03972372 +n03972524 +n03973003 +n03973285 +n03973402 +n03973520 +n03973628 +n03973839 +n03973945 +n03974070 +n03974915 +n03975035 +n03975657 +n03975788 +n03975926 +n03976105 +n03976268 +n03976467 +n03976657 +n03977158 +n03977266 +n03977430 +n03977592 +n03977966 +n03978421 +n03978575 +n03978686 +n03978815 +n03978966 +n03979377 +n03979492 +n03980026 +n03980478 +n03980874 +n03980986 +n03981094 +n03981340 +n03981566 +n03981760 +n03981924 +n03982232 +n03982331 +n03982430 +n03982642 +n03982767 +n03982895 +n03983396 +n03983499 +n03983612 +n03983712 +n03983928 +n03984125 +n03984234 +n03984381 +n03984643 +n03984759 +n03985069 +n03985232 +n03985441 +n03985881 +n03986071 +n03986224 +n03986355 +n03986562 +n03986704 +n03986857 +n03986949 +n03987266 +n03987376 +n03987674 +n03987865 +n03987990 +n03988170 +n03988758 +n03988926 +n03989199 +n03989349 +n03989447 +n03989665 +n03989777 +n03989898 +n03990474 +n03991062 +n03991202 +n03991321 +n03991443 +n03991646 +n03991837 +n03992325 +n03992436 +n03992509 +n03992703 +n03992975 +n03993053 +n03993180 +n03993403 +n03993703 +n03993878 +n03994008 +n03994297 +n03994417 +n03994614 +n03994757 +n03995018 +n03995265 +n03995372 +n03995535 +n03995661 +n03995856 +n03996004 +n03996145 +n03996416 +n03996849 +n03997274 +n03997484 +n03997875 +n03998194 +n03998333 +n03998673 +n03999064 +n03999160 +n03999621 +n03999992 +n04000311 +n04000480 +n04000592 +n04000716 +n04000998 +n04001132 +n04001265 +n04001397 +n04001499 +n04001661 +n04001845 +n04002262 +n04002371 +n04002629 +n04003241 +n04003359 +n04003856 +n04004099 +n04004210 +n04004475 +n04004767 +n04004990 +n04005197 +n04005630 +n04005912 +n04006067 +n04006227 +n04006330 +n04006411 +n04007415 +n04007664 +n04008385 +n04008634 +n04009552 +n04009801 +n04009923 +n04010057 +n04010779 +n04010927 +n04011827 +n04012084 +n04012482 +n04012665 +n04013060 +n04013176 +n04013600 +n04013729 +n04014297 +n04015204 +n04015786 +n04015908 +n04016240 +n04016479 +n04016576 +n04016684 +n04016846 +n04017571 +n04017807 +n04018155 +n04018399 +n04018667 +n04019101 +n04019335 +n04019541 +n04019696 +n04019881 +n04020087 +n04020298 +n04020744 +n04020912 +n04021028 +n04021164 +n04021362 +n04021503 +n04021704 +n04021798 +n04022332 +n04022434 +n04022708 +n04022866 +n04023021 +n04023119 +n04023249 +n04023422 +n04023695 +n04023962 +n04024137 +n04024274 +n04024862 +n04024983 +n04025508 +n04025633 +n04026053 +n04026180 +n04026417 +n04026813 +n04026918 +n04027023 +n04027367 +n04027706 +n04027820 +n04027935 +n04028074 +n04028221 +n04028315 +n04028581 +n04028764 +n04029416 +n04029647 +n04029734 +n04029913 +n04030054 +n04030161 +n04030274 +n04030414 +n04030518 +n04030846 +n04030965 +n04031884 +n04032509 +n04032603 +n04032936 +n04033287 +n04033425 +n04033557 +n04033801 +n04033901 +n04033995 +n04034262 +n04034367 +n04035231 +n04035634 +n04035748 +n04035836 +n04035912 +n04036155 +n04036303 +n04036776 +n04036963 +n04037076 +n04037220 +n04037298 +n04037443 +n04037873 +n04037964 +n04038231 +n04038338 +n04038440 +n04038727 +n04039041 +n04039209 +n04039381 +n04039742 +n04039848 +n04040247 +n04040373 +n04040540 +n04040759 +n04041069 +n04041243 +n04041408 +n04041544 +n04041747 +n04042076 +n04042204 +n04042358 +n04042632 +n04042795 +n04042985 +n04043168 +n04043411 +n04043733 +n04044307 +n04044498 +n04044716 +n04044955 +n04045085 +n04045255 +n04045397 +n04045644 +n04045787 +n04045941 +n04046091 +n04046277 +n04046400 +n04046590 +n04046974 +n04047139 +n04047401 +n04047733 +n04047834 +n04048441 +n04049303 +n04049405 +n04049585 +n04049753 +n04050066 +n04050313 +n04050600 +n04050933 +n04051269 +n04051439 +n04051549 +n04051705 +n04051825 +n04052235 +n04052346 +n04052442 +n04052658 +n04052757 +n04053508 +n04053677 +n04053767 +n04054361 +n04054566 +n04054670 +n04055180 +n04055447 +n04055700 +n04055861 +n04056073 +n04056180 +n04056413 +n04056932 +n04057047 +n04057215 +n04057435 +n04057673 +n04057846 +n04057981 +n04058096 +n04058239 +n04058486 +n04058594 +n04058721 +n04059157 +n04059298 +n04059399 +n04059516 +n04059947 +n04060198 +n04060448 +n04060647 +n04060904 +n04061681 +n04061793 +n04061969 +n04062179 +n04062428 +n04062644 +n04062807 +n04063154 +n04063373 +n04063868 +n04064213 +n04064401 +n04064747 +n04064862 +n04065272 +n04065464 +n04065789 +n04065909 +n04066023 +n04066270 +n04066388 +n04066476 +n04066767 +n04067143 +n04067231 +n04067353 +n04067472 +n04067658 +n04067818 +n04067921 +n04068441 +n04068601 +n04069166 +n04069276 +n04069434 +n04069582 +n04069777 +n04070003 +n04070207 +n04070415 +n04070545 +n04070727 +n04070964 +n04071102 +n04071263 +n04071393 +n04072193 +n04072551 +n04072960 +n04073425 +n04073948 +n04074185 +n04074963 +n04075291 +n04075468 +n04075715 +n04075813 +n04075916 +n04076052 +n04076284 +n04076713 +n04077430 +n04077594 +n04077734 +n04077889 +n04078002 +n04078574 +n04078955 +n04079106 +n04079244 +n04079603 +n04079933 +n04080138 +n04080454 +n04080705 +n04080833 +n04081281 +n04081699 +n04081844 +n04082344 +n04082562 +n04082710 +n04082886 +n04083113 +n04083309 +n04083649 +n04083800 +n04084517 +n04084682 +n04084889 +n04085017 +n04085574 +n04085873 +n04086066 +n04086273 +n04086446 +n04086663 +n04086794 +n04086937 +n04087126 +n04087432 +n04087709 +n04087826 +n04088229 +n04088343 +n04088441 +n04088696 +n04088797 +n04089152 +n04089376 +n04089666 +n04089836 +n04089976 +n04090263 +n04090548 +n04090781 +n04091097 +n04091466 +n04091584 +n04091693 +n04092168 +n04093157 +n04093223 +n04093625 +n04093775 +n04093915 +n04094060 +n04094250 +n04094438 +n04094608 +n04094720 +n04094859 +n04095109 +n04095210 +n04095342 +n04095577 +n04095938 +n04096066 +n04096733 +n04096848 +n04097085 +n04097373 +n04097622 +n04097760 +n04097866 +n04098169 +n04098260 +n04098399 +n04098513 +n04098795 +n04099003 +n04099175 +n04099429 +n04099969 +n04100174 +n04100519 +n04101375 +n04101497 +n04101701 +n04101860 +n04102037 +n04102162 +n04102285 +n04102406 +n04102618 +n04102760 +n04102872 +n04102962 +n04103094 +n04103206 +n04103364 +n04103665 +n04103769 +n04103918 +n04104147 +n04104384 +n04104500 +n04104770 +n04104925 +n04105068 +n04105438 +n04105704 +n04105893 +n04107598 +n04107743 +n04107984 +n04108268 +n04108822 +n04108999 +n04110068 +n04110178 +n04110281 +n04110439 +n04110654 +n04110841 +n04110955 +n04111190 +n04111414 +n04111531 +n04111668 +n04111962 +n04112147 +n04112252 +n04112430 +n04112579 +n04112654 +n04112752 +n04112921 +n04113038 +n04113194 +n04113316 +n04113406 +n04113641 +n04113765 +n04113968 +n04114069 +n04114301 +n04114428 +n04114719 +n04114844 +n04114996 +n04115144 +n04115256 +n04115456 +n04115542 +n04115802 +n04115996 +n04116098 +n04116294 +n04116389 +n04116512 +n04117216 +n04117464 +n04117639 +n04118021 +n04118538 +n04118635 +n04118776 +n04119091 +n04119230 +n04119360 +n04119478 +n04119630 +n04119751 +n04120489 +n04120695 +n04120842 +n04121228 +n04121342 +n04121426 +n04121511 +n04121728 +n04122262 +n04122349 +n04122492 +n04122578 +n04122685 +n04122825 +n04123026 +n04123123 +n04123228 +n04123317 +n04123448 +n04123567 +n04123740 +n04124098 +n04124202 +n04124370 +n04124488 +n04124573 +n04124887 +n04125021 +n04125116 +n04125257 +n04125541 +n04125692 +n04125853 +n04126066 +n04126244 +n04126541 +n04126659 +n04126852 +n04126980 +n04127117 +n04127249 +n04127395 +n04127521 +n04127633 +n04127904 +n04128413 +n04128499 +n04128710 +n04128837 +n04129490 +n04129688 +n04129766 +n04130143 +n04130257 +n04130566 +n04130907 +n04131015 +n04131113 +n04131208 +n04131368 +n04131499 +n04131690 +n04131811 +n04131929 +n04132158 +n04132465 +n04132603 +n04132829 +n04132985 +n04133114 +n04133789 +n04134008 +n04134170 +n04134523 +n04134632 +n04135024 +n04135118 +n04135315 +n04135710 +n04135933 +n04136045 +n04136161 +n04136333 +n04136510 +n04136800 +n04137089 +n04137217 +n04137355 +n04137444 +n04137773 +n04137897 +n04138131 +n04138261 +n04138869 +n04138977 +n04139140 +n04139395 +n04139859 +n04140064 +n04140539 +n04140631 +n04140777 +n04140853 +n04141076 +n04141198 +n04141327 +n04141712 +n04141838 +n04141975 +n04142175 +n04142327 +n04142434 +n04142731 +n04142999 +n04143140 +n04143365 +n04143897 +n04144241 +n04144539 +n04144651 +n04145863 +n04146050 +n04146343 +n04146504 +n04146614 +n04146862 +n04146976 +n04147183 +n04147291 +n04147495 +n04147793 +n04147916 +n04148054 +n04148285 +n04148464 +n04148579 +n04148703 +n04149083 +n04149374 +n04149813 +n04150153 +n04150273 +n04150371 +n04150980 +n04151108 +n04151581 +n04151940 +n04152387 +n04152593 +n04153025 +n04153330 +n04153751 +n04154152 +n04154340 +n04154565 +n04154753 +n04154854 +n04154938 +n04155068 +n04155177 +n04155457 +n04155625 +n04155735 +n04155889 +n04156040 +n04156140 +n04156297 +n04156411 +n04156591 +n04156814 +n04156946 +n04157099 +n04157320 +n04158002 +n04158138 +n04158250 +n04158672 +n04158807 +n04158956 +n04160036 +n04160261 +n04160372 +n04160586 +n04160847 +n04161010 +n04161358 +n04161981 +n04162433 +n04162706 +n04163530 +n04164002 +n04164199 +n04164406 +n04164757 +n04164868 +n04165409 +n04165675 +n04165945 +n04166111 +n04166281 +n04166436 +n04167346 +n04167489 +n04167661 +n04168084 +n04168199 +n04168472 +n04168541 +n04168840 +n04169437 +n04169597 +n04170037 +n04170384 +n04170515 +n04170694 +n04170933 +n04171208 +n04171459 +n04171629 +n04171831 +n04172107 +n04172230 +n04172342 +n04172512 +n04172607 +n04172776 +n04172904 +n04173046 +n04173172 +n04173511 +n04173907 +n04174026 +n04174101 +n04174234 +n04174500 +n04174705 +n04175039 +n04175147 +n04175574 +n04176068 +n04176190 +n04176295 +n04176528 +n04177041 +n04177329 +n04177545 +n04177654 +n04177755 +n04177820 +n04177931 +n04178190 +n04178329 +n04178668 +n04179126 +n04179712 +n04179824 +n04179913 +n04180063 +n04180229 +n04180888 +n04181083 +n04181228 +n04181561 +n04181718 +n04182152 +n04182322 +n04183217 +n04183329 +n04183957 +n04184095 +n04184316 +n04184435 +n04184600 +n04184880 +n04185071 +n04185529 +n04185804 +n04185946 +n04186051 +n04186268 +n04186455 +n04186624 +n04186848 +n04187061 +n04187233 +n04187547 +n04187751 +n04187885 +n04187970 +n04188064 +n04188179 +n04189092 +n04189282 +n04189651 +n04189816 +n04190052 +n04190376 +n04190464 +n04190747 +n04190997 +n04191150 +n04191595 +n04191943 +n04192238 +n04192361 +n04192521 +n04192698 +n04192858 +n04193179 +n04193377 +n04193742 +n04193883 +n04194009 +n04194127 +n04194289 +n04196080 +n04196502 +n04196803 +n04196925 +n04197110 +n04197391 +n04197781 +n04197878 +n04198015 +n04198233 +n04198355 +n04198453 +n04198562 +n04198722 +n04198797 +n04199027 +n04200000 +n04200258 +n04200537 +n04200800 +n04200908 +n04201064 +n04201297 +n04201733 +n04202142 +n04202282 +n04202417 +n04203356 +n04204081 +n04204238 +n04204347 +n04204755 +n04205062 +n04205318 +n04205505 +n04205613 +n04206070 +n04206225 +n04206356 +n04206570 +n04206790 +n04207151 +n04207343 +n04207596 +n04207763 +n04207903 +n04208065 +n04208210 +n04208427 +n04208582 +n04208760 +n04208936 +n04209133 +n04209239 +n04209509 +n04209613 +n04209811 +n04210012 +n04210120 +n04210288 +n04210390 +n04210591 +n04210858 +n04211001 +n04211219 +n04211356 +n04211528 +n04211857 +n04211970 +n04212165 +n04212282 +n04212467 +n04212810 +n04213105 +n04213264 +n04213353 +n04213530 +n04214046 +n04214282 +n04214413 +n04214649 +n04215153 +n04215402 +n04215588 +n04215800 +n04215910 +n04216634 +n04216860 +n04216963 +n04217387 +n04217546 +n04217718 +n04217882 +n04218564 +n04218921 +n04219185 +n04219424 +n04219580 +n04220250 +n04220805 +n04221076 +n04221673 +n04221823 +n04222210 +n04222307 +n04222470 +n04222723 +n04222847 +n04223066 +n04223170 +n04223299 +n04224395 +n04224543 +n04224842 +n04225031 +n04225222 +n04225729 +n04225987 +n04226322 +n04226464 +n04226537 +n04226826 +n04226962 +n04227050 +n04227144 +n04227519 +n04227787 +n04227900 +n04228054 +n04228215 +n04228422 +n04228581 +n04228693 +n04229007 +n04229107 +n04229480 +n04229620 +n04229737 +n04229816 +n04229959 +n04230387 +n04230487 +n04230603 +n04230707 +n04230808 +n04231272 +n04231693 +n04231905 +n04232153 +n04232312 +n04232437 +n04232800 +n04233027 +n04233124 +n04233295 +n04233715 +n04233832 +n04234160 +n04234260 +n04234455 +n04234670 +n04234763 +n04234887 +n04235291 +n04235646 +n04235771 +n04235860 +n04236001 +n04236377 +n04236702 +n04236809 +n04236935 +n04237174 +n04237287 +n04237423 +n04238128 +n04238321 +n04238617 +n04238763 +n04238953 +n04239074 +n04239218 +n04239333 +n04239436 +n04239639 +n04239786 +n04239900 +n04240434 +n04240752 +n04240867 +n04241042 +n04241249 +n04241394 +n04241573 +n04242084 +n04242315 +n04242408 +n04242587 +n04242704 +n04243003 +n04243142 +n04243251 +n04243546 +n04243941 +n04244379 +n04244847 +n04244997 +n04245218 +n04245412 +n04245508 +n04245847 +n04246060 +n04246271 +n04246459 +n04246731 +n04246855 +n04247011 +n04247440 +n04247544 +n04247630 +n04247736 +n04247876 +n04248209 +n04248396 +n04248507 +n04248851 +n04249415 +n04249582 +n04249882 +n04250224 +n04250473 +n04250599 +n04250692 +n04250850 +n04251144 +n04251701 +n04251791 +n04252077 +n04252225 +n04252331 +n04252560 +n04252653 +n04253057 +n04253168 +n04253304 +n04253931 +n04254009 +n04254120 +n04254450 +n04254680 +n04254777 +n04255163 +n04255346 +n04255499 +n04255586 +n04255670 +n04255768 +n04255899 +n04256318 +n04256520 +n04256758 +n04256891 +n04257223 +n04257684 +n04257790 +n04257986 +n04258138 +n04258333 +n04258438 +n04258618 +n04258732 +n04258859 +n04259202 +n04259468 +n04259630 +n04260192 +n04260364 +n04260589 +n04261116 +n04261281 +n04261369 +n04261506 +n04261638 +n04261767 +n04261868 +n04262161 +n04262530 +n04262678 +n04262869 +n04263257 +n04263336 +n04263502 +n04263760 +n04263950 +n04264134 +n04264233 +n04264361 +n04264485 +n04264628 +n04264765 +n04264914 +n04265275 +n04265428 +n04265904 +n04266014 +n04266162 +n04266375 +n04266486 +n04266849 +n04266968 +n04267091 +n04267165 +n04267246 +n04267435 +n04267577 +n04267985 +n04268142 +n04268275 +n04268418 +n04268565 +n04268799 +n04269086 +n04269270 +n04269502 +n04269668 +n04269822 +n04269944 +n04270147 +n04270371 +n04270576 +n04270891 +n04271148 +n04271531 +n04271793 +n04271891 +n04272054 +n04272389 +n04272782 +n04272928 +n04273064 +n04273285 +n04273569 +n04273659 +n04273796 +n04273972 +n04274686 +n04274985 +n04275093 +n04275175 +n04275283 +n04275548 +n04275661 +n04275904 +n04277352 +n04277493 +n04277669 +n04277826 +n04278247 +n04278353 +n04278447 +n04278605 +n04278932 +n04279063 +n04279172 +n04279353 +n04279462 +n04279858 +n04279987 +n04280259 +n04280373 +n04280487 +n04280845 +n04280970 +n04281260 +n04281375 +n04281571 +n04281998 +n04282231 +n04282494 +n04282872 +n04282992 +n04283096 +n04283255 +n04283378 +n04283585 +n04283784 +n04283905 +n04284002 +n04284341 +n04284438 +n04284572 +n04284869 +n04285008 +n04285146 +n04285622 +n04285803 +n04285965 +n04286128 +n04286575 +n04286960 +n04287351 +n04287451 +n04287747 +n04287898 +n04287986 +n04288165 +n04288272 +n04288533 +n04288673 +n04289027 +n04289195 +n04289449 +n04289576 +n04289690 +n04289827 +n04290079 +n04290259 +n04290507 +n04290615 +n04290762 +n04291069 +n04291242 +n04291759 +n04291992 +n04292080 +n04292221 +n04292414 +n04292572 +n04292921 +n04293119 +n04293258 +n04293744 +n04294212 +n04294426 +n04294614 +n04294879 +n04295081 +n04295353 +n04295571 +n04295777 +n04295881 +n04296562 +n04297098 +n04297750 +n04297847 +n04298053 +n04298661 +n04298765 +n04299215 +n04299370 +n04299963 +n04300358 +n04300509 +n04300643 +n04301000 +n04301242 +n04301474 +n04301760 +n04302200 +n04302863 +n04302988 +n04303095 +n04303258 +n04303357 +n04303497 +n04304215 +n04304375 +n04304680 +n04305016 +n04305210 +n04305323 +n04305471 +n04305572 +n04305947 +n04306080 +n04306592 +n04306847 +n04307419 +n04307767 +n04307878 +n04307986 +n04308084 +n04308273 +n04308397 +n04308583 +n04308807 +n04308915 +n04309049 +n04309348 +n04309548 +n04309833 +n04310018 +n04310157 +n04310507 +n04310604 +n04310721 +n04310904 +n04311004 +n04311174 +n04311595 +n04312020 +n04312154 +n04312432 +n04312654 +n04312756 +n04312916 +n04313220 +n04313503 +n04313628 +n04314107 +n04314216 +n04314522 +n04314632 +n04314914 +n04315342 +n04315713 +n04315828 +n04315948 +n04316498 +n04316815 +n04316924 +n04317063 +n04317175 +n04317325 +n04317420 +n04317833 +n04317976 +n04318131 +n04318787 +n04318892 +n04318982 +n04319545 +n04319774 +n04319937 +n04320405 +n04320598 +n04320871 +n04320973 +n04321121 +n04321453 +n04322026 +n04322531 +n04322692 +n04322801 +n04323519 +n04323819 +n04324120 +n04324297 +n04324387 +n04324515 +n04325041 +n04325208 +n04325704 +n04325804 +n04325968 +n04326547 +n04326676 +n04326799 +n04326896 +n04327204 +n04327544 +n04327682 +n04328054 +n04328186 +n04328329 +n04328580 +n04328703 +n04328946 +n04329477 +n04329681 +n04329834 +n04329958 +n04330109 +n04330189 +n04330267 +n04330340 +n04330669 +n04330746 +n04330896 +n04330998 +n04331277 +n04331443 +n04331639 +n04331765 +n04331892 +n04332074 +n04332243 +n04332580 +n04332987 +n04333129 +n04333869 +n04334105 +n04334365 +n04334504 +n04334599 +n04335209 +n04335435 +n04335693 +n04335886 +n04336792 +n04337157 +n04337287 +n04337503 +n04337650 +n04338517 +n04338963 +n04339062 +n04339191 +n04339638 +n04339879 +n04340019 +n04340521 +n04340750 +n04340935 +n04341133 +n04341288 +n04341414 +n04341686 +n04343511 +n04343630 +n04343740 +n04344003 +n04344734 +n04344873 +n04345028 +n04345201 +n04345787 +n04346003 +n04346157 +n04346328 +n04346428 +n04346511 +n04346679 +n04346855 +n04347119 +n04347519 +n04347754 +n04348070 +n04348184 +n04348359 +n04348988 +n04349189 +n04349306 +n04349401 +n04349913 +n04350104 +n04350235 +n04350458 +n04350581 +n04350688 +n04350769 +n04350905 +n04351550 +n04351699 +n04353573 +n04354026 +n04354182 +n04354387 +n04354487 +n04354589 +n04355115 +n04355267 +n04355338 +n04355511 +n04355684 +n04355821 +n04355933 +n04356056 +n04356595 +n04356772 +n04356925 +n04357121 +n04357314 +n04357531 +n04357930 +n04358117 +n04358256 +n04358491 +n04358707 +n04358874 +n04359034 +n04359124 +n04359217 +n04359335 +n04359500 +n04359589 +n04360501 +n04360798 +n04360914 +n04361095 +n04361260 +n04361937 +n04362624 +n04362821 +n04362972 +n04363082 +n04363210 +n04363412 +n04363671 +n04363777 +n04363874 +n04363991 +n04364160 +n04364397 +n04364545 +n04364827 +n04364994 +n04365112 +n04365229 +n04365328 +n04365484 +n04365751 +n04366033 +n04366116 +n04366367 +n04366832 +n04367011 +n04367371 +n04367480 +n04367746 +n04367950 +n04368109 +n04368235 +n04368365 +n04368496 +n04368695 +n04368840 +n04369025 +n04369282 +n04369485 +n04369618 +n04370048 +n04370288 +n04370456 +n04370600 +n04370774 +n04370955 +n04371050 +n04371430 +n04371563 +n04371774 +n04371979 +n04372370 +n04373089 +n04373428 +n04373563 +n04373704 +n04373795 +n04373894 +n04374315 +n04374521 +n04374735 +n04374907 +n04375080 +n04375241 +n04375405 +n04375615 +n04375775 +n04375926 +n04376400 +n04376876 +n04377057 +n04378489 +n04378651 +n04378956 +n04379096 +n04379243 +n04379964 +n04380255 +n04380346 +n04380533 +n04380916 +n04381073 +n04381450 +n04381587 +n04381724 +n04381860 +n04381994 +n04382334 +n04382438 +n04382537 +n04382695 +n04382880 +n04383015 +n04383130 +n04383301 +n04383839 +n04383923 +n04384593 +n04384910 +n04385079 +n04385157 +n04385536 +n04385799 +n04386051 +n04386456 +n04386664 +n04386792 +n04387095 +n04387201 +n04387261 +n04387400 +n04387531 +n04387706 +n04387932 +n04388040 +n04388162 +n04388473 +n04388574 +n04388743 +n04389033 +n04389430 +n04389521 +n04389718 +n04389854 +n04389999 +n04390483 +n04390577 +n04390873 +n04390977 +n04391445 +n04391838 +n04392113 +n04392526 +n04392764 +n04392985 +n04393095 +n04393301 +n04393549 +n04393808 +n04393913 +n04394031 +n04394261 +n04394421 +n04394630 +n04395024 +n04395106 +n04395332 +n04395651 +n04395875 +n04396226 +n04396335 +n04396650 +n04396808 +n04396902 +n04397027 +n04397168 +n04397261 +n04397452 +n04397645 +n04397768 +n04397860 +n04398044 +n04398497 +n04398688 +n04398834 +n04398951 +n04399046 +n04399158 +n04399537 +n04399846 +n04400109 +n04400289 +n04400499 +n04400737 +n04400899 +n04401088 +n04401578 +n04401680 +n04401828 +n04401949 +n04402057 +n04402342 +n04402449 +n04402580 +n04402746 +n04402984 +n04403413 +n04403524 +n04403638 +n04403925 +n04404072 +n04404200 +n04404412 +n04404817 +n04404997 +n04405540 +n04405762 +n04405907 +n04406239 +n04406552 +n04406687 +n04406817 +n04407257 +n04407435 +n04407686 +n04408871 +n04409011 +n04409128 +n04409279 +n04409384 +n04409515 +n04409625 +n04409806 +n04409911 +n04410086 +n04410365 +n04410485 +n04410565 +n04410663 +n04410760 +n04410886 +n04411019 +n04411264 +n04411835 +n04411966 +n04412097 +n04412300 +n04412416 +n04413151 +n04413419 +n04413969 +n04414101 +n04414199 +n04414319 +n04414476 +n04414675 +n04414909 +n04415257 +n04415663 +n04415815 +n04416005 +n04416901 +n04417086 +n04417180 +n04417361 +n04417672 +n04417809 +n04418357 +n04418644 +n04419073 +n04419642 +n04419868 +n04420024 +n04420720 +n04421083 +n04421258 +n04421417 +n04421582 +n04421740 +n04421872 +n04422409 +n04422566 +n04422727 +n04422875 +n04423552 +n04423687 +n04423845 +n04424692 +n04425804 +n04425977 +n04426184 +n04426316 +n04426427 +n04427216 +n04427473 +n04427559 +n04427715 +n04427857 +n04428008 +n04428191 +n04428382 +n04428634 +n04429038 +n04429376 +n04430475 +n04430605 +n04430896 +n04431025 +n04431436 +n04431648 +n04431745 +n04431925 +n04432043 +n04432203 +n04432662 +n04432785 +n04433377 +n04433585 +n04434207 +n04434531 +n04434932 +n04435180 +n04435552 +n04435653 +n04435759 +n04435870 +n04436012 +n04436185 +n04436329 +n04436401 +n04436542 +n04436832 +n04436992 +n04437276 +n04437380 +n04437670 +n04437953 +n04438304 +n04438507 +n04438643 +n04438897 +n04439505 +n04439585 +n04439712 +n04440597 +n04440963 +n04441093 +n04441528 +n04441662 +n04441790 +n04442312 +n04442441 +n04442582 +n04442741 +n04443164 +n04443257 +n04443433 +n04443766 +n04444121 +n04444218 +n04444749 +n04444953 +n04445040 +n04445154 +n04445327 +n04445610 +n04445782 +n04445952 +n04446162 +n04446276 +n04446844 +n04447028 +n04447156 +n04447276 +n04447443 +n04447861 +n04448070 +n04448185 +n04448361 +n04449290 +n04449449 +n04449550 +n04449700 +n04449966 +n04450133 +n04450243 +n04450465 +n04450640 +n04450749 +n04450994 +n04451139 +n04451318 +n04451636 +n04451818 +n04452528 +n04452615 +n04452757 +n04452848 +n04453037 +n04453156 +n04453390 +n04453666 +n04453910 +n04454654 +n04454792 +n04454908 +n04455048 +n04455250 +n04455579 +n04455652 +n04456011 +n04456115 +n04456472 +n04456734 +n04457157 +n04457326 +n04457474 +n04457638 +n04457767 +n04457910 +n04458201 +n04458633 +n04458843 +n04459018 +n04459122 +n04459243 +n04459362 +n04459610 +n04459773 +n04459909 +n04460130 +n04461437 +n04461570 +n04461696 +n04461879 +n04462011 +n04462240 +n04462576 +n04463679 +n04464125 +n04464615 +n04464852 +n04465050 +n04465203 +n04465358 +n04465501 +n04465666 +n04466871 +n04467099 +n04467307 +n04467506 +n04467665 +n04467899 +n04468005 +n04469003 +n04469251 +n04469514 +n04469684 +n04469813 +n04470741 +n04471148 +n04471315 +n04471632 +n04471912 +n04472243 +n04472563 +n04472726 +n04472961 +n04473108 +n04473275 +n04473884 +n04474035 +n04474187 +n04474466 +n04475309 +n04475411 +n04475496 +n04475631 +n04475749 +n04475900 +n04476116 +n04476259 +n04476526 +n04476831 +n04476972 +n04477219 +n04477387 +n04477548 +n04477725 +n04478066 +n04478383 +n04478512 +n04478657 +n04479046 +n04479287 +n04479405 +n04479526 +n04479694 +n04479823 +n04479939 +n04480033 +n04480141 +n04480303 +n04480527 +n04480853 +n04480995 +n04481524 +n04481642 +n04482177 +n04482297 +n04482393 +n04482975 +n04483073 +n04483307 +n04483925 +n04484024 +n04484432 +n04485082 +n04485423 +n04485586 +n04485750 +n04485884 +n04486054 +n04486213 +n04486322 +n04486616 +n04486934 +n04487081 +n04487394 +n04487724 +n04487894 +n04488202 +n04488427 +n04488530 +n04488742 +n04488857 +n04489008 +n04489695 +n04489817 +n04490091 +n04491312 +n04491388 +n04491638 +n04491769 +n04491934 +n04492060 +n04492157 +n04492375 +n04492749 +n04493109 +n04493259 +n04493381 +n04494204 +n04495051 +n04495183 +n04495310 +n04495450 +n04495555 +n04495698 +n04495843 +n04496614 +n04496726 +n04496872 +n04497249 +n04497442 +n04497570 +n04497801 +n04498275 +n04498389 +n04498523 +n04498873 +n04499062 +n04499300 +n04499446 +n04499554 +n04499810 +n04500060 +n04500390 +n04501127 +n04501281 +n04501370 +n04501550 +n04501837 +n04501947 +n04502059 +n04502197 +n04502502 +n04502670 +n04502851 +n04502989 +n04503073 +n04503155 +n04503269 +n04503413 +n04503499 +n04503593 +n04503705 +n04504038 +n04504141 +n04504770 +n04505036 +n04505345 +n04505470 +n04505888 +n04506289 +n04506402 +n04506506 +n04506688 +n04506895 +n04506994 +n04507155 +n04507326 +n04507453 +n04507689 +n04508163 +n04508489 +n04508949 +n04509171 +n04509260 +n04509417 +n04509592 +n04510706 +n04511002 +n04513827 +n04513998 +n04514095 +n04514241 +n04514648 +n04515003 +n04515444 +n04515729 +n04515890 +n04516116 +n04516214 +n04516354 +n04516672 +n04517211 +n04517408 +n04517823 +n04517999 +n04518132 +n04518343 +n04518643 +n04518764 +n04519153 +n04519536 +n04519728 +n04519887 +n04520170 +n04520382 +n04520784 +n04520962 +n04521571 +n04521863 +n04521987 +n04522168 +n04523525 +n04523831 +n04524142 +n04524313 +n04524594 +n04524716 +n04524941 +n04525038 +n04525191 +n04525305 +n04525417 +n04525584 +n04525821 +n04526520 +n04526800 +n04526964 +n04527648 +n04528079 +n04528968 +n04529108 +n04529681 +n04529962 +n04530283 +n04530456 +n04530566 +n04531098 +n04531873 +n04532022 +n04532106 +n04532398 +n04532504 +n04532670 +n04532831 +n04533042 +n04533199 +n04533499 +n04533594 +n04533700 +n04533802 +n04533946 +n04534127 +n04534359 +n04534520 +n04534895 +n04535252 +n04535370 +n04535524 +n04536153 +n04536335 +n04536465 +n04536595 +n04536765 +n04536866 +n04537436 +n04538249 +n04538403 +n04538552 +n04538878 +n04539053 +n04539203 +n04539407 +n04539794 +n04540053 +n04540255 +n04540397 +n04540761 +n04541136 +n04541320 +n04541662 +n04541777 +n04541987 +n04542095 +n04542329 +n04542474 +n04542595 +n04542715 +n04542858 +n04542943 +n04543158 +n04543509 +n04543636 +n04543772 +n04543924 +n04543996 +n04544325 +n04544450 +n04545305 +n04545471 +n04545748 +n04545858 +n04545984 +n04546081 +n04546194 +n04546340 +n04546595 +n04546855 +n04547592 +n04548280 +n04548362 +n04549028 +n04549122 +n04549629 +n04549721 +n04549919 +n04550184 +n04550676 +n04551055 +n04551833 +n04552097 +n04552348 +n04552551 +n04552696 +n04553389 +n04553561 +n04553703 +n04554211 +n04554406 +n04554684 +n04554871 +n04554998 +n04555291 +n04555400 +n04555600 +n04555700 +n04555897 +n04556408 +n04556533 +n04556664 +n04556948 +n04557308 +n04557522 +n04557648 +n04557751 +n04558059 +n04558199 +n04558478 +n04558804 +n04559023 +n04559166 +n04559451 +n04559620 +n04559730 +n04559910 +n04559994 +n04560113 +n04560292 +n04560502 +n04560619 +n04560804 +n04560882 +n04561010 +n04561287 +n04561422 +n04561734 +n04561857 +n04561965 +n04562122 +n04562262 +n04562496 +n04562935 +n04563020 +n04563204 +n04563413 +n04563560 +n04563790 +n04564278 +n04564581 +n04565039 +n04565375 +n04566257 +n04566561 +n04566756 +n04567098 +n04567593 +n04567746 +n04568069 +n04568557 +n04568713 +n04568841 +n04569063 +n04569520 +n04569822 +n04570118 +n04570214 +n04570416 +n04570532 +n04570815 +n04570958 +n04571292 +n04571566 +n04571686 +n04571800 +n04571958 +n04572121 +n04572235 +n04572935 +n04573045 +n04573281 +n04573379 +n04573513 +n04573625 +n04573832 +n04573937 +n04574067 +n04574348 +n04574471 +n04574606 +n04574999 +n04575723 +n04575824 +n04576002 +n04576211 +n04576971 +n04577139 +n04577293 +n04577426 +n04577567 +n04577769 +n04578112 +n04578329 +n04578559 +n04578708 +n04578801 +n04578934 +n04579056 +n04579145 +n04579230 +n04579432 +n04579667 +n04579986 +n04580493 +n04581102 +n04581595 +n04581829 +n04582205 +n04582349 +n04582771 +n04582869 +n04583022 +n04583212 +n04583620 +n04583888 +n04583967 +n04584056 +n04584207 +n04584373 +n04585128 +n04585318 +n04585456 +n04585626 +n04585745 +n04585980 +n04586072 +n04586581 +n04586932 +n04587327 +n04587404 +n04587559 +n04587648 +n04588739 +n04589190 +n04589325 +n04589434 +n04589593 +n04589890 +n04590021 +n04590129 +n04590263 +n04590553 +n04590746 +n04590933 +n04591056 +n04591157 +n04591249 +n04591359 +n04591517 +n04591631 +n04591713 +n04591887 +n04592005 +n04592099 +n04592356 +n04592465 +n04592596 +n04592741 +n04593077 +n04593185 +n04593376 +n04593524 +n04593629 +n04593866 +n04594114 +n04594218 +n04594489 +n04594742 +n04594828 +n04594919 +n04595028 +n04595285 +n04595501 +n04595611 +n04595762 +n04595855 +n04596116 +n04596492 +n04596742 +n04596852 +n04597066 +n04597309 +n04597400 +n04597804 +n04597913 +n04598136 +n04598318 +n04598416 +n04598582 +n04598965 +n04599124 +n04599235 +n04600312 +n04600486 +n04600912 +n04601041 +n04601159 +n04601938 +n04602762 +n04602840 +n04602956 +n04603399 +n04603729 +n04603872 +n04604276 +n04604644 +n04604806 +n04605057 +n04605163 +n04605321 +n04605446 +n04605572 +n04605726 +n04606251 +n04606574 +n04607035 +n04607242 +n04607640 +n04607759 +n04607869 +n04607982 +n04608329 +n04608435 +n04608567 +n04608809 +n04608923 +n04609531 +n04609651 +n04609811 +n04610013 +n04610176 +n04610274 +n04610503 +n04610676 +n04611351 +n04611795 +n04611916 +n04612026 +n04612159 +n04612257 +n04612373 +n04612504 +n04612840 +n04613015 +n04613158 +n04613696 +n04613939 +n04614505 +n04614655 +n04614844 +n04615149 +n04615226 +n04615644 +n04682018 +n04950713 +n04950952 +n04951071 +n04951186 +n04951373 +n04951716 +n04951875 +n04953296 +n04953678 +n04955160 +n04957356 +n04957589 +n04958634 +n04958865 +n04959061 +n04959230 +n04959672 +n04960277 +n04960582 +n04961062 +n04961331 +n04961691 +n04962062 +n04962240 +n04963111 +n04963307 +n04963588 +n04963740 +n04964001 +n04964799 +n04964878 +n04965179 +n04965451 +n04965661 +n04966543 +n04966941 +n04967191 +n04967561 +n04967674 +n04967801 +n04967882 +n04968056 +n04968139 +n04968749 +n04968895 +n04969242 +n04969540 +n04969798 +n04969952 +n04970059 +n04970312 +n04970398 +n04970470 +n04970631 +n04970916 +n04971211 +n04971313 +n04972350 +n04972451 +n04972801 +n04973020 +n04973291 +n04973386 +n04973585 +n04973669 +n04973816 +n04974145 +n04974340 +n04974859 +n04975739 +n04976319 +n04976952 +n04977412 +n04978561 +n04979002 +n04979307 +n04981658 +n05102764 +n05218119 +n05233741 +n05235879 +n05238282 +n05239437 +n05241218 +n05241485 +n05241662 +n05242070 +n05242239 +n05242928 +n05244421 +n05244755 +n05244934 +n05245192 +n05257476 +n05257967 +n05258051 +n05258627 +n05259914 +n05260127 +n05260240 +n05261310 +n05262422 +n05262534 +n05262698 +n05263183 +n05263316 +n05263448 +n05265736 +n05266096 +n05266879 +n05278922 +n05279953 +n05282652 +n05285623 +n05302499 +n05314075 +n05399034 +n05399243 +n05399356 +n05418717 +n05427346 +n05442594 +n05447757 +n05448704 +n05448827 +n05449196 +n05449661 +n05449959 +n05450617 +n05451099 +n05451384 +n05453412 +n05453657 +n05453815 +n05454833 +n05454978 +n05455113 +n05458173 +n05458576 +n05459101 +n05459457 +n05459769 +n05460759 +n05464534 +n05467054 +n05467758 +n05468098 +n05468739 +n05469664 +n05469861 +n05475397 +n05482922 +n05486510 +n05491154 +n05526957 +n05538625 +n05539947 +n05541509 +n05542893 +n05545879 +n05571341 +n05578095 +n05581932 +n05584746 +n05586759 +n05604434 +n05716342 +n06008896 +n06209940 +n06254669 +n06255081 +n06255613 +n06259898 +n06262567 +n06262943 +n06263202 +n06263369 +n06263609 +n06263762 +n06263895 +n06266417 +n06266633 +n06266710 +n06266878 +n06266973 +n06267145 +n06267564 +n06267655 +n06267758 +n06267893 +n06267991 +n06271778 +n06272290 +n06272612 +n06272803 +n06273207 +n06273294 +n06273414 +n06273555 +n06273743 +n06273890 +n06273986 +n06274092 +n06274292 +n06274546 +n06274760 +n06274921 +n06275095 +n06275353 +n06275471 +n06276501 +n06276697 +n06276902 +n06277025 +n06277135 +n06277280 +n06278338 +n06278475 +n06281040 +n06281175 +n06340977 +n06359193 +n06359467 +n06359657 +n06415688 +n06417096 +n06418693 +n06419354 +n06423496 +n06470073 +n06591815 +n06592078 +n06592281 +n06592421 +n06595351 +n06596179 +n06596364 +n06596474 +n06596607 +n06596727 +n06596845 +n06613686 +n06614901 +n06616216 +n06618653 +n06625062 +n06785654 +n06793231 +n06794110 +n06874185 +n06883725 +n06892775 +n06998748 +n07005523 +n07248320 +n07273802 +n07461050 +n07556406 +n07556637 +n07556872 +n07556970 +n07557165 +n07557434 +n07560193 +n07560331 +n07560422 +n07560542 +n07560652 +n07560903 +n07561112 +n07561590 +n07561848 +n07562017 +n07562172 +n07562379 +n07562495 +n07562651 +n07562881 +n07562984 +n07563207 +n07563366 +n07563642 +n07563800 +n07564008 +n07564101 +n07564292 +n07564515 +n07564629 +n07564796 +n07564971 +n07565083 +n07565161 +n07565259 +n07565608 +n07565725 +n07565945 +n07566092 +n07566231 +n07566340 +n07566863 +n07567039 +n07567139 +n07567390 +n07567611 +n07567707 +n07567980 +n07568095 +n07568241 +n07568389 +n07568502 +n07568625 +n07568818 +n07568991 +n07569106 +n07569423 +n07569543 +n07569644 +n07569873 +n07570021 +n07570530 +n07570720 +n07572353 +n07572616 +n07572858 +n07572957 +n07573103 +n07573347 +n07573453 +n07573563 +n07573696 +n07574176 +n07574426 +n07574504 +n07574602 +n07574780 +n07574923 +n07575076 +n07575226 +n07575392 +n07575510 +n07575726 +n07575984 +n07576182 +n07576438 +n07576577 +n07576781 +n07576969 +n07577144 +n07577374 +n07577538 +n07577657 +n07577772 +n07577918 +n07578093 +n07579575 +n07579688 +n07579787 +n07579917 +n07580053 +n07580253 +n07580359 +n07580470 +n07580592 +n07581249 +n07581346 +n07581607 +n07581775 +n07581931 +n07582027 +n07582152 +n07582277 +n07582441 +n07582609 +n07582811 +n07582892 +n07582970 +n07583066 +n07583197 +n07583865 +n07583978 +n07584110 +n07584228 +n07584332 +n07584423 +n07584593 +n07584859 +n07584938 +n07585015 +n07585107 +n07585208 +n07585474 +n07585557 +n07585644 +n07585758 +n07585906 +n07585997 +n07586099 +n07586179 +n07586318 +n07586485 +n07586604 +n07586718 +n07586894 +n07587023 +n07587111 +n07587206 +n07587331 +n07587441 +n07587618 +n07587700 +n07587819 +n07587962 +n07588111 +n07588193 +n07588299 +n07588419 +n07588574 +n07588688 +n07588817 +n07588947 +n07589458 +n07589543 +n07589724 +n07589872 +n07589967 +n07590068 +n07590177 +n07590320 +n07590502 +n07590611 +n07590752 +n07590841 +n07590974 +n07591049 +n07591162 +n07591236 +n07591330 +n07591473 +n07591586 +n07591813 +n07591961 +n07592094 +n07592317 +n07592400 +n07592481 +n07592656 +n07592768 +n07592922 +n07593004 +n07593107 +n07593199 +n07593471 +n07593774 +n07593972 +n07594066 +n07594155 +n07594250 +n07594737 +n07594840 +n07595051 +n07595180 +n07595368 +n07595649 +n07595751 +n07595914 +n07596046 +n07596160 +n07596362 +n07596452 +n07596566 +n07596684 +n07596967 +n07597145 +n07597263 +n07597365 +n07598256 +n07598529 +n07598622 +n07598734 +n07598928 +n07599068 +n07599161 +n07599242 +n07599383 +n07599468 +n07599554 +n07599649 +n07599783 +n07599911 +n07599998 +n07600177 +n07600285 +n07600394 +n07600506 +n07600696 +n07600895 +n07601025 +n07601175 +n07601290 +n07601407 +n07601572 +n07601686 +n07601809 +n07602650 +n07604956 +n07605040 +n07605198 +n07605282 +n07605380 +n07605474 +n07605597 +n07605693 +n07605804 +n07605944 +n07606058 +n07606191 +n07606278 +n07606419 +n07606538 +n07606669 +n07606764 +n07606933 +n07607027 +n07607138 +n07607361 +n07607492 +n07607605 +n07607707 +n07607832 +n07607967 +n07608098 +n07608245 +n07608339 +n07608429 +n07608533 +n07608641 +n07608721 +n07608866 +n07608980 +n07609083 +n07609215 +n07609316 +n07609407 +n07609549 +n07609632 +n07609728 +n07609840 +n07610295 +n07610502 +n07610620 +n07610746 +n07610890 +n07611046 +n07611148 +n07611267 +n07611358 +n07611733 +n07611839 +n07611991 +n07612137 +n07612273 +n07612367 +n07612530 +n07612632 +n07612996 +n07613158 +n07613266 +n07613480 +n07613671 +n07613815 +n07614103 +n07614198 +n07614348 +n07614500 +n07614730 +n07614825 +n07615052 +n07615190 +n07615289 +n07615460 +n07615569 +n07615671 +n07615774 +n07615954 +n07616046 +n07616174 +n07616265 +n07616386 +n07616487 +n07616590 +n07616748 +n07616906 +n07617051 +n07617188 +n07617344 +n07617447 +n07617526 +n07617611 +n07617708 +n07617839 +n07617932 +n07618029 +n07618119 +n07618281 +n07618432 +n07618587 +n07618684 +n07618871 +n07619004 +n07619208 +n07619301 +n07619409 +n07619508 +n07619881 +n07620047 +n07620145 +n07620327 +n07620597 +n07620689 +n07621264 +n07621497 +n07621618 +n07623136 +n07624466 +n07624666 +n07624757 +n07624924 +n07625061 +n07625324 +n07627931 +n07628068 +n07628181 +n07631926 +n07639069 +n07641928 +n07642361 +n07642471 +n07642742 +n07642833 +n07642933 +n07643026 +n07643200 +n07643306 +n07643474 +n07643577 +n07643679 +n07643764 +n07643891 +n07643981 +n07644244 +n07648913 +n07648997 +n07650792 +n07650903 +n07651025 +n07654148 +n07654298 +n07655067 +n07655263 +n07663899 +n07665438 +n07666176 +n07672914 +n07678586 +n07678729 +n07678953 +n07679034 +n07679140 +n07679356 +n07680168 +n07680313 +n07680416 +n07680517 +n07680655 +n07680761 +n07680932 +n07681264 +n07681355 +n07681450 +n07681691 +n07681805 +n07681926 +n07682197 +n07682316 +n07682477 +n07682624 +n07682808 +n07682952 +n07683039 +n07683138 +n07683265 +n07683360 +n07683490 +n07683617 +n07683786 +n07684084 +n07684164 +n07684289 +n07684422 +n07684517 +n07684600 +n07684938 +n07685031 +n07685118 +n07685218 +n07685303 +n07685399 +n07685546 +n07685730 +n07685918 +n07686021 +n07686202 +n07686299 +n07686461 +n07686634 +n07686720 +n07686873 +n07687053 +n07687211 +n07687381 +n07687469 +n07687626 +n07687789 +n07688021 +n07688130 +n07688265 +n07688412 +n07688624 +n07688757 +n07688898 +n07689003 +n07689217 +n07689313 +n07689490 +n07689624 +n07689757 +n07689842 +n07690019 +n07690152 +n07690273 +n07690431 +n07690511 +n07690585 +n07690739 +n07690892 +n07691091 +n07691237 +n07691539 +n07691650 +n07691758 +n07691863 +n07691954 +n07692114 +n07692248 +n07692405 +n07692517 +n07692614 +n07692887 +n07693048 +n07693223 +n07693439 +n07693590 +n07693725 +n07693889 +n07693972 +n07694169 +n07694403 +n07694516 +n07694659 +n07694839 +n07695187 +n07695284 +n07695410 +n07695504 +n07695652 +n07695742 +n07695878 +n07695965 +n07696403 +n07696527 +n07696625 +n07696728 +n07696839 +n07696977 +n07697100 +n07697313 +n07697408 +n07697537 +n07697699 +n07697825 +n07698250 +n07698401 +n07698543 +n07698672 +n07698782 +n07700003 +n07703889 +n07704054 +n07704205 +n07704305 +n07705931 +n07707451 +n07708124 +n07708398 +n07708512 +n07708685 +n07708798 +n07709046 +n07709172 +n07709333 +n07709701 +n07709881 +n07710007 +n07710283 +n07710616 +n07710952 +n07711080 +n07711232 +n07711371 +n07711569 +n07711683 +n07711799 +n07711907 +n07712063 +n07712267 +n07712382 +n07712559 +n07712748 +n07712856 +n07712959 +n07713074 +n07713267 +n07713395 +n07713763 +n07713895 +n07714078 +n07714188 +n07714287 +n07714448 +n07714571 +n07714802 +n07714895 +n07714990 +n07715103 +n07715221 +n07715407 +n07715561 +n07715721 +n07716034 +n07716203 +n07716358 +n07716504 +n07716649 +n07716750 +n07716906 +n07717070 +n07717410 +n07717556 +n07717714 +n07717858 +n07718068 +n07718195 +n07718329 +n07718472 +n07718671 +n07718747 +n07718920 +n07719058 +n07719213 +n07719330 +n07719437 +n07719616 +n07719756 +n07719839 +n07719980 +n07720084 +n07720185 +n07720277 +n07720442 +n07720615 +n07720875 +n07721018 +n07721118 +n07721195 +n07721325 +n07721456 +n07721678 +n07721833 +n07721942 +n07722052 +n07722217 +n07722390 +n07722485 +n07722666 +n07722763 +n07722888 +n07723039 +n07723177 +n07723330 +n07723559 +n07723753 +n07723968 +n07724078 +n07724173 +n07724269 +n07724492 +n07724654 +n07724819 +n07724943 +n07725158 +n07725255 +n07725376 +n07725531 +n07725663 +n07725789 +n07725888 +n07726009 +n07726095 +n07726230 +n07726386 +n07726525 +n07726672 +n07726796 +n07727048 +n07727140 +n07727252 +n07727377 +n07727458 +n07727578 +n07727741 +n07727868 +n07728053 +n07728181 +n07728284 +n07728391 +n07728585 +n07728708 +n07728804 +n07729000 +n07729142 +n07729225 +n07729384 +n07729485 +n07729828 +n07729926 +n07730033 +n07730207 +n07730320 +n07730406 +n07730562 +n07730708 +n07730855 +n07731006 +n07731122 +n07731284 +n07731436 +n07731587 +n07731767 +n07731952 +n07732168 +n07732302 +n07732433 +n07732525 +n07732636 +n07732747 +n07732904 +n07733005 +n07733124 +n07733217 +n07733394 +n07733567 +n07733712 +n07733847 +n07734017 +n07734183 +n07734292 +n07734417 +n07734555 +n07734744 +n07734879 +n07735052 +n07735179 +n07735294 +n07735404 +n07735510 +n07735687 +n07735803 +n07735981 +n07736087 +n07736256 +n07736371 +n07736527 +n07736692 +n07736813 +n07736971 +n07737081 +n07737594 +n07737745 +n07738105 +n07738224 +n07739035 +n07739125 +n07739344 +n07739506 +n07739923 +n07740033 +n07740115 +n07740220 +n07740342 +n07740461 +n07740597 +n07740744 +n07740855 +n07740954 +n07741138 +n07741235 +n07741357 +n07741461 +n07741623 +n07741706 +n07741804 +n07741888 +n07742012 +n07742224 +n07742313 +n07742415 +n07742513 +n07742605 +n07742704 +n07743224 +n07743384 +n07743544 +n07743723 +n07743902 +n07744057 +n07744246 +n07744430 +n07744559 +n07744682 +n07744811 +n07745046 +n07745197 +n07745357 +n07745466 +n07745661 +n07745940 +n07746038 +n07746186 +n07746334 +n07746551 +n07746749 +n07746910 +n07747055 +n07747607 +n07747811 +n07747951 +n07748157 +n07748276 +n07748416 +n07748574 +n07748753 +n07748912 +n07749095 +n07749192 +n07749312 +n07749446 +n07749582 +n07749731 +n07749870 +n07749969 +n07750146 +n07750299 +n07750449 +n07750586 +n07750736 +n07750872 +n07751004 +n07751148 +n07751280 +n07751451 +n07751737 +n07751858 +n07751977 +n07752109 +n07752264 +n07752377 +n07752514 +n07752602 +n07752664 +n07752782 +n07752874 +n07752966 +n07753113 +n07753275 +n07753448 +n07753592 +n07753743 +n07753980 +n07754155 +n07754279 +n07754451 +n07754684 +n07754894 +n07755089 +n07755262 +n07755411 +n07755619 +n07755707 +n07755929 +n07756096 +n07756325 +n07756499 +n07756641 +n07756838 +n07756951 +n07757132 +n07757312 +n07757511 +n07757602 +n07757753 +n07757874 +n07757990 +n07758125 +n07758260 +n07758407 +n07758582 +n07758680 +n07758950 +n07759194 +n07759324 +n07759424 +n07759576 +n07759691 +n07759816 +n07760070 +n07760153 +n07760297 +n07760395 +n07760501 +n07760673 +n07760755 +n07760859 +n07761141 +n07761309 +n07761611 +n07761777 +n07761954 +n07762114 +n07762244 +n07762373 +n07762534 +n07762740 +n07762913 +n07763107 +n07763290 +n07763483 +n07763629 +n07763792 +n07763987 +n07764155 +n07764315 +n07764486 +n07764630 +n07764847 +n07765073 +n07765208 +n07765361 +n07765517 +n07765612 +n07765728 +n07765862 +n07765999 +n07766173 +n07766409 +n07766530 +n07766723 +n07766891 +n07767002 +n07767171 +n07767344 +n07767549 +n07767709 +n07767847 +n07768068 +n07768139 +n07768230 +n07768318 +n07768423 +n07768590 +n07768694 +n07768858 +n07769102 +n07769306 +n07769465 +n07769584 +n07769731 +n07769886 +n07770034 +n07770180 +n07770439 +n07770571 +n07770763 +n07770869 +n07771082 +n07771212 +n07771405 +n07771539 +n07771731 +n07771891 +n07772026 +n07772147 +n07772274 +n07772413 +n07772788 +n07772935 +n07773428 +n07774182 +n07774295 +n07774479 +n07774596 +n07774719 +n07774842 +n07775050 +n07775197 +n07783827 +n07785487 +n07800091 +n07800487 +n07800636 +n07800740 +n07801007 +n07801091 +n07801342 +n07801508 +n07801709 +n07801779 +n07801892 +n07802026 +n07802152 +n07802246 +n07802417 +n07802767 +n07802863 +n07802963 +n07803093 +n07803213 +n07803310 +n07803408 +n07803545 +n07803779 +n07803895 +n07803992 +n07804152 +n07804323 +n07804543 +n07804657 +n07804771 +n07804900 +n07805006 +n07805254 +n07805389 +n07805478 +n07805594 +n07805731 +n07805966 +n07806043 +n07806120 +n07806221 +n07806633 +n07806774 +n07806879 +n07807002 +n07807171 +n07807317 +n07807472 +n07807594 +n07807710 +n07807834 +n07807922 +n07808022 +n07808166 +n07808268 +n07808352 +n07808479 +n07808587 +n07808675 +n07808806 +n07808904 +n07809096 +n07809368 +n07810531 +n07810907 +n07811416 +n07812046 +n07812184 +n07812662 +n07812790 +n07812913 +n07813107 +n07813324 +n07813495 +n07813579 +n07813717 +n07813833 +n07814007 +n07814203 +n07814390 +n07814487 +n07814634 +n07814790 +n07814925 +n07815163 +n07815294 +n07815424 +n07815588 +n07815839 +n07815956 +n07816052 +n07816164 +n07816296 +n07816398 +n07816575 +n07816726 +n07816839 +n07817024 +n07817160 +n07817315 +n07817465 +n07817599 +n07817758 +n07817871 +n07818029 +n07818133 +n07818277 +n07818422 +n07818572 +n07818689 +n07818825 +n07818995 +n07819166 +n07819303 +n07819480 +n07819682 +n07819769 +n07819896 +n07820036 +n07820145 +n07820297 +n07820497 +n07820683 +n07820814 +n07820960 +n07821107 +n07821260 +n07821404 +n07821610 +n07821758 +n07821919 +n07822053 +n07822197 +n07822323 +n07822518 +n07822687 +n07822845 +n07823105 +n07823280 +n07823369 +n07823460 +n07823591 +n07823698 +n07823814 +n07823951 +n07824191 +n07824268 +n07824383 +n07824502 +n07824702 +n07824863 +n07824988 +n07825194 +n07825399 +n07825496 +n07825597 +n07825717 +n07825850 +n07825972 +n07826091 +n07826250 +n07826340 +n07826453 +n07826544 +n07826653 +n07826930 +n07827130 +n07827284 +n07827410 +n07827554 +n07827750 +n07827896 +n07828041 +n07828156 +n07828275 +n07828378 +n07828642 +n07828987 +n07829248 +n07829331 +n07829412 +n07830493 +n07830593 +n07830690 +n07830841 +n07830986 +n07831146 +n07831267 +n07831450 +n07831663 +n07831821 +n07831955 +n07832099 +n07832202 +n07832307 +n07832416 +n07832592 +n07832741 +n07832902 +n07833333 +n07833535 +n07833672 +n07833816 +n07833951 +n07834065 +n07834160 +n07834286 +n07834507 +n07834618 +n07834774 +n07834872 +n07835051 +n07835173 +n07835331 +n07835457 +n07835547 +n07835701 +n07835823 +n07835921 +n07836077 +n07836269 +n07836456 +n07836600 +n07836731 +n07836838 +n07837002 +n07837110 +n07837234 +n07837362 +n07837545 +n07837630 +n07837755 +n07837912 +n07838073 +n07838233 +n07838441 +n07838551 +n07838659 +n07838811 +n07838905 +n07839055 +n07839172 +n07839312 +n07839478 +n07839593 +n07839730 +n07839864 +n07840027 +n07840124 +n07840219 +n07840304 +n07840395 +n07840520 +n07840672 +n07840804 +n07841037 +n07841345 +n07841495 +n07841639 +n07841800 +n07841907 +n07842044 +n07842130 +n07842202 +n07842308 +n07842433 +n07842605 +n07842753 +n07842972 +n07843117 +n07843220 +n07843348 +n07843464 +n07843636 +n07843775 +n07844042 +n07844604 +n07844786 +n07844867 +n07845087 +n07845166 +n07845335 +n07845421 +n07845495 +n07845571 +n07845702 +n07845775 +n07845863 +n07846014 +n07846143 +n07846274 +n07846359 +n07846471 +n07846557 +n07846688 +n07846802 +n07846938 +n07847047 +n07847198 +n07847453 +n07847585 +n07847706 +n07847827 +n07847917 +n07848093 +n07848196 +n07848338 +n07848771 +n07848936 +n07849026 +n07849186 +n07849336 +n07849506 +n07849619 +n07849733 +n07849912 +n07850083 +n07850219 +n07850329 +n07851054 +n07851298 +n07851443 +n07851554 +n07851641 +n07851767 +n07851926 +n07852045 +n07852229 +n07852302 +n07852376 +n07852452 +n07852532 +n07852614 +n07852712 +n07852833 +n07852919 +n07853125 +n07853232 +n07853345 +n07853445 +n07853560 +n07853648 +n07853762 +n07853852 +n07853946 +n07854066 +n07854184 +n07854266 +n07854348 +n07854455 +n07854614 +n07854707 +n07854813 +n07854982 +n07855105 +n07855188 +n07855317 +n07855413 +n07855510 +n07855603 +n07855721 +n07855812 +n07855907 +n07856045 +n07856186 +n07856270 +n07856756 +n07856895 +n07856992 +n07857076 +n07857170 +n07857356 +n07857598 +n07857731 +n07857959 +n07858114 +n07858197 +n07858336 +n07858484 +n07858595 +n07858841 +n07858978 +n07859142 +n07859284 +n07859583 +n07859796 +n07859951 +n07860103 +n07860208 +n07860331 +n07860447 +n07860548 +n07860629 +n07860805 +n07860988 +n07861158 +n07861247 +n07861334 +n07861557 +n07861681 +n07861813 +n07861983 +n07862095 +n07862244 +n07862348 +n07862461 +n07862611 +n07862770 +n07862946 +n07863107 +n07863229 +n07863374 +n07863547 +n07863644 +n07863802 +n07863935 +n07864065 +n07864198 +n07864317 +n07864475 +n07864638 +n07864756 +n07864934 +n07865105 +n07865196 +n07865484 +n07865575 +n07865700 +n07865788 +n07866015 +n07866151 +n07866277 +n07866409 +n07866571 +n07866723 +n07866868 +n07867021 +n07867164 +n07867324 +n07867421 +n07867616 +n07867751 +n07867883 +n07868045 +n07868200 +n07868340 +n07868508 +n07868684 +n07868830 +n07868955 +n07869111 +n07869291 +n07869391 +n07869522 +n07869611 +n07869775 +n07869937 +n07870069 +n07870167 +n07870313 +n07870478 +n07870620 +n07870734 +n07870894 +n07871065 +n07871234 +n07871335 +n07871436 +n07871588 +n07871720 +n07871810 +n07872593 +n07872748 +n07873057 +n07873198 +n07873348 +n07873464 +n07873679 +n07873807 +n07874063 +n07874159 +n07874259 +n07874343 +n07874441 +n07874531 +n07874674 +n07874780 +n07874995 +n07875086 +n07875152 +n07875267 +n07875436 +n07875560 +n07875693 +n07875835 +n07875926 +n07876026 +n07876189 +n07876281 +n07876460 +n07876550 +n07876651 +n07876775 +n07876893 +n07877187 +n07877299 +n07877675 +n07877849 +n07877961 +n07878145 +n07878283 +n07878479 +n07878647 +n07878785 +n07878926 +n07879072 +n07879174 +n07879350 +n07879450 +n07879560 +n07879659 +n07879821 +n07879953 +n07880080 +n07880213 +n07880325 +n07880458 +n07880751 +n07880880 +n07880968 +n07881117 +n07881205 +n07881404 +n07881525 +n07881625 +n07881800 +n07882420 +n07882497 +n07882886 +n07883031 +n07883156 +n07883251 +n07883384 +n07883510 +n07883661 +n07884567 +n07885705 +n07886057 +n07886176 +n07886317 +n07886463 +n07886572 +n07886849 +n07887099 +n07887192 +n07887304 +n07887461 +n07887634 +n07887967 +n07888058 +n07888229 +n07888378 +n07888465 +n07888816 +n07888909 +n07889193 +n07889274 +n07889510 +n07889814 +n07889990 +n07890068 +n07890226 +n07890352 +n07890540 +n07890617 +n07890750 +n07890890 +n07890970 +n07891095 +n07891189 +n07891309 +n07891433 +n07891726 +n07892418 +n07892512 +n07892813 +n07893253 +n07893425 +n07893528 +n07893642 +n07893792 +n07893891 +n07894102 +n07894298 +n07894451 +n07894551 +n07894703 +n07894799 +n07894965 +n07895100 +n07895237 +n07895435 +n07895595 +n07895710 +n07895839 +n07895962 +n07896060 +n07896165 +n07896287 +n07896422 +n07896560 +n07896661 +n07896765 +n07896893 +n07896994 +n07897116 +n07897200 +n07897438 +n07897600 +n07897750 +n07897865 +n07897975 +n07898117 +n07898247 +n07898333 +n07898443 +n07898617 +n07898745 +n07898895 +n07899003 +n07899108 +n07899292 +n07899434 +n07899533 +n07899660 +n07899769 +n07899899 +n07899976 +n07900225 +n07900406 +n07900616 +n07900734 +n07900825 +n07900958 +n07901355 +n07901457 +n07901587 +n07902121 +n07902336 +n07902443 +n07902520 +n07902698 +n07902799 +n07902937 +n07903101 +n07903208 +n07903543 +n07903643 +n07903731 +n07903841 +n07903962 +n07904072 +n07904293 +n07904395 +n07904637 +n07904760 +n07904865 +n07904934 +n07905038 +n07905296 +n07905386 +n07905474 +n07905618 +n07905770 +n07905979 +n07906111 +n07906284 +n07906572 +n07906718 +n07906877 +n07907037 +n07907161 +n07907342 +n07907429 +n07907548 +n07907831 +n07907943 +n07908411 +n07908567 +n07908647 +n07908812 +n07908923 +n07909129 +n07909231 +n07909362 +n07909504 +n07909593 +n07909714 +n07909811 +n07909954 +n07910048 +n07910152 +n07910245 +n07910379 +n07910538 +n07910656 +n07910799 +n07910970 +n07911061 +n07911249 +n07911371 +n07911677 +n07912093 +n07912211 +n07913180 +n07913300 +n07913393 +n07913537 +n07913644 +n07913774 +n07913882 +n07914006 +n07914128 +n07914271 +n07914413 +n07914586 +n07914686 +n07914777 +n07914887 +n07914995 +n07915094 +n07915213 +n07915366 +n07915491 +n07915618 +n07915800 +n07915918 +n07916041 +n07916183 +n07916319 +n07916437 +n07916582 +n07917133 +n07917272 +n07917392 +n07917507 +n07917618 +n07917791 +n07917874 +n07917951 +n07918028 +n07918193 +n07918309 +n07918706 +n07918879 +n07919165 +n07919310 +n07919441 +n07919572 +n07919665 +n07919787 +n07919894 +n07920052 +n07920222 +n07920349 +n07920540 +n07920663 +n07920872 +n07920989 +n07921090 +n07921239 +n07921360 +n07921455 +n07921615 +n07921834 +n07921948 +n07922041 +n07922147 +n07922512 +n07922607 +n07922764 +n07922955 +n07923748 +n07924033 +n07924276 +n07924366 +n07924443 +n07924560 +n07924655 +n07924747 +n07924834 +n07924955 +n07925116 +n07925229 +n07925327 +n07925423 +n07925500 +n07925608 +n07925708 +n07925808 +n07925966 +n07926250 +n07926346 +n07926442 +n07926540 +n07926785 +n07926920 +n07927070 +n07927197 +n07927512 +n07927716 +n07927836 +n07927931 +n07928163 +n07928264 +n07928367 +n07928488 +n07928578 +n07928696 +n07928790 +n07928887 +n07928998 +n07929172 +n07929351 +n07929519 +n07929940 +n07930062 +n07930205 +n07930315 +n07930433 +n07930554 +n07930864 +n07931001 +n07931096 +n07931280 +n07931452 +n07931612 +n07931733 +n07931870 +n07932039 +n07932323 +n07932454 +n07932614 +n07932762 +n07932841 +n07933154 +n07933274 +n07933530 +n07933652 +n07933799 +n07933891 +n07934032 +n07934152 +n07934282 +n07934373 +n07934530 +n07934678 +n07934800 +n07934908 +n07935043 +n07935152 +n07935288 +n07935379 +n07935504 +n07935737 +n07935878 +n07936015 +n07936093 +n07936263 +n07936459 +n07936548 +n07936745 +n07936979 +n07937069 +n07937344 +n07937461 +n07937621 +n07938007 +n07938149 +n07938313 +n07938594 +n07942152 +n07951464 +n07954211 +n07977870 +n08079613 +n08182379 +n08238463 +n08242223 +n08249459 +n08253141 +n08256735 +n08376250 +n08385989 +n08492354 +n08492461 +n08494231 +n08495908 +n08496334 +n08500819 +n08500989 +n08501887 +n08505018 +n08506347 +n08511017 +n08517010 +n08517676 +n08518171 +n08519299 +n08521623 +n08523340 +n08524735 +n08539072 +n08539276 +n08540532 +n08547468 +n08547544 +n08551296 +n08554440 +n08555333 +n08555710 +n08558770 +n08558963 +n08559155 +n08560295 +n08569482 +n08571275 +n08571642 +n08571898 +n08573674 +n08573842 +n08578517 +n08579266 +n08579352 +n08580944 +n08583292 +n08583455 +n08583554 +n08583682 +n08584914 +n08586978 +n08589670 +n08596076 +n08597579 +n08598301 +n08598568 +n08599174 +n08599292 +n08611339 +n08611421 +n08613733 +n08614632 +n08616050 +n08618831 +n08619112 +n08623676 +n08628141 +n08633683 +n08640531 +n08640739 +n08640962 +n08643267 +n08644045 +n08645104 +n08645212 +n08645318 +n08647264 +n08648917 +n08649711 +n08651104 +n08652376 +n08658309 +n08658918 +n08659242 +n08659331 +n08659446 +n08659861 +n08661878 +n08662427 +n08663051 +n08663703 +n08663860 +n08673039 +n08674344 +n08676253 +n08677424 +n08677801 +n08678783 +n08679167 +n08679269 +n08679562 +n08685188 +n08782627 +n08896327 +n09032191 +n09186592 +n09189157 +n09191635 +n09193551 +n09193705 +n09194227 +n09199101 +n09201998 +n09203827 +n09205509 +n09206896 +n09206985 +n09208496 +n09209025 +n09210862 +n09213434 +n09213565 +n09214060 +n09214269 +n09214916 +n09215023 +n09215437 +n09217230 +n09218315 +n09218494 +n09218641 +n09219233 +n09223487 +n09224725 +n09226869 +n09228055 +n09229709 +n09230041 +n09230202 +n09231117 +n09233446 +n09233603 +n09238926 +n09239302 +n09242389 +n09245515 +n09246464 +n09247410 +n09248153 +n09248399 +n09249034 +n09249155 +n09251407 +n09255070 +n09256479 +n09257843 +n09259025 +n09259219 +n09260907 +n09262690 +n09263912 +n09264803 +n09265620 +n09266604 +n09267854 +n09268007 +n09269341 +n09269472 +n09269882 +n09270160 +n09270657 +n09270735 +n09274152 +n09274305 +n09279986 +n09281252 +n09282208 +n09283193 +n09283405 +n09283514 +n09283767 +n09283866 +n09287415 +n09287968 +n09288635 +n09289331 +n09289596 +n09290350 +n09290444 +n09294877 +n09295210 +n09295946 +n09300306 +n09300905 +n09302616 +n09303008 +n09303528 +n09304750 +n09305031 +n09305898 +n09308572 +n09308743 +n09309046 +n09309168 +n09309292 +n09310616 +n09315159 +n09319604 +n09325824 +n09326662 +n09327077 +n09327538 +n09330378 +n09331251 +n09332890 +n09335693 +n09335809 +n09336555 +n09337048 +n09337253 +n09338013 +n09339810 +n09344198 +n09344324 +n09344724 +n09348460 +n09349648 +n09351905 +n09352849 +n09353815 +n09354511 +n09357346 +n09357447 +n09359803 +n09361517 +n09362316 +n09362945 +n09366017 +n09366317 +n09375606 +n09376198 +n09376526 +n09376786 +n09381242 +n09382099 +n09384106 +n09389867 +n09391386 +n09391644 +n09391774 +n09392402 +n09393524 +n09393605 +n09396465 +n09396608 +n09398076 +n09398677 +n09399592 +n09400584 +n09400987 +n09402944 +n09403086 +n09403211 +n09403427 +n09403734 +n09405078 +n09405787 +n09406793 +n09409512 +n09409752 +n09410224 +n09411189 +n09411295 +n09415584 +n09415671 +n09416076 +n09416890 +n09421031 +n09421799 +n09421951 +n09422190 +n09422631 +n09425019 +n09425344 +n09428293 +n09428628 +n09429630 +n09432283 +n09432990 +n09433312 +n09433442 +n09433839 +n09435739 +n09436444 +n09436708 +n09437454 +n09438844 +n09438940 +n09439032 +n09439213 +n09442595 +n09443281 +n09443641 +n09444783 +n09445008 +n09445289 +n09447666 +n09448690 +n09450163 +n09451237 +n09452291 +n09452395 +n09452760 +n09453008 +n09454153 +n09454412 +n09454744 +n09456207 +n09457979 +n09458269 +n09459979 +n09460046 +n09461069 +n09462600 +n09463226 +n09464486 +n09466678 +n09467696 +n09468604 +n09470027 +n09470222 +n09472413 +n09472597 +n09474010 +n09474412 +n09474765 +n09475044 +n09475179 +n09475925 +n09476123 +n09478210 +n09480959 +n09481120 +n09493983 +n09495962 +n09505153 +n09537660 +n09556121 +n09605110 +n09606009 +n09606527 +n09607630 +n09607782 +n09607903 +n09608709 +n09610255 +n09610405 +n09611722 +n09612700 +n09613118 +n09613191 +n09613690 +n09615336 +n09616573 +n09616922 +n09617161 +n09617435 +n09617577 +n09617696 +n09618760 +n09618880 +n09618957 +n09619168 +n09619452 +n09620078 +n09620794 +n09621232 +n09622049 +n09622302 +n09624168 +n09624559 +n09624899 +n09625401 +n09626238 +n09627807 +n09627906 +n09629065 +n09629246 +n09629752 +n09631129 +n09632274 +n09632518 +n09633969 +n09635534 +n09635635 +n09635973 +n09636339 +n09637339 +n09638454 +n09638875 +n09639382 +n09639919 +n09640327 +n09640715 +n09641002 +n09641578 +n09643799 +n09644152 +n09644657 +n09648743 +n09648911 +n09649067 +n09650729 +n09650839 +n09650989 +n09651123 +n09651968 +n09652149 +n09653144 +n09653438 +n09654079 +n09654518 +n09654898 +n09655213 +n09655466 +n09656077 +n09657206 +n09657748 +n09658254 +n09658398 +n09658815 +n09658921 +n09659039 +n09659188 +n09660010 +n09660240 +n09661873 +n09662038 +n09662661 +n09662951 +n09663248 +n09663786 +n09663999 +n09664556 +n09664908 +n09665367 +n09665545 +n09666349 +n09666476 +n09666883 +n09667358 +n09668199 +n09668437 +n09668562 +n09668988 +n09669631 +n09670280 +n09670521 +n09670909 +n09671089 +n09672590 +n09672725 +n09672840 +n09673091 +n09674412 +n09674786 +n09675045 +n09675673 +n09675799 +n09675922 +n09676021 +n09676247 +n09676884 +n09677427 +n09678747 +n09679028 +n09679170 +n09679925 +n09680908 +n09681107 +n09681234 +n09681973 +n09683180 +n09683757 +n09683924 +n09684082 +n09684901 +n09685233 +n09685806 +n09686262 +n09686401 +n09688233 +n09688804 +n09689435 +n09689958 +n09690083 +n09690208 +n09690496 +n09690621 +n09690864 +n09691604 +n09691729 +n09691858 +n09692125 +n09692915 +n09693244 +n09693982 +n09694664 +n09694771 +n09695019 +n09695132 +n09695514 +n09695620 +n09695979 +n09696456 +n09696585 +n09696763 +n09697401 +n09697986 +n09698644 +n09699020 +n09699642 +n09700125 +n09700964 +n09701148 +n09701833 +n09702134 +n09702673 +n09703101 +n09703344 +n09703485 +n09703708 +n09703809 +n09703932 +n09704057 +n09704157 +n09704283 +n09705003 +n09705124 +n09705671 +n09705784 +n09706029 +n09706255 +n09707061 +n09707289 +n09707735 +n09708750 +n09708889 +n09709531 +n09709673 +n09710041 +n09710164 +n09710886 +n09711132 +n09711435 +n09712324 +n09712448 +n09712696 +n09712967 +n09713108 +n09714120 +n09714694 +n09715165 +n09715303 +n09715427 +n09716047 +n09716933 +n09717233 +n09718217 +n09718811 +n09718936 +n09719309 +n09719794 +n09720033 +n09720256 +n09720595 +n09720702 +n09720842 +n09721244 +n09721444 +n09722064 +n09722658 +n09722817 +n09723067 +n09723819 +n09723944 +n09724234 +n09724533 +n09724656 +n09724785 +n09725000 +n09725229 +n09725546 +n09725653 +n09725772 +n09725935 +n09726621 +n09726811 +n09727440 +n09727826 +n09728137 +n09728285 +n09729062 +n09729156 +n09730077 +n09730204 +n09730824 +n09731343 +n09731436 +n09731571 +n09732170 +n09733459 +n09733793 +n09734185 +n09734450 +n09734535 +n09734639 +n09735258 +n09735654 +n09736485 +n09736798 +n09736945 +n09737050 +n09737161 +n09737453 +n09738121 +n09738400 +n09740724 +n09741074 +n09741331 +n09741722 +n09741816 +n09741904 +n09741999 +n09742101 +n09742315 +n09742927 +n09743487 +n09743601 +n09743792 +n09744161 +n09744346 +n09744462 +n09744679 +n09744834 +n09745229 +n09745324 +n09745834 +n09745933 +n09746936 +n09747191 +n09747495 +n09748101 +n09748408 +n09748648 +n09748889 +n09749386 +n09750282 +n09750641 +n09750770 +n09750891 +n09751076 +n09751496 +n09751622 +n09751895 +n09752023 +n09752519 +n09753348 +n09753792 +n09754152 +n09754217 +n09754633 +n09754907 +n09755086 +n09755241 +n09755555 +n09755788 +n09755893 +n09756049 +n09756195 +n09756961 +n09757449 +n09758173 +n09758885 +n09759501 +n09760290 +n09760609 +n09760913 +n09761068 +n09761753 +n09762011 +n09762385 +n09763272 +n09763784 +n09764201 +n09764598 +n09764732 +n09764900 +n09765118 +n09765278 +n09767197 +n09769076 +n09769525 +n09769929 +n09770179 +n09770359 +n09771435 +n09772330 +n09772746 +n09772930 +n09773962 +n09774167 +n09774783 +n09775907 +n09776346 +n09776642 +n09776807 +n09777870 +n09778266 +n09778537 +n09778783 +n09778927 +n09779124 +n09779280 +n09779461 +n09779790 +n09780395 +n09780828 +n09780984 +n09781398 +n09781504 +n09781650 +n09782167 +n09782397 +n09782855 +n09783537 +n09783776 +n09783884 +n09784043 +n09784160 +n09784564 +n09785236 +n09785659 +n09785891 +n09786115 +n09787534 +n09787765 +n09788073 +n09788237 +n09789150 +n09789566 +n09789898 +n09790047 +n09790482 +n09791014 +n09791419 +n09791816 +n09792125 +n09792555 +n09792969 +n09793141 +n09793352 +n09793946 +n09794550 +n09794668 +n09795010 +n09795124 +n09795334 +n09796809 +n09796974 +n09797742 +n09797873 +n09797998 +n09798096 +n09800469 +n09800964 +n09801102 +n09801275 +n09801533 +n09802445 +n09802641 +n09802951 +n09804230 +n09805151 +n09805324 +n09805475 +n09806944 +n09807075 +n09808080 +n09808591 +n09809279 +n09809538 +n09809749 +n09809925 +n09810166 +n09811568 +n09811712 +n09811852 +n09813219 +n09814252 +n09814381 +n09814488 +n09814567 +n09814660 +n09815455 +n09815790 +n09816654 +n09816771 +n09817174 +n09817386 +n09818022 +n09819477 +n09820044 +n09820263 +n09821831 +n09822830 +n09823153 +n09823287 +n09823502 +n09823832 +n09824135 +n09824609 +n09825096 +n09825750 +n09826204 +n09826605 +n09826821 +n09827246 +n09827363 +n09828216 +n09828403 +n09828988 +n09830194 +n09830400 +n09830629 +n09830759 +n09830926 +n09831962 +n09832456 +n09832633 +n09832978 +n09833111 +n09833275 +n09833441 +n09833536 +n09833751 +n09833997 +n09834258 +n09834378 +n09834699 +n09834885 +n09835017 +n09835153 +n09835230 +n09835348 +n09835506 +n09836160 +n09836343 +n09836519 +n09836786 +n09837459 +n09837720 +n09838295 +n09838370 +n09838621 +n09839702 +n09840217 +n09840435 +n09840520 +n09841188 +n09841515 +n09841696 +n09842047 +n09842288 +n09842395 +n09842528 +n09842823 +n09843443 +n09843602 +n09843716 +n09843824 +n09844457 +n09844898 +n09845401 +n09845849 +n09846142 +n09846469 +n09846586 +n09846755 +n09846894 +n09847267 +n09847344 +n09847543 +n09848110 +n09848489 +n09849167 +n09849990 +n09850760 +n09850974 +n09851165 +n09851575 +n09853541 +n09853645 +n09853881 +n09854218 +n09854421 +n09854915 +n09855433 +n09856401 +n09856671 +n09856827 +n09857007 +n09858165 +n09858299 +n09858733 +n09859152 +n09859285 +n09859684 +n09859975 +n09861287 +n09861599 +n09861863 +n09861946 +n09862183 +n09862621 +n09863031 +n09863339 +n09863749 +n09863936 +n09864632 +n09864968 +n09865068 +n09865162 +n09865398 +n09865672 +n09865744 +n09866115 +n09866354 +n09866559 +n09866661 +n09866817 +n09866922 +n09867069 +n09867154 +n09867311 +n09868270 +n09868782 +n09868899 +n09869317 +n09869447 +n09869578 +n09870096 +n09871095 +n09871229 +n09871681 +n09871867 +n09871952 +n09872066 +n09872557 +n09873348 +n09873473 +n09873769 +n09873899 +n09874428 +n09874725 +n09874862 +n09875025 +n09875979 +n09876701 +n09877288 +n09877587 +n09877750 +n09877951 +n09878921 +n09879552 +n09880189 +n09880741 +n09881265 +n09881358 +n09881895 +n09883047 +n09883452 +n09883807 +n09885059 +n09885866 +n09886403 +n09886540 +n09888635 +n09889065 +n09889170 +n09889691 +n09889941 +n09890192 +n09890749 +n09891730 +n09892262 +n09892513 +n09892693 +n09893191 +n09893344 +n09893502 +n09893600 +n09894143 +n09894445 +n09894654 +n09894909 +n09895222 +n09895480 +n09895561 +n09895701 +n09895902 +n09896170 +n09896311 +n09896401 +n09896685 +n09896826 +n09898020 +n09899289 +n09899671 +n09899782 +n09899929 +n09901337 +n09901502 +n09901642 +n09901786 +n09901921 +n09902128 +n09902353 +n09902731 +n09902851 +n09902954 +n09903153 +n09903501 +n09903639 +n09903936 +n09904208 +n09904837 +n09905050 +n09905185 +n09905530 +n09906293 +n09906449 +n09906704 +n09907804 +n09908769 +n09909660 +n09909929 +n09910222 +n09910374 +n09910556 +n09910840 +n09911226 +n09912431 +n09912681 +n09912907 +n09912995 +n09913329 +n09913455 +n09913593 +n09915434 +n09915651 +n09916348 +n09917214 +n09917345 +n09917481 +n09917593 +n09918248 +n09918554 +n09918867 +n09919061 +n09919200 +n09919451 +n09919899 +n09920106 +n09920283 +n09920901 +n09921034 +n09923003 +n09923186 +n09923418 +n09923561 +n09923673 +n09923996 +n09924106 +n09924195 +n09924313 +n09924437 +n09924996 +n09927089 +n09927451 +n09928136 +n09928451 +n09928845 +n09929202 +n09929298 +n09929577 +n09930257 +n09930628 +n09930876 +n09931165 +n09931418 +n09931640 +n09932098 +n09932336 +n09932508 +n09932788 +n09933020 +n09933098 +n09933842 +n09933972 +n09934337 +n09934488 +n09934774 +n09935107 +n09935434 +n09936825 +n09936892 +n09937056 +n09937688 +n09937802 +n09937903 +n09938080 +n09938449 +n09938991 +n09940725 +n09940818 +n09941089 +n09941571 +n09941787 +n09941964 +n09942697 +n09942970 +n09943239 +n09943811 +n09944022 +n09944160 +n09944430 +n09945021 +n09945223 +n09945319 +n09945603 +n09945745 +n09946814 +n09947127 +n09950457 +n09950728 +n09951070 +n09951274 +n09951524 +n09951616 +n09952163 +n09953052 +n09953350 +n09953615 +n09954355 +n09954639 +n09955406 +n09955944 +n09956578 +n09957523 +n09958133 +n09958292 +n09958447 +n09958569 +n09959142 +n09959658 +n09960688 +n09961198 +n09961331 +n09961469 +n09961605 +n09961739 +n09962966 +n09964202 +n09964411 +n09965515 +n09965787 +n09966470 +n09966554 +n09967063 +n09967406 +n09967555 +n09967816 +n09967967 +n09968259 +n09968652 +n09968741 +n09968845 +n09970088 +n09970192 +n09970402 +n09970822 +n09971273 +n09971385 +n09971839 +n09972010 +n09972458 +n09972587 +n09974648 +n09975425 +n09976024 +n09976283 +n09976429 +n09976728 +n09976917 +n09978442 +n09979321 +n09979913 +n09980458 +n09980805 +n09980985 +n09981092 +n09981278 +n09981540 +n09981939 +n09982152 +n09982525 +n09983314 +n09983572 +n09983889 +n09984960 +n09985470 +n09985809 +n09985978 +n09986450 +n09986700 +n09986904 +n09987045 +n09987161 +n09987239 +n09988063 +n09988311 +n09988493 +n09988703 +n09989502 +n09990415 +n09990690 +n09990777 +n09991740 +n09991867 +n09992538 +n09992837 +n09993252 +n09993651 +n09994400 +n09994673 +n09994808 +n09994878 +n09995829 +n09996039 +n09996304 +n09996481 +n09997622 +n09998788 +n09999135 +n10000294 +n10000459 +n10000787 +n10001217 +n10001481 +n10001764 +n10002257 +n10002760 +n10003476 +n10004718 +n10005006 +n10005934 +n10006177 +n10006748 +n10007684 +n10007809 +n10007995 +n10008123 +n10008254 +n10009162 +n10009276 +n10009484 +n10009671 +n10010062 +n10010243 +n10010632 +n10010767 +n10010864 +n10011360 +n10011486 +n10012484 +n10013811 +n10015215 +n10015485 +n10015792 +n10015897 +n10017272 +n10017422 +n10018747 +n10018861 +n10019072 +n10019187 +n10019406 +n10020366 +n10020533 +n10020670 +n10020807 +n10020890 +n10022908 +n10023264 +n10023506 +n10023656 +n10024025 +n10024362 +n10024937 +n10025060 +n10025295 +n10025391 +n10025635 +n10026976 +n10027246 +n10027590 +n10028402 +n10028541 +n10029068 +n10030277 +n10032987 +n10033412 +n10033572 +n10033663 +n10033888 +n10034201 +n10034614 +n10035952 +n10036266 +n10036444 +n10036692 +n10036929 +n10037080 +n10037385 +n10037588 +n10037922 +n10038119 +n10038409 +n10038620 +n10039271 +n10039946 +n10040240 +n10040698 +n10040945 +n10041373 +n10041887 +n10042690 +n10042845 +n10043024 +n10043491 +n10043643 +n10044682 +n10044879 +n10047199 +n10047459 +n10048117 +n10048367 +n10048612 +n10048836 +n10049363 +n10050043 +n10050880 +n10051026 +n10051761 +n10051861 +n10051975 +n10052694 +n10053439 +n10053808 +n10054657 +n10055297 +n10055410 +n10055566 +n10055730 +n10055847 +n10056103 +n10056611 +n10056719 +n10057271 +n10058411 +n10058962 +n10059067 +n10060075 +n10060175 +n10060352 +n10061043 +n10061195 +n10061431 +n10061882 +n10062042 +n10062176 +n10062275 +n10062492 +n10062594 +n10062716 +n10062905 +n10062996 +n10063635 +n10063919 +n10064831 +n10064977 +n10065758 +n10066206 +n10066314 +n10067011 +n10067305 +n10067600 +n10067968 +n10068234 +n10068425 +n10069296 +n10069981 +n10070108 +n10070377 +n10070449 +n10070563 +n10070711 +n10071332 +n10071557 +n10072054 +n10074249 +n10074578 +n10074735 +n10074841 +n10075299 +n10075693 +n10076224 +n10076483 +n10076604 +n10076957 +n10077106 +n10077593 +n10077879 +n10078131 +n10078719 +n10078806 +n10079399 +n10079893 +n10080117 +n10080508 +n10080869 +n10081204 +n10081842 +n10082043 +n10082299 +n10082423 +n10082562 +n10082687 +n10082997 +n10083677 +n10083823 +n10084043 +n10084295 +n10085101 +n10085869 +n10086383 +n10086744 +n10087434 +n10087736 +n10088200 +n10090745 +n10091349 +n10091450 +n10091564 +n10091651 +n10091861 +n10091997 +n10092488 +n10092643 +n10092794 +n10092978 +n10093167 +n10093475 +n10093818 +n10094320 +n10094584 +n10094782 +n10095265 +n10095420 +n10095769 +n10095869 +n10096126 +n10096508 +n10097262 +n10097477 +n10097590 +n10097842 +n10097995 +n10098245 +n10098388 +n10098517 +n10098624 +n10098710 +n10098862 +n10099002 +n10099375 +n10101308 +n10101634 +n10101981 +n10102800 +n10103155 +n10103228 +n10103921 +n10104064 +n10104487 +n10104756 +n10104888 +n10105085 +n10105733 +n10105906 +n10106387 +n10106509 +n10106995 +n10107173 +n10107303 +n10108018 +n10108089 +n10108464 +n10108832 +n10109443 +n10109662 +n10109826 +n10110093 +n10110731 +n10110893 +n10111358 +n10111779 +n10111903 +n10112129 +n10113249 +n10113583 +n10113869 +n10114476 +n10114550 +n10114662 +n10115430 +n10115946 +n10116370 +n10116478 +n10116702 +n10117017 +n10117267 +n10117415 +n10117739 +n10117851 +n10118301 +n10118743 +n10118844 +n10119609 +n10120330 +n10120671 +n10121026 +n10121246 +n10121714 +n10121800 +n10122300 +n10122531 +n10123122 +n10123844 +n10126177 +n10126424 +n10126708 +n10127186 +n10127689 +n10128519 +n10128748 +n10129338 +n10129825 +n10130686 +n10130877 +n10131151 +n10131268 +n10131590 +n10131815 +n10132035 +n10132502 +n10134178 +n10134396 +n10134760 +n10134982 +n10135129 +n10135197 +n10135297 +n10136615 +n10136959 +n10137825 +n10138369 +n10138472 +n10139077 +n10139651 +n10140051 +n10140597 +n10140683 +n10140783 +n10140929 +n10141364 +n10141732 +n10142166 +n10142391 +n10142537 +n10142747 +n10142946 +n10143172 +n10143595 +n10143725 +n10144338 +n10145239 +n10145340 +n10145480 +n10145590 +n10145774 +n10145902 +n10146002 +n10146104 +n10146416 +n10146816 +n10146927 +n10147121 +n10147262 +n10147710 +n10147935 +n10148035 +n10148305 +n10148825 +n10149436 +n10149867 +n10150071 +n10150794 +n10150940 +n10151133 +n10151261 +n10151367 +n10151570 +n10151760 +n10152306 +n10152616 +n10152763 +n10153155 +n10153414 +n10153594 +n10153865 +n10154013 +n10154186 +n10154601 +n10155222 +n10155600 +n10155849 +n10156629 +n10156831 +n10157016 +n10157128 +n10157271 +n10158506 +n10159045 +n10159289 +n10159533 +n10160188 +n10160280 +n10160412 +n10161622 +n10162016 +n10162194 +n10162354 +n10164025 +n10164233 +n10164492 +n10165448 +n10166189 +n10166394 +n10167152 +n10167361 +n10167565 +n10167838 +n10168012 +n10168183 +n10168584 +n10168837 +n10169147 +n10169241 +n10169419 +n10169796 +n10170060 +n10170681 +n10170866 +n10171219 +n10171456 +n10171567 +n10172080 +n10173410 +n10173579 +n10173665 +n10173771 +n10174253 +n10174330 +n10174445 +n10174589 +n10174695 +n10174971 +n10175248 +n10175725 +n10176913 +n10177150 +n10178077 +n10178216 +n10179069 +n10180580 +n10180791 +n10180923 +n10181445 +n10181547 +n10181799 +n10181878 +n10182190 +n10182402 +n10183347 +n10183931 +n10184505 +n10185148 +n10185483 +n10185793 +n10186068 +n10186143 +n10186216 +n10186350 +n10186686 +n10186774 +n10187130 +n10187491 +n10187990 +n10188715 +n10188856 +n10188957 +n10189278 +n10189597 +n10190122 +n10190516 +n10191001 +n10191388 +n10191613 +n10192839 +n10193650 +n10194231 +n10194775 +n10195056 +n10195155 +n10195261 +n10195593 +n10196404 +n10196725 +n10197392 +n10198437 +n10198832 +n10199251 +n10200246 +n10200781 +n10202225 +n10202624 +n10202763 +n10203949 +n10204177 +n10204833 +n10205231 +n10205344 +n10205457 +n10205714 +n10206173 +n10206506 +n10206629 +n10207077 +n10207169 +n10208189 +n10208847 +n10208950 +n10209082 +n10209731 +n10210137 +n10210512 +n10210648 +n10210911 +n10211036 +n10211666 +n10211830 +n10212231 +n10212501 +n10212780 +n10213034 +n10213429 +n10214062 +n10214390 +n10215623 +n10216106 +n10216403 +n10217208 +n10218043 +n10218164 +n10218292 +n10219240 +n10219453 +n10219879 +n10220080 +n10220924 +n10221312 +n10221520 +n10222170 +n10222259 +n10222497 +n10222716 +n10223069 +n10223177 +n10223606 +n10224578 +n10225219 +n10225931 +n10226413 +n10227166 +n10227266 +n10227393 +n10227490 +n10227698 +n10227793 +n10227985 +n10228278 +n10228468 +n10228592 +n10228712 +n10229883 +n10230216 +n10233248 +n10235024 +n10235269 +n10235385 +n10236304 +n10236521 +n10236842 +n10237069 +n10237196 +n10237464 +n10237556 +n10237676 +n10237799 +n10238272 +n10238375 +n10239928 +n10240082 +n10240235 +n10240417 +n10240821 +n10241024 +n10241300 +n10242328 +n10243137 +n10243273 +n10243483 +n10243664 +n10243872 +n10244108 +n10244359 +n10244913 +n10245029 +n10245341 +n10245507 +n10245639 +n10245863 +n10246317 +n10246395 +n10246703 +n10247358 +n10247880 +n10248008 +n10248198 +n10248377 +n10249191 +n10249270 +n10249459 +n10249869 +n10249950 +n10250712 +n10251329 +n10251612 +n10252075 +n10252222 +n10252354 +n10252547 +n10253122 +n10253296 +n10253479 +n10253611 +n10253703 +n10255459 +n10257221 +n10258602 +n10258786 +n10259348 +n10259780 +n10259997 +n10260473 +n10260706 +n10260800 +n10261211 +n10261511 +n10261624 +n10261862 +n10262343 +n10262445 +n10262561 +n10262655 +n10262880 +n10263146 +n10263411 +n10263790 +n10265281 +n10265801 +n10265891 +n10266016 +n10266328 +n10266848 +n10267166 +n10267311 +n10267865 +n10268629 +n10269199 +n10269289 +n10271677 +n10272782 +n10272913 +n10273064 +n10274173 +n10274318 +n10274815 +n10275249 +n10275395 +n10275848 +n10276045 +n10276477 +n10276942 +n10277027 +n10277638 +n10277815 +n10277912 +n10278456 +n10279018 +n10279778 +n10280034 +n10280130 +n10280598 +n10280674 +n10281546 +n10281770 +n10281896 +n10282482 +n10282672 +n10283170 +n10283366 +n10283546 +n10284064 +n10284871 +n10284965 +n10286282 +n10286539 +n10286749 +n10288964 +n10289039 +n10289176 +n10289462 +n10289766 +n10290422 +n10290541 +n10290813 +n10290919 +n10291110 +n10291469 +n10291822 +n10291942 +n10292316 +n10293332 +n10293590 +n10293861 +n10294020 +n10294139 +n10295371 +n10295479 +n10296176 +n10296444 +n10297234 +n10297367 +n10297531 +n10297841 +n10298202 +n10298271 +n10298647 +n10298912 +n10299125 +n10299250 +n10299700 +n10299875 +n10300041 +n10300154 +n10300303 +n10300500 +n10300654 +n10300829 +n10302576 +n10302700 +n10302905 +n10303037 +n10303814 +n10304086 +n10304650 +n10304914 +n10305635 +n10305802 +n10306004 +n10306279 +n10306496 +n10306595 +n10306890 +n10307114 +n10308066 +n10308168 +n10308275 +n10308504 +n10308653 +n10308732 +n10310783 +n10311506 +n10311661 +n10312287 +n10312491 +n10312600 +n10313000 +n10313239 +n10313441 +n10313724 +n10314054 +n10314182 +n10314517 +n10314836 +n10315217 +n10315456 +n10315561 +n10315730 +n10316360 +n10316527 +n10316862 +n10317007 +n10317500 +n10317963 +n10318293 +n10318607 +n10318686 +n10319313 +n10320484 +n10320863 +n10321126 +n10321340 +n10321632 +n10321882 +n10322238 +n10323634 +n10323752 +n10323999 +n10324560 +n10325549 +n10325774 +n10326776 +n10327143 +n10327987 +n10328123 +n10328328 +n10328437 +n10328696 +n10328941 +n10329035 +n10330593 +n10330931 +n10331098 +n10331167 +n10331258 +n10331347 +n10331841 +n10332110 +n10332385 +n10332861 +n10332953 +n10333044 +n10333165 +n10333317 +n10333439 +n10333601 +n10333838 +n10334009 +n10334461 +n10334782 +n10335246 +n10335801 +n10335931 +n10336411 +n10336904 +n10337488 +n10338231 +n10338391 +n10339179 +n10339251 +n10339717 +n10340312 +n10341243 +n10341343 +n10341446 +n10341573 +n10341955 +n10342180 +n10342367 +n10342543 +n10342893 +n10342992 +n10343088 +n10343355 +n10343449 +n10343554 +n10343869 +n10344121 +n10344203 +n10344319 +n10344656 +n10344774 +n10345015 +n10345100 +n10345302 +n10345422 +n10345659 +n10346015 +n10347204 +n10347446 +n10348526 +n10349243 +n10349750 +n10349836 +n10350220 +n10350774 +n10351064 +n10353016 +n10353355 +n10353928 +n10354265 +n10354754 +n10355142 +n10355306 +n10355449 +n10355688 +n10355806 +n10356450 +n10356877 +n10357012 +n10357613 +n10357737 +n10358032 +n10358124 +n10358575 +n10359117 +n10359422 +n10359546 +n10359659 +n10360366 +n10360747 +n10361060 +n10361194 +n10361296 +n10361525 +n10362003 +n10362319 +n10362557 +n10363445 +n10363573 +n10364198 +n10364502 +n10365514 +n10366145 +n10366276 +n10366966 +n10368291 +n10368528 +n10368624 +n10368711 +n10368798 +n10369095 +n10369317 +n10369417 +n10369528 +n10369699 +n10369955 +n10370381 +n10370955 +n10371052 +n10371221 +n10371330 +n10371450 +n10373390 +n10373525 +n10374541 +n10374849 +n10374943 +n10375052 +n10375314 +n10375402 +n10376523 +n10376890 +n10377021 +n10377185 +n10377291 +n10377542 +n10377633 +n10378026 +n10378113 +n10378780 +n10379376 +n10380126 +n10380499 +n10380672 +n10381804 +n10381981 +n10382157 +n10382302 +n10382480 +n10382710 +n10382825 +n10383094 +n10383237 +n10383505 +n10383816 +n10384214 +n10384392 +n10384496 +n10385566 +n10386196 +n10386754 +n10386874 +n10386984 +n10387196 +n10387324 +n10387836 +n10389865 +n10389976 +n10390600 +n10390698 +n10390807 +n10391416 +n10393909 +n10394434 +n10394786 +n10395073 +n10395209 +n10395390 +n10395828 +n10396106 +n10396337 +n10396727 +n10396908 +n10397001 +n10397142 +n10397392 +n10399130 +n10400003 +n10400108 +n10400205 +n10400437 +n10400618 +n10400998 +n10401204 +n10401331 +n10401639 +n10402709 +n10402824 +n10403633 +n10403876 +n10404426 +n10404998 +n10405540 +n10405694 +n10406266 +n10406391 +n10406765 +n10407310 +n10407954 +n10408809 +n10409459 +n10409752 +n10410246 +n10410996 +n10411356 +n10411551 +n10411867 +n10414239 +n10414768 +n10414865 +n10415037 +n10416567 +n10417288 +n10417424 +n10417551 +n10417682 +n10417843 +n10417969 +n10418101 +n10418735 +n10419047 +n10419472 +n10419630 +n10419785 +n10420031 +n10420277 +n10420507 +n10420649 +n10421016 +n10421470 +n10421956 +n10422405 +n10425946 +n10426454 +n10426630 +n10427223 +n10427359 +n10427764 +n10428004 +n10431122 +n10431625 +n10432189 +n10432441 +n10432875 +n10432957 +n10433077 +n10433452 +n10433610 +n10433737 +n10435169 +n10435251 +n10435716 +n10435988 +n10436334 +n10437014 +n10437137 +n10437262 +n10437698 +n10438172 +n10438619 +n10438842 +n10439373 +n10439523 +n10439727 +n10439851 +n10441037 +n10441124 +n10441694 +n10441962 +n10442093 +n10442232 +n10442417 +n10442573 +n10443032 +n10443659 +n10443830 +n10444194 +n10448322 +n10448455 +n10449664 +n10450038 +n10450161 +n10450303 +n10451450 +n10451590 +n10451858 +n10453184 +n10455619 +n10456070 +n10456138 +n10456696 +n10457214 +n10457444 +n10457903 +n10458111 +n10458356 +n10458596 +n10459882 +n10460033 +n10461060 +n10462588 +n10462751 +n10462860 +n10464052 +n10464542 +n10464711 +n10464870 +n10465002 +n10465451 +n10465831 +n10466198 +n10466564 +n10466918 +n10467179 +n10467395 +n10468750 +n10469611 +n10469874 +n10470779 +n10471640 +n10471732 +n10471859 +n10472129 +n10472447 +n10473453 +n10473562 +n10473789 +n10473917 +n10474064 +n10474343 +n10474446 +n10474645 +n10475835 +n10475940 +n10476467 +n10477713 +n10477955 +n10478118 +n10478293 +n10478462 +n10478827 +n10478960 +n10479135 +n10479328 +n10481167 +n10481268 +n10482054 +n10482220 +n10482587 +n10482921 +n10483138 +n10483395 +n10483799 +n10483890 +n10484858 +n10485298 +n10485883 +n10486166 +n10486236 +n10486561 +n10487182 +n10487363 +n10487592 +n10488016 +n10488309 +n10488656 +n10489426 +n10490421 +n10491998 +n10492086 +n10492727 +n10493199 +n10493419 +n10493685 +n10493835 +n10493922 +n10494195 +n10494373 +n10495167 +n10495421 +n10495555 +n10495756 +n10496393 +n10496489 +n10497135 +n10497534 +n10497645 +n10498046 +n10498699 +n10498816 +n10498986 +n10499110 +n10499232 +n10499355 +n10499631 +n10499857 +n10500217 +n10500419 +n10500603 +n10500824 +n10500942 +n10501453 +n10501635 +n10502046 +n10502329 +n10502950 +n10503818 +n10504090 +n10504206 +n10505347 +n10505613 +n10505732 +n10505942 +n10506336 +n10506544 +n10506915 +n10507070 +n10507380 +n10507482 +n10507565 +n10507692 +n10508141 +n10508379 +n10508710 +n10509063 +n10509161 +n10509810 +n10510245 +n10510974 +n10511771 +n10512201 +n10512372 +n10512708 +n10512859 +n10513509 +n10513823 +n10513938 +n10514051 +n10514121 +n10514255 +n10514429 +n10514784 +n10515863 +n10516527 +n10517137 +n10517283 +n10518349 +n10519126 +n10519494 +n10519984 +n10520286 +n10520544 +n10520964 +n10521100 +n10521662 +n10521853 +n10522035 +n10522324 +n10522759 +n10523341 +n10524076 +n10524223 +n10524869 +n10525134 +n10525436 +n10525617 +n10525878 +n10526534 +n10527147 +n10527334 +n10528023 +n10528148 +n10528493 +n10529231 +n10530150 +n10530383 +n10530571 +n10530959 +n10531109 +n10531445 +n10531838 +n10533874 +n10533983 +n10536134 +n10536274 +n10536416 +n10537708 +n10537906 +n10538629 +n10538733 +n10538853 +n10539015 +n10539160 +n10539278 +n10540114 +n10540252 +n10540656 +n10541833 +n10542608 +n10542761 +n10542888 +n10543161 +n10543937 +n10544232 +n10544748 +n10545792 +n10546428 +n10546633 +n10548419 +n10548537 +n10548681 +n10549510 +n10550252 +n10550369 +n10550468 +n10551576 +n10552393 +n10553140 +n10553235 +n10554024 +n10554141 +n10554846 +n10555059 +n10555430 +n10556033 +n10556518 +n10556704 +n10556825 +n10557246 +n10557854 +n10559009 +n10559288 +n10559508 +n10559683 +n10559996 +n10560106 +n10560637 +n10561222 +n10561320 +n10561736 +n10562135 +n10562283 +n10562509 +n10562968 +n10563314 +n10563403 +n10563711 +n10564098 +n10565502 +n10565667 +n10566072 +n10567613 +n10567722 +n10567848 +n10568200 +n10568358 +n10568443 +n10568608 +n10568915 +n10569011 +n10569179 +n10570019 +n10570704 +n10571907 +n10572706 +n10572889 +n10573957 +n10574311 +n10574538 +n10574840 +n10575463 +n10575594 +n10575787 +n10576223 +n10576316 +n10576676 +n10576818 +n10576962 +n10577182 +n10577284 +n10577710 +n10577820 +n10578021 +n10578162 +n10578471 +n10578656 +n10579062 +n10579549 +n10580030 +n10580437 +n10580535 +n10581648 +n10581890 +n10582604 +n10582746 +n10583387 +n10583790 +n10585077 +n10585217 +n10585628 +n10586166 +n10586265 +n10586444 +n10586903 +n10586998 +n10588074 +n10588357 +n10588724 +n10588965 +n10589666 +n10590146 +n10590239 +n10590452 +n10590903 +n10591072 +n10591811 +n10592049 +n10592811 +n10593521 +n10594147 +n10594523 +n10594857 +n10595164 +n10595647 +n10596517 +n10596899 +n10597505 +n10597745 +n10597889 +n10598013 +n10598181 +n10598459 +n10598904 +n10599215 +n10599806 +n10601234 +n10601362 +n10602119 +n10602470 +n10602985 +n10603528 +n10603851 +n10604275 +n10604380 +n10604634 +n10604880 +n10604979 +n10605253 +n10605737 +n10607291 +n10607478 +n10609092 +n10609198 +n10610465 +n10610850 +n10611267 +n10611613 +n10612210 +n10612373 +n10612518 +n10613996 +n10614507 +n10614629 +n10615179 +n10615334 +n10616578 +n10617024 +n10617193 +n10617397 +n10618234 +n10618342 +n10618465 +n10618685 +n10618848 +n10619492 +n10619642 +n10619888 +n10620212 +n10620586 +n10620758 +n10621294 +n10621400 +n10621514 +n10622053 +n10624074 +n10624310 +n10624437 +n10624540 +n10625860 +n10626630 +n10627252 +n10628097 +n10628644 +n10629329 +n10629647 +n10629939 +n10630093 +n10630188 +n10631131 +n10631309 +n10631654 +n10632576 +n10633298 +n10633450 +n10634464 +n10634849 +n10634990 +n10635788 +n10636488 +n10637483 +n10638922 +n10639238 +n10639359 +n10639637 +n10639817 +n10641223 +n10642596 +n10642705 +n10643095 +n10643837 +n10643937 +n10644598 +n10645017 +n10645223 +n10646032 +n10646140 +n10646433 +n10646641 +n10646780 +n10646942 +n10647745 +n10648237 +n10648696 +n10649197 +n10649308 +n10650162 +n10652605 +n10652703 +n10654015 +n10654211 +n10654321 +n10654827 +n10654932 +n10655169 +n10655442 +n10655594 +n10655730 +n10655986 +n10656120 +n10656223 +n10656969 +n10657306 +n10657556 +n10657835 +n10658304 +n10659042 +n10659762 +n10660128 +n10660621 +n10660883 +n10661002 +n10661216 +n10661563 +n10661732 +n10663315 +n10663549 +n10665302 +n10665587 +n10665698 +n10666752 +n10667477 +n10667709 +n10667863 +n10668450 +n10668666 +n10669991 +n10671042 +n10671613 +n10671736 +n10671898 +n10672371 +n10672540 +n10672662 +n10673296 +n10673776 +n10674130 +n10674713 +n10675010 +n10675142 +n10675609 +n10676018 +n10676434 +n10676569 +n10678937 +n10679174 +n10679503 +n10679610 +n10679723 +n10680609 +n10680796 +n10681194 +n10681557 +n10682713 +n10682953 +n10683675 +n10684146 +n10684630 +n10684827 +n10685398 +n10686073 +n10686517 +n10686694 +n10686885 +n10688356 +n10688811 +n10689306 +n10690268 +n10690421 +n10690648 +n10691318 +n10691937 +n10692090 +n10692482 +n10692883 +n10693235 +n10693334 +n10693824 +n10694258 +n10694939 +n10695450 +n10696101 +n10696508 +n10697135 +n10697282 +n10698368 +n10699558 +n10699752 +n10699981 +n10700105 +n10700201 +n10700640 +n10700963 +n10701180 +n10701644 +n10701962 +n10702167 +n10702615 +n10703221 +n10703336 +n10703480 +n10703692 +n10704238 +n10704712 +n10704886 +n10705448 +n10705615 +n10706812 +n10707134 +n10707233 +n10707707 +n10708292 +n10708454 +n10709529 +n10710171 +n10710259 +n10710778 +n10710913 +n10711483 +n10711766 +n10712229 +n10712374 +n10712474 +n10712690 +n10712835 +n10713254 +n10713686 +n10713843 +n10714195 +n10715030 +n10715347 +n10715789 +n10716576 +n10716864 +n10717055 +n10717196 +n10717337 +n10718131 +n10718349 +n10718509 +n10718665 +n10718952 +n10719036 +n10719132 +n10719267 +n10719807 +n10720197 +n10720453 +n10720964 +n10721124 +n10721321 +n10721612 +n10721708 +n10721819 +n10722029 +n10722575 +n10722965 +n10723230 +n10723597 +n10724132 +n10724372 +n10724570 +n10725280 +n10726031 +n10726786 +n10727016 +n10727171 +n10727458 +n10728117 +n10728233 +n10728624 +n10728998 +n10729330 +n10730542 +n10730728 +n10731013 +n10731732 +n10732010 +n10732521 +n10732854 +n10732967 +n10733820 +n10734394 +n10734741 +n10734891 +n10734963 +n10735173 +n10735298 +n10735984 +n10737103 +n10737264 +n10738111 +n10738215 +n10738670 +n10738871 +n10739135 +n10739297 +n10739391 +n10740594 +n10740732 +n10740868 +n10741152 +n10741367 +n10741493 +n10742005 +n10742111 +n10742546 +n10742997 +n10743124 +n10743356 +n10744078 +n10744164 +n10745006 +n10745770 +n10746931 +n10747119 +n10747424 +n10747548 +n10747965 +n10748142 +n10748506 +n10748620 +n10749928 +n10750031 +n10750188 +n10750640 +n10751026 +n10751152 +n10751265 +n10751710 +n10752480 +n10753061 +n10753182 +n10753339 +n10753442 +n10753989 +n10754189 +n10754281 +n10754449 +n10755080 +n10755164 +n10755394 +n10755648 +n10756061 +n10756148 +n10756261 +n10756641 +n10756837 +n10757050 +n10757492 +n10758337 +n10758445 +n10758949 +n10759151 +n10759331 +n10759982 +n10760199 +n10760622 +n10760951 +n10761190 +n10761326 +n10761519 +n10762212 +n10762480 +n10763075 +n10763245 +n10763383 +n10763620 +n10764465 +n10764622 +n10764719 +n10765305 +n10765587 +n10765679 +n10765885 +n10766260 +n10768148 +n10768272 +n10768903 +n10769084 +n10769188 +n10769321 +n10769459 +n10771066 +n10772092 +n10772580 +n10772937 +n10773665 +n10773800 +n10774329 +n10774756 +n10775003 +n10775128 +n10776052 +n10776339 +n10776887 +n10777299 +n10778044 +n10778148 +n10778711 +n10778999 +n10779610 +n10779897 +n10779995 +n10780284 +n10780632 +n10781236 +n10781817 +n10782362 +n10782471 +n10782791 +n10782940 +n10783240 +n10783539 +n10783646 +n10783734 +n10784113 +n10784544 +n10784922 +n10785480 +n10787470 +n10788852 +n10789415 +n10789709 +n10791115 +n10791221 +n10791820 +n10791890 +n10792335 +n10792506 +n10792856 +n10793570 +n10793799 +n10794014 +n10801561 +n10801802 +n10802507 +n10802621 +n10802953 +n10803031 +n10803282 +n10803978 +n10804287 +n10804636 +n10804732 +n10805501 +n10806113 +n10994097 +n11100798 +n11196627 +n11242849 +n11318824 +n11346873 +n11448153 +n11487732 +n11508382 +n11511327 +n11524451 +n11530008 +n11531193 +n11531334 +n11532682 +n11533212 +n11533999 +n11536567 +n11536673 +n11537327 +n11539289 +n11542137 +n11542640 +n11544015 +n11545350 +n11545524 +n11545714 +n11547562 +n11547855 +n11548728 +n11548870 +n11549009 +n11549245 +n11549779 +n11549895 +n11552133 +n11552386 +n11552594 +n11552806 +n11552976 +n11553240 +n11553522 +n11596108 +n11597657 +n11598287 +n11598686 +n11598886 +n11599324 +n11600372 +n11601177 +n11601333 +n11601918 +n11602091 +n11602478 +n11602873 +n11603246 +n11603462 +n11603835 +n11604046 +n11608250 +n11609475 +n11609684 +n11609862 +n11610047 +n11610215 +n11610437 +n11610602 +n11610823 +n11611087 +n11611233 +n11611356 +n11611561 +n11611758 +n11612018 +n11612235 +n11612349 +n11612575 +n11612923 +n11613219 +n11613459 +n11613692 +n11613867 +n11614039 +n11614250 +n11614420 +n11614713 +n11615026 +n11615259 +n11615387 +n11615607 +n11615812 +n11615967 +n11616260 +n11616486 +n11616662 +n11616852 +n11617090 +n11617272 +n11617631 +n11617878 +n11618079 +n11618290 +n11618525 +n11618861 +n11619227 +n11619455 +n11619687 +n11619845 +n11620016 +n11620389 +n11620673 +n11621029 +n11621281 +n11621547 +n11621727 +n11621950 +n11622184 +n11622368 +n11622591 +n11622771 +n11623105 +n11623815 +n11623967 +n11624192 +n11624531 +n11625003 +n11625223 +n11625391 +n11625632 +n11625804 +n11626010 +n11626152 +n11626409 +n11626585 +n11626826 +n11627168 +n11627512 +n11627714 +n11627908 +n11628087 +n11628456 +n11628793 +n11629047 +n11629354 +n11630017 +n11630489 +n11631159 +n11631405 +n11631619 +n11631854 +n11631985 +n11632167 +n11632376 +n11632619 +n11632929 +n11633284 +n11634736 +n11635152 +n11635433 +n11635830 +n11636204 +n11636835 +n11639084 +n11639306 +n11639445 +n11640132 +n11643835 +n11644046 +n11644226 +n11644462 +n11644872 +n11645163 +n11645590 +n11645914 +n11646167 +n11646344 +n11646517 +n11646694 +n11646955 +n11647306 +n11647703 +n11647868 +n11648039 +n11648268 +n11648776 +n11649150 +n11649359 +n11649878 +n11650160 +n11650307 +n11650430 +n11650558 +n11650759 +n11652039 +n11652217 +n11652376 +n11652578 +n11652753 +n11652966 +n11653126 +n11653570 +n11653904 +n11654293 +n11654438 +n11654984 +n11655152 +n11655592 +n11655974 +n11656123 +n11656549 +n11656771 +n11657585 +n11658331 +n11658544 +n11658709 +n11659248 +n11659627 +n11660300 +n11661372 +n11661909 +n11662128 +n11662371 +n11662585 +n11662937 +n11663263 +n11664418 +n11665372 +n11666854 +n11668117 +n11669786 +n11669921 +n11672269 +n11672400 +n11674019 +n11674332 +n11675025 +n11675404 +n11675738 +n11676500 +n11676743 +n11676850 +n11677485 +n11677902 +n11678010 +n11678299 +n11678377 +n11679378 +n11680457 +n11680596 +n11682659 +n11683216 +n11683838 +n11684264 +n11684499 +n11684654 +n11685091 +n11685621 +n11686195 +n11686652 +n11686780 +n11686912 +n11687071 +n11687432 +n11687789 +n11687964 +n11688069 +n11688378 +n11689197 +n11689367 +n11689483 +n11689678 +n11689815 +n11689957 +n11690088 +n11690254 +n11690455 +n11691046 +n11691857 +n11692265 +n11692792 +n11693981 +n11694300 +n11694469 +n11694664 +n11694866 +n11695085 +n11695285 +n11695599 +n11695974 +n11696450 +n11696935 +n11697560 +n11697802 +n11698042 +n11698245 +n11699442 +n11699751 +n11700058 +n11700279 +n11700864 +n11701066 +n11701302 +n11702713 +n11703669 +n11704093 +n11704620 +n11704791 +n11705171 +n11705387 +n11705573 +n11705776 +n11706325 +n11706761 +n11706942 +n11707229 +n11707827 +n11708658 +n11708857 +n11709045 +n11709205 +n11709674 +n11710136 +n11710393 +n11710658 +n11710827 +n11710987 +n11711289 +n11711537 +n11711764 +n11711971 +n11712282 +n11713164 +n11713370 +n11713763 +n11714382 +n11715430 +n11715678 +n11716698 +n11717399 +n11717577 +n11718296 +n11718681 +n11719286 +n11720353 +n11720643 +n11720891 +n11721337 +n11721642 +n11722036 +n11722342 +n11722466 +n11722621 +n11722982 +n11723227 +n11723452 +n11723770 +n11723986 +n11724109 +n11724660 +n11725015 +n11725311 +n11725480 +n11725623 +n11725821 +n11725973 +n11726145 +n11726269 +n11726433 +n11726707 +n11727091 +n11727358 +n11727540 +n11727738 +n11728099 +n11728769 +n11728945 +n11729142 +n11729478 +n11729860 +n11730015 +n11730458 +n11730602 +n11730750 +n11730933 +n11731157 +n11731659 +n11732052 +n11732567 +n11733054 +n11733312 +n11733548 +n11734493 +n11734698 +n11735053 +n11735570 +n11735977 +n11736362 +n11736694 +n11736851 +n11737009 +n11737125 +n11737534 +n11738547 +n11738997 +n11739365 +n11739978 +n11740414 +n11741175 +n11741350 +n11741575 +n11741797 +n11742310 +n11742878 +n11744011 +n11744108 +n11744471 +n11745817 +n11746600 +n11747468 +n11748002 +n11748811 +n11749112 +n11749603 +n11750173 +n11750508 +n11750989 +n11751765 +n11751974 +n11752578 +n11752798 +n11752937 +n11753143 +n11753355 +n11753562 +n11753700 +n11754893 +n11756092 +n11756329 +n11756669 +n11756870 +n11757017 +n11757190 +n11757653 +n11757851 +n11758122 +n11758276 +n11758483 +n11758799 +n11759224 +n11759404 +n11759609 +n11759853 +n11760785 +n11761202 +n11761650 +n11761836 +n11762018 +n11762433 +n11762927 +n11763142 +n11763625 +n11763874 +n11764478 +n11764814 +n11765568 +n11766046 +n11766189 +n11766432 +n11767354 +n11767877 +n11768816 +n11769176 +n11769621 +n11769803 +n11770256 +n11771147 +n11771539 +n11771746 +n11771924 +n11772408 +n11772879 +n11773408 +n11773628 +n11773987 +n11774513 +n11774972 +n11775340 +n11775626 +n11776234 +n11777080 +n11778092 +n11778257 +n11779300 +n11780148 +n11780424 +n11781176 +n11782036 +n11782266 +n11782761 +n11782878 +n11783162 +n11783920 +n11784126 +n11784497 +n11785276 +n11785668 +n11785875 +n11786131 +n11786539 +n11786843 +n11787190 +n11788039 +n11788727 +n11789066 +n11789438 +n11789589 +n11789962 +n11790089 +n11790788 +n11790936 +n11791341 +n11791569 +n11792029 +n11792341 +n11792742 +n11793403 +n11793779 +n11794024 +n11794139 +n11794519 +n11795049 +n11795216 +n11795580 +n11796005 +n11796188 +n11797321 +n11797508 +n11797981 +n11798270 +n11798496 +n11798688 +n11798978 +n11799331 +n11799732 +n11800236 +n11800565 +n11801392 +n11801665 +n11801891 +n11802410 +n11802586 +n11802800 +n11802995 +n11805255 +n11805544 +n11805956 +n11806219 +n11806369 +n11806521 +n11806679 +n11806814 +n11807108 +n11807525 +n11807696 +n11807979 +n11808299 +n11808468 +n11808721 +n11808932 +n11809094 +n11809271 +n11809437 +n11809594 +n11809754 +n11810030 +n11810358 +n11811059 +n11811473 +n11811706 +n11811921 +n11812094 +n11812910 +n11813077 +n11814584 +n11814996 +n11815491 +n11815721 +n11815918 +n11816121 +n11816336 +n11816649 +n11816829 +n11817160 +n11817501 +n11817914 +n11818069 +n11818636 +n11819509 +n11819912 +n11820965 +n11821184 +n11822300 +n11823043 +n11823305 +n11823436 +n11823756 +n11824146 +n11824344 +n11824747 +n11825351 +n11825749 +n11826198 +n11826569 +n11827541 +n11828577 +n11828973 +n11829205 +n11829672 +n11829922 +n11830045 +n11830252 +n11830400 +n11830714 +n11830906 +n11831100 +n11831297 +n11831521 +n11832214 +n11832480 +n11832671 +n11832899 +n11833373 +n11833749 +n11834272 +n11834654 +n11834890 +n11835251 +n11836327 +n11836722 +n11837204 +n11837351 +n11837562 +n11837743 +n11837970 +n11838413 +n11838916 +n11839460 +n11839568 +n11839823 +n11840067 +n11840246 +n11840476 +n11840764 +n11841247 +n11843441 +n11844371 +n11844892 +n11845557 +n11845793 +n11845913 +n11846312 +n11846425 +n11846765 +n11847169 +n11848479 +n11848867 +n11849271 +n11849467 +n11849871 +n11849983 +n11850521 +n11850918 +n11851258 +n11851578 +n11851839 +n11852028 +n11852148 +n11852531 +n11853079 +n11853356 +n11853813 +n11854479 +n11855274 +n11855435 +n11855553 +n11855842 +n11856573 +n11857696 +n11857875 +n11858077 +n11858703 +n11858814 +n11859275 +n11859472 +n11859737 +n11860208 +n11860555 +n11861238 +n11861487 +n11861641 +n11861853 +n11862835 +n11863467 +n11863877 +n11865071 +n11865276 +n11865429 +n11865574 +n11865874 +n11866248 +n11866706 +n11867311 +n11868814 +n11869351 +n11869689 +n11870044 +n11870418 +n11870747 +n11871059 +n11871496 +n11871748 +n11872146 +n11872324 +n11872658 +n11873182 +n11873612 +n11874081 +n11874423 +n11874878 +n11875523 +n11875691 +n11875938 +n11876204 +n11876432 +n11876634 +n11876803 +n11877193 +n11877283 +n11877473 +n11877646 +n11877860 +n11878101 +n11878283 +n11878633 +n11879054 +n11879722 +n11879895 +n11881189 +n11882074 +n11882237 +n11882426 +n11882636 +n11882821 +n11882972 +n11883328 +n11883628 +n11883945 +n11884384 +n11884967 +n11885856 +n11887119 +n11887310 +n11887476 +n11887750 +n11888061 +n11888424 +n11888800 +n11889205 +n11889619 +n11890022 +n11890150 +n11890884 +n11891175 +n11892029 +n11892181 +n11892637 +n11892817 +n11893640 +n11893916 +n11894327 +n11894558 +n11894770 +n11895092 +n11895472 +n11895714 +n11896141 +n11896722 +n11897116 +n11897466 +n11898639 +n11898775 +n11899223 +n11899762 +n11899921 +n11900569 +n11901294 +n11901452 +n11901597 +n11901759 +n11901977 +n11902200 +n11902389 +n11902709 +n11902982 +n11903333 +n11903671 +n11904109 +n11904274 +n11905392 +n11905749 +n11906127 +n11906514 +n11906917 +n11907100 +n11907405 +n11907689 +n11908549 +n11908846 +n11909864 +n11910271 +n11910460 +n11910666 +n11915214 +n11915658 +n11915899 +n11916467 +n11916696 +n11917407 +n11917835 +n11918286 +n11918473 +n11918808 +n11919447 +n11919761 +n11919975 +n11920133 +n11920498 +n11920663 +n11920998 +n11921395 +n11921792 +n11922661 +n11922755 +n11922839 +n11922926 +n11923174 +n11923397 +n11923637 +n11924014 +n11924445 +n11924849 +n11925303 +n11925450 +n11925898 +n11926365 +n11926833 +n11926976 +n11927215 +n11927740 +n11928352 +n11928858 +n11929743 +n11930038 +n11930203 +n11930353 +n11930571 +n11930788 +n11930994 +n11931135 +n11931540 +n11931918 +n11932745 +n11932927 +n11933099 +n11933257 +n11933387 +n11933546 +n11933728 +n11933903 +n11934041 +n11934239 +n11934463 +n11934616 +n11934807 +n11935027 +n11935187 +n11935330 +n11935469 +n11935627 +n11935715 +n11935794 +n11935877 +n11935953 +n11936027 +n11936113 +n11936199 +n11936287 +n11936369 +n11936448 +n11936539 +n11936624 +n11936707 +n11936782 +n11936864 +n11936946 +n11937023 +n11937102 +n11937195 +n11937278 +n11937360 +n11937446 +n11937692 +n11938556 +n11939180 +n11939491 +n11939699 +n11940006 +n11940349 +n11940599 +n11940750 +n11941094 +n11941478 +n11941924 +n11942659 +n11943133 +n11943407 +n11943660 +n11943992 +n11944196 +n11944751 +n11944954 +n11945367 +n11945514 +n11945783 +n11946051 +n11946313 +n11946727 +n11946918 +n11947251 +n11947629 +n11947802 +n11948044 +n11948264 +n11948469 +n11948864 +n11949015 +n11949402 +n11949857 +n11950345 +n11950686 +n11950877 +n11951052 +n11951511 +n11951820 +n11952346 +n11952541 +n11953038 +n11953339 +n11953610 +n11953884 +n11954161 +n11954345 +n11954484 +n11954642 +n11954798 +n11955040 +n11955153 +n11955532 +n11955896 +n11956348 +n11956850 +n11957317 +n11957514 +n11957678 +n11958080 +n11958499 +n11958888 +n11959259 +n11959632 +n11959862 +n11960245 +n11960673 +n11961100 +n11961446 +n11961871 +n11962272 +n11962667 +n11962994 +n11963572 +n11963932 +n11964446 +n11964848 +n11965218 +n11965627 +n11965962 +n11966083 +n11966215 +n11966385 +n11966617 +n11966896 +n11967142 +n11967315 +n11967744 +n11967878 +n11968519 +n11968704 +n11968931 +n11969166 +n11969607 +n11969806 +n11970101 +n11970298 +n11970586 +n11971248 +n11971406 +n11971783 +n11971927 +n11972291 +n11972759 +n11972959 +n11973341 +n11973634 +n11973749 +n11974373 +n11974557 +n11974888 +n11975254 +n11976170 +n11976314 +n11976511 +n11976933 +n11977303 +n11977660 +n11977887 +n11978233 +n11978551 +n11978713 +n11978961 +n11979187 +n11979354 +n11979527 +n11979715 +n11979964 +n11980318 +n11980682 +n11981192 +n11981475 +n11982115 +n11982545 +n11982939 +n11983375 +n11983606 +n11984144 +n11984542 +n11985053 +n11985321 +n11985739 +n11985903 +n11986511 +n11986729 +n11987126 +n11987349 +n11987511 +n11988132 +n11988596 +n11988893 +n11989087 +n11989393 +n11989869 +n11990167 +n11990313 +n11990627 +n11990920 +n11991263 +n11991549 +n11991777 +n11992479 +n11992806 +n11993203 +n11993444 +n11993675 +n11994150 +n11995092 +n11995396 +n11996251 +n11996677 +n11997032 +n11997160 +n11997969 +n11998492 +n11998888 +n11999278 +n11999656 +n12000191 +n12001294 +n12001707 +n12001924 +n12002428 +n12002651 +n12002826 +n12003167 +n12003696 +n12004120 +n12004547 +n12004987 +n12005656 +n12006306 +n12006766 +n12006930 +n12007196 +n12007406 +n12007766 +n12008252 +n12008487 +n12008749 +n12009047 +n12009420 +n12009792 +n12010628 +n12010815 +n12011370 +n12011620 +n12012111 +n12012253 +n12012510 +n12013035 +n12013511 +n12013701 +n12014085 +n12014355 +n12014923 +n12015221 +n12015525 +n12015959 +n12016434 +n12016567 +n12016777 +n12016914 +n12017127 +n12017326 +n12017511 +n12017664 +n12017853 +n12018014 +n12018100 +n12018188 +n12018271 +n12018363 +n12018447 +n12018530 +n12018760 +n12019035 +n12019827 +n12020184 +n12020507 +n12020736 +n12020941 +n12022054 +n12022382 +n12022821 +n12023108 +n12023407 +n12023726 +n12024176 +n12024445 +n12024690 +n12024805 +n12025220 +n12026018 +n12026476 +n12026981 +n12027222 +n12027658 +n12028424 +n12029039 +n12029635 +n12030092 +n12030654 +n12030908 +n12031139 +n12031388 +n12031547 +n12031927 +n12032429 +n12032686 +n12033139 +n12033504 +n12033709 +n12034141 +n12034384 +n12034594 +n12035631 +n12035907 +n12036067 +n12036226 +n12036939 +n12037499 +n12037691 +n12038038 +n12038208 +n12038406 +n12038585 +n12038760 +n12038898 +n12039317 +n12041446 +n12043444 +n12043673 +n12043836 +n12044041 +n12044467 +n12044784 +n12045157 +n12045514 +n12045860 +n12046028 +n12046428 +n12046815 +n12047345 +n12047884 +n12048056 +n12048399 +n12048928 +n12049282 +n12049562 +n12050533 +n12050959 +n12051103 +n12051514 +n12051792 +n12052267 +n12052447 +n12052787 +n12053405 +n12053690 +n12053962 +n12054195 +n12055073 +n12055516 +n12056099 +n12056217 +n12056601 +n12056758 +n12056990 +n12057211 +n12057447 +n12057660 +n12057895 +n12058192 +n12058630 +n12058822 +n12059314 +n12059625 +n12060546 +n12061104 +n12061380 +n12061614 +n12062105 +n12062468 +n12062626 +n12062781 +n12063211 +n12063639 +n12064389 +n12064591 +n12065316 +n12065649 +n12065777 +n12066018 +n12066261 +n12066451 +n12066630 +n12066821 +n12067029 +n12067193 +n12067433 +n12067672 +n12067817 +n12068138 +n12068432 +n12068615 +n12069009 +n12069217 +n12069679 +n12070016 +n12070381 +n12070583 +n12070712 +n12071259 +n12071477 +n12071744 +n12072210 +n12072722 +n12073217 +n12073554 +n12073991 +n12074408 +n12074867 +n12075010 +n12075151 +n12075299 +n12075830 +n12076223 +n12076577 +n12076852 +n12077244 +n12077944 +n12078172 +n12078451 +n12078747 +n12079120 +n12079523 +n12079963 +n12080395 +n12080588 +n12080820 +n12081215 +n12081649 +n12082131 +n12083113 +n12083591 +n12083847 +n12084158 +n12084400 +n12084555 +n12084890 +n12085267 +n12085664 +n12086012 +n12086192 +n12086539 +n12086778 +n12087961 +n12088223 +n12088327 +n12088495 +n12088909 +n12089320 +n12089496 +n12089846 +n12090890 +n12091213 +n12091377 +n12091550 +n12091697 +n12091953 +n12092262 +n12092417 +n12092629 +n12092930 +n12093329 +n12093600 +n12093885 +n12094244 +n12094401 +n12094612 +n12095020 +n12095281 +n12095412 +n12095543 +n12095647 +n12095934 +n12096089 +n12096395 +n12096563 +n12096674 +n12097396 +n12097556 +n12098403 +n12098524 +n12098827 +n12099342 +n12100187 +n12101870 +n12102133 +n12103680 +n12103894 +n12104104 +n12104238 +n12104501 +n12104734 +n12105125 +n12105353 +n12105828 +n12105981 +n12106134 +n12106323 +n12107002 +n12107191 +n12107710 +n12107970 +n12108432 +n12108613 +n12108871 +n12109365 +n12109827 +n12110085 +n12110236 +n12110352 +n12110475 +n12110778 +n12111238 +n12111627 +n12112008 +n12112337 +n12112609 +n12112918 +n12113195 +n12113323 +n12113657 +n12114010 +n12114590 +n12115180 +n12116058 +n12116429 +n12116734 +n12117017 +n12117235 +n12117326 +n12117695 +n12117912 +n12118414 +n12118661 +n12119099 +n12119238 +n12119390 +n12119539 +n12119717 +n12120347 +n12120578 +n12121033 +n12121187 +n12121610 +n12122442 +n12122725 +n12122918 +n12123648 +n12123741 +n12124172 +n12124627 +n12124818 +n12125001 +n12125183 +n12125584 +n12126084 +n12126360 +n12126736 +n12127460 +n12127575 +n12127768 +n12128071 +n12128306 +n12128490 +n12129134 +n12129738 +n12129986 +n12130549 +n12131405 +n12131550 +n12132092 +n12132956 +n12133151 +n12133462 +n12133682 +n12134025 +n12134486 +n12134695 +n12134836 +n12135049 +n12135576 +n12135729 +n12135898 +n12136392 +n12136581 +n12136720 +n12137120 +n12137569 +n12137791 +n12137954 +n12138110 +n12138248 +n12138444 +n12138578 +n12139196 +n12139575 +n12139793 +n12139921 +n12140511 +n12140759 +n12140903 +n12141167 +n12141385 +n12141495 +n12142085 +n12142357 +n12142450 +n12143065 +n12143215 +n12143405 +n12143676 +n12144313 +n12144580 +n12144987 +n12145148 +n12145477 +n12146311 +n12146488 +n12146654 +n12147226 +n12147835 +n12148757 +n12150722 +n12150969 +n12151170 +n12151615 +n12152031 +n12152251 +n12152532 +n12152722 +n12153033 +n12153224 +n12153580 +n12153741 +n12153914 +n12154114 +n12154773 +n12155009 +n12155583 +n12155773 +n12156679 +n12156819 +n12157056 +n12157179 +n12157769 +n12158031 +n12158443 +n12158798 +n12159055 +n12159388 +n12159555 +n12159804 +n12159942 +n12160125 +n12160303 +n12160490 +n12160857 +n12161056 +n12161285 +n12161577 +n12161744 +n12161969 +n12162181 +n12162425 +n12162758 +n12163035 +n12163279 +n12164363 +n12164656 +n12164881 +n12165170 +n12165384 +n12165758 +n12166128 +n12166424 +n12166793 +n12166929 +n12167075 +n12167436 +n12167602 +n12168565 +n12169099 +n12170585 +n12171098 +n12171316 +n12171966 +n12172364 +n12172481 +n12172906 +n12173069 +n12173664 +n12173912 +n12174311 +n12174521 +n12174926 +n12175181 +n12175370 +n12175598 +n12176453 +n12176709 +n12176953 +n12177129 +n12177455 +n12178129 +n12178780 +n12178896 +n12179122 +n12179632 +n12180168 +n12180456 +n12180885 +n12181352 +n12181612 +n12182049 +n12182276 +n12183026 +n12183452 +n12183816 +n12184095 +n12184468 +n12184912 +n12185254 +n12185859 +n12186352 +n12186554 +n12186839 +n12187247 +n12187663 +n12187891 +n12188289 +n12188635 +n12189429 +n12189779 +n12189987 +n12190410 +n12190869 +n12191240 +n12192132 +n12192877 +n12193334 +n12193665 +n12194147 +n12194613 +n12195391 +n12195533 +n12195734 +n12196129 +n12196336 +n12196527 +n12196694 +n12196954 +n12197359 +n12197601 +n12198286 +n12198793 +n12199266 +n12199399 +n12199790 +n12199982 +n12200143 +n12200504 +n12200905 +n12201331 +n12201580 +n12201938 +n12202936 +n12203529 +n12203699 +n12203896 +n12204032 +n12204175 +n12204730 +n12205460 +n12205694 +n12214789 +n12215022 +n12215210 +n12215579 +n12215824 +n12216215 +n12216628 +n12216968 +n12217453 +n12217851 +n12218274 +n12218490 +n12218868 +n12219668 +n12220019 +n12220496 +n12220829 +n12221191 +n12221368 +n12221522 +n12221801 +n12222090 +n12222493 +n12222900 +n12223160 +n12223569 +n12223764 +n12224978 +n12225222 +n12225349 +n12225563 +n12226932 +n12227658 +n12227909 +n12228229 +n12228387 +n12228689 +n12228886 +n12229111 +n12229651 +n12229887 +n12230540 +n12230794 +n12231192 +n12231709 +n12232114 +n12232280 +n12232851 +n12233249 +n12234318 +n12234669 +n12235051 +n12235479 +n12236160 +n12236546 +n12236768 +n12236977 +n12237152 +n12237486 +n12237641 +n12237855 +n12238756 +n12238913 +n12239240 +n12239647 +n12239880 +n12240150 +n12240477 +n12240965 +n12241192 +n12241426 +n12241880 +n12242123 +n12242409 +n12242850 +n12243109 +n12243693 +n12244153 +n12244458 +n12244650 +n12244819 +n12245319 +n12245695 +n12245885 +n12246037 +n12246232 +n12246773 +n12246941 +n12247202 +n12247407 +n12247963 +n12248141 +n12248359 +n12248574 +n12248780 +n12248941 +n12249122 +n12249294 +n12249542 +n12251001 +n12251278 +n12251740 +n12252168 +n12252383 +n12252866 +n12253229 +n12253487 +n12253664 +n12253835 +n12254168 +n12255225 +n12256112 +n12256325 +n12256522 +n12256708 +n12256920 +n12257570 +n12257725 +n12258101 +n12258885 +n12259316 +n12260799 +n12261359 +n12261571 +n12261808 +n12262018 +n12262185 +n12262553 +n12263038 +n12263204 +n12263410 +n12263588 +n12263738 +n12263987 +n12264512 +n12264786 +n12265083 +n12265394 +n12265600 +n12266217 +n12266528 +n12266644 +n12266796 +n12266984 +n12267133 +n12267265 +n12267411 +n12267534 +n12267677 +n12267931 +n12268246 +n12269241 +n12269406 +n12269652 +n12270027 +n12270278 +n12270460 +n12270741 +n12270946 +n12271187 +n12271451 +n12271643 +n12271933 +n12272239 +n12272432 +n12272735 +n12272883 +n12273114 +n12273344 +n12273515 +n12273768 +n12273939 +n12274151 +n12274358 +n12274630 +n12274863 +n12275131 +n12275317 +n12275489 +n12275675 +n12275888 +n12276110 +n12276314 +n12276477 +n12276628 +n12276872 +n12277150 +n12277334 +n12277578 +n12277800 +n12278107 +n12278371 +n12278650 +n12278865 +n12279060 +n12279293 +n12279458 +n12279772 +n12280060 +n12280364 +n12281241 +n12281788 +n12281974 +n12282235 +n12282527 +n12282737 +n12282933 +n12283147 +n12283395 +n12283542 +n12283790 +n12284262 +n12284821 +n12285049 +n12285195 +n12285369 +n12285512 +n12285705 +n12285900 +n12286068 +n12286197 +n12286826 +n12286988 +n12287195 +n12287642 +n12287836 +n12288005 +n12288823 +n12289310 +n12289433 +n12289585 +n12290748 +n12290975 +n12291143 +n12291459 +n12291671 +n12291959 +n12292463 +n12292877 +n12293723 +n12294124 +n12294331 +n12294542 +n12294723 +n12294871 +n12295033 +n12295237 +n12295429 +n12295796 +n12296045 +n12296432 +n12296735 +n12296929 +n12297110 +n12297280 +n12297507 +n12297846 +n12298165 +n12299640 +n12300840 +n12301180 +n12301445 +n12301613 +n12301766 +n12302071 +n12302248 +n12302565 +n12303083 +n12303462 +n12304115 +n12304286 +n12304420 +n12304703 +n12304899 +n12305089 +n12305293 +n12305475 +n12305654 +n12305819 +n12305986 +n12306089 +n12306270 +n12306717 +n12306938 +n12307076 +n12307240 +n12307756 +n12308112 +n12308447 +n12308907 +n12309277 +n12309630 +n12310021 +n12310349 +n12310638 +n12311045 +n12311224 +n12311413 +n12311579 +n12312110 +n12312728 +n12315060 +n12315245 +n12315598 +n12315999 +n12316444 +n12316572 +n12317296 +n12318378 +n12318782 +n12318965 +n12319204 +n12319414 +n12320010 +n12320414 +n12320627 +n12320806 +n12321077 +n12321395 +n12321669 +n12321873 +n12322099 +n12322501 +n12322699 +n12323665 +n12324056 +n12324222 +n12324388 +n12324558 +n12324906 +n12325234 +n12325787 +n12327022 +n12327528 +n12327846 +n12328398 +n12328567 +n12328801 +n12329260 +n12329473 +n12330239 +n12330469 +n12330587 +n12330891 +n12331066 +n12331263 +n12331655 +n12331788 +n12332030 +n12332218 +n12332555 +n12333053 +n12333530 +n12333771 +n12333961 +n12334153 +n12334293 +n12334891 +n12335483 +n12335664 +n12335800 +n12335937 +n12336092 +n12336224 +n12336333 +n12336586 +n12336727 +n12336973 +n12337131 +n12337246 +n12337391 +n12337617 +n12337800 +n12337922 +n12338034 +n12338146 +n12338258 +n12338454 +n12338655 +n12338796 +n12338979 +n12339526 +n12339831 +n12340383 +n12340581 +n12340755 +n12341542 +n12341931 +n12342299 +n12342498 +n12342852 +n12343480 +n12343753 +n12344283 +n12344483 +n12344700 +n12344837 +n12345280 +n12345899 +n12346578 +n12346813 +n12346986 +n12347158 +n12349315 +n12349711 +n12350032 +n12350758 +n12351091 +n12351790 +n12352287 +n12352639 +n12352844 +n12352990 +n12353203 +n12353431 +n12353754 +n12355760 +n12356023 +n12356395 +n12356960 +n12357485 +n12357968 +n12358293 +n12360108 +n12360534 +n12360684 +n12360817 +n12360958 +n12361135 +n12361560 +n12361754 +n12361946 +n12362274 +n12362514 +n12362668 +n12363301 +n12363768 +n12364604 +n12364940 +n12365158 +n12365285 +n12365462 +n12365900 +n12366053 +n12366186 +n12366313 +n12366675 +n12366870 +n12367611 +n12368028 +n12368257 +n12368451 +n12369066 +n12369309 +n12369476 +n12369665 +n12369845 +n12370174 +n12370549 +n12371202 +n12371439 +n12371704 +n12372233 +n12373100 +n12373739 +n12374418 +n12374705 +n12374862 +n12375769 +n12377198 +n12377494 +n12378249 +n12378753 +n12378963 +n12379531 +n12380761 +n12381511 +n12382233 +n12382875 +n12383737 +n12383894 +n12384037 +n12384227 +n12384375 +n12384569 +n12384680 +n12384839 +n12385429 +n12385566 +n12385830 +n12386945 +n12387103 +n12387633 +n12387839 +n12388143 +n12388293 +n12388858 +n12388989 +n12389130 +n12389501 +n12389727 +n12389932 +n12390099 +n12390314 +n12392070 +n12392549 +n12392765 +n12393269 +n12394118 +n12394328 +n12394638 +n12395068 +n12395289 +n12395463 +n12395906 +n12396091 +n12396924 +n12397431 +n12399132 +n12399384 +n12399534 +n12399656 +n12399899 +n12400489 +n12400720 +n12400924 +n12401335 +n12401684 +n12401893 +n12402051 +n12402348 +n12402596 +n12402840 +n12403075 +n12403276 +n12403513 +n12403994 +n12404729 +n12405714 +n12406304 +n12406488 +n12406715 +n12406902 +n12407079 +n12407222 +n12407396 +n12407545 +n12407715 +n12407890 +n12408077 +n12408280 +n12408466 +n12408717 +n12408873 +n12409231 +n12409470 +n12409651 +n12409840 +n12411461 +n12412355 +n12412606 +n12412987 +n12413165 +n12413301 +n12413419 +n12413642 +n12413880 +n12414035 +n12414159 +n12414329 +n12414449 +n12414818 +n12414932 +n12415595 +n12416073 +n12416423 +n12416703 +n12417836 +n12418221 +n12418507 +n12419037 +n12419878 +n12420124 +n12420535 +n12420722 +n12421137 +n12421467 +n12421683 +n12421917 +n12422129 +n12422559 +n12425281 +n12426623 +n12426749 +n12427184 +n12427391 +n12427566 +n12427757 +n12427946 +n12428076 +n12428242 +n12428412 +n12428747 +n12429352 +n12430198 +n12430471 +n12430675 +n12431434 +n12432069 +n12432356 +n12432574 +n12432707 +n12433081 +n12433178 +n12433769 +n12433952 +n12434106 +n12434483 +n12434634 +n12434775 +n12434985 +n12435152 +n12435486 +n12435649 +n12435777 +n12435965 +n12436090 +n12436907 +n12437513 +n12437769 +n12437930 +n12439154 +n12439830 +n12441183 +n12441390 +n12441552 +n12441958 +n12442548 +n12443323 +n12443736 +n12444095 +n12444898 +n12446200 +n12446519 +n12446737 +n12446908 +n12447121 +n12447346 +n12447581 +n12447891 +n12448136 +n12448361 +n12448700 +n12449296 +n12449526 +n12449784 +n12449934 +n12450344 +n12450607 +n12450840 +n12451070 +n12451240 +n12451399 +n12451566 +n12451915 +n12452256 +n12452480 +n12452673 +n12452836 +n12453018 +n12453186 +n12453714 +n12453857 +n12454159 +n12454436 +n12454556 +n12454705 +n12454793 +n12454949 +n12455950 +n12457091 +n12458550 +n12458713 +n12458874 +n12459629 +n12460146 +n12460697 +n12460957 +n12461109 +n12461466 +n12461673 +n12462032 +n12462221 +n12462582 +n12462805 +n12463134 +n12463743 +n12463975 +n12464128 +n12464476 +n12464649 +n12465557 +n12466727 +n12467018 +n12467197 +n12467433 +n12467592 +n12468545 +n12468719 +n12469517 +n12470092 +n12470512 +n12470907 +n12472024 +n12473608 +n12473840 +n12474167 +n12474418 +n12475035 +n12475242 +n12475774 +n12476510 +n12477163 +n12477401 +n12477583 +n12477747 +n12477983 +n12478768 +n12479537 +n12480456 +n12480895 +n12481150 +n12481289 +n12481458 +n12482437 +n12482668 +n12482893 +n12483282 +n12483427 +n12483625 +n12483841 +n12484244 +n12484784 +n12485653 +n12485981 +n12486574 +n12487058 +n12488454 +n12488709 +n12489046 +n12489676 +n12489815 +n12490490 +n12491017 +n12491435 +n12491826 +n12492106 +n12492460 +n12492682 +n12492900 +n12493208 +n12493426 +n12493868 +n12494794 +n12495146 +n12495670 +n12495895 +n12496427 +n12496949 +n12497669 +n12498055 +n12498457 +n12499163 +n12499757 +n12499979 +n12500309 +n12500518 +n12500751 +n12501202 +n12504570 +n12504783 +n12505253 +n12506181 +n12506341 +n12506991 +n12507379 +n12507823 +n12508309 +n12508618 +n12508762 +n12509109 +n12509476 +n12509665 +n12509821 +n12509993 +n12510343 +n12510774 +n12511488 +n12511856 +n12512095 +n12512294 +n12512674 +n12513172 +n12513613 +n12513933 +n12514138 +n12514592 +n12514992 +n12515393 +n12515711 +n12515925 +n12516165 +n12516584 +n12516828 +n12517077 +n12517445 +n12517642 +n12518013 +n12518481 +n12519089 +n12519563 +n12520406 +n12521186 +n12521394 +n12522188 +n12522678 +n12522894 +n12523141 +n12523475 +n12523850 +n12524188 +n12525168 +n12525513 +n12525753 +n12526178 +n12526516 +n12526754 +n12527081 +n12527738 +n12528109 +n12528382 +n12528549 +n12528768 +n12528974 +n12529220 +n12529500 +n12529905 +n12530629 +n12530818 +n12531328 +n12531727 +n12532564 +n12532886 +n12533190 +n12533437 +n12534208 +n12534625 +n12534862 +n12536291 +n12537253 +n12537569 +n12538209 +n12539074 +n12539306 +n12539832 +n12540250 +n12540647 +n12540966 +n12541157 +n12541403 +n12542043 +n12542240 +n12543186 +n12543455 +n12543639 +n12543826 +n12544240 +n12544539 +n12545232 +n12545635 +n12545865 +n12546183 +n12546420 +n12546617 +n12546962 +n12547215 +n12547503 +n12548280 +n12548564 +n12548804 +n12549005 +n12549192 +n12549420 +n12549799 +n12550210 +n12550408 +n12551173 +n12551457 +n12552309 +n12552893 +n12553742 +n12554029 +n12554526 +n12554729 +n12554911 +n12555255 +n12555859 +n12556656 +n12557064 +n12557438 +n12557556 +n12557681 +n12558230 +n12558425 +n12558680 +n12559044 +n12559518 +n12560282 +n12560621 +n12560775 +n12561169 +n12561309 +n12561594 +n12562141 +n12562577 +n12562785 +n12563045 +n12563702 +n12564083 +n12564613 +n12565102 +n12565912 +n12566331 +n12566954 +n12567950 +n12568186 +n12568649 +n12569037 +n12569616 +n12569851 +n12570394 +n12570703 +n12570972 +n12571781 +n12572546 +n12572759 +n12572858 +n12573256 +n12573474 +n12573647 +n12573911 +n12574320 +n12574470 +n12574866 +n12575322 +n12575812 +n12576323 +n12576451 +n12576695 +n12577362 +n12577895 +n12578255 +n12578626 +n12578916 +n12579038 +n12579404 +n12579822 +n12580012 +n12580654 +n12580786 +n12580896 +n12581110 +n12582231 +n12582665 +n12582846 +n12583126 +n12583401 +n12583681 +n12583855 +n12584191 +n12584365 +n12584715 +n12585137 +n12585373 +n12585629 +n12586298 +n12586499 +n12586725 +n12586989 +n12587132 +n12587487 +n12587803 +n12588320 +n12588780 +n12589142 +n12589458 +n12589687 +n12589841 +n12590232 +n12590499 +n12590600 +n12590715 +n12591017 +n12591351 +n12591702 +n12592058 +n12592544 +n12592839 +n12593122 +n12593341 +n12593994 +n12594324 +n12594989 +n12595699 +n12595964 +n12596148 +n12596345 +n12596709 +n12596849 +n12597134 +n12597466 +n12597798 +n12598027 +n12599185 +n12599435 +n12599661 +n12599874 +n12600095 +n12600267 +n12601494 +n12601805 +n12602262 +n12602434 +n12602612 +n12602980 +n12603273 +n12603449 +n12603672 +n12604228 +n12604460 +n12604639 +n12604845 +n12605683 +n12606438 +n12606545 +n12607456 +n12609379 +n12610328 +n12610740 +n12611640 +n12612170 +n12612811 +n12613706 +n12614096 +n12614477 +n12614625 +n12615232 +n12615710 +n12616248 +n12616630 +n12616996 +n12617559 +n12618146 +n12618727 +n12620196 +n12620546 +n12620969 +n12621410 +n12621619 +n12621945 +n12622297 +n12622875 +n12623077 +n12623211 +n12623818 +n12624381 +n12624568 +n12625003 +n12625383 +n12625670 +n12625823 +n12626674 +n12626878 +n12627119 +n12627347 +n12627526 +n12628356 +n12628705 +n12628986 +n12629305 +n12629666 +n12630763 +n12630999 +n12631331 +n12631637 +n12631932 +n12632335 +n12632733 +n12633061 +n12633638 +n12633994 +n12634211 +n12634429 +n12634734 +n12634986 +n12635151 +n12635359 +n12635532 +n12635744 +n12635955 +n12636224 +n12636885 +n12637123 +n12637485 +n12638218 +n12638556 +n12638753 +n12638964 +n12639168 +n12639376 +n12639584 +n12639736 +n12639910 +n12640081 +n12640284 +n12640435 +n12640607 +n12640839 +n12641007 +n12641180 +n12641413 +n12641931 +n12642090 +n12642200 +n12642435 +n12642600 +n12642964 +n12643113 +n12643313 +n12643473 +n12643688 +n12643877 +n12644283 +n12644902 +n12645174 +n12645530 +n12646072 +n12646197 +n12646397 +n12646605 +n12646740 +n12646950 +n12647231 +n12647376 +n12647560 +n12647787 +n12647893 +n12648045 +n12648196 +n12648424 +n12648693 +n12648888 +n12649065 +n12649317 +n12649539 +n12649866 +n12650038 +n12650229 +n12650379 +n12650556 +n12650805 +n12650915 +n12651229 +n12651611 +n12651821 +n12653218 +n12653436 +n12653633 +n12654227 +n12654857 +n12655062 +n12655245 +n12655351 +n12655498 +n12655605 +n12655726 +n12655869 +n12656369 +n12656528 +n12656685 +n12656909 +n12657082 +n12657755 +n12658118 +n12658308 +n12658481 +n12658603 +n12658715 +n12658846 +n12659064 +n12659356 +n12659539 +n12660601 +n12661045 +n12661227 +n12661538 +n12662074 +n12662379 +n12662772 +n12663023 +n12663254 +n12663359 +n12663804 +n12664005 +n12664187 +n12664469 +n12664710 +n12665048 +n12665271 +n12665659 +n12665857 +n12666050 +n12666159 +n12666369 +n12666965 +n12667406 +n12667582 +n12667964 +n12668131 +n12669803 +n12670334 +n12670758 +n12670962 +n12671651 +n12672289 +n12673588 +n12674120 +n12674685 +n12674895 +n12675299 +n12675515 +n12675876 +n12676134 +n12676370 +n12676534 +n12676703 +n12677120 +n12677331 +n12677612 +n12677841 +n12678794 +n12679023 +n12679432 +n12679593 +n12679876 +n12680402 +n12680652 +n12680864 +n12681376 +n12681579 +n12681893 +n12682411 +n12682668 +n12682882 +n12683096 +n12683407 +n12683571 +n12683791 +n12684379 +n12685431 +n12685831 +n12686077 +n12686274 +n12686496 +n12686676 +n12686877 +n12687044 +n12687462 +n12687698 +n12687957 +n12688187 +n12688372 +n12688716 +n12689305 +n12690653 +n12691428 +n12691661 +n12692024 +n12692160 +n12692521 +n12692714 +n12693244 +n12693352 +n12693865 +n12694486 +n12695144 +n12695975 +n12696492 +n12696830 +n12697152 +n12697514 +n12698027 +n12698435 +n12698598 +n12698774 +n12699031 +n12699301 +n12699922 +n12700088 +n12700357 +n12702124 +n12703190 +n12703383 +n12703557 +n12703716 +n12703856 +n12704041 +n12704343 +n12704513 +n12705013 +n12705220 +n12705458 +n12705698 +n12705978 +n12706410 +n12707199 +n12707781 +n12708293 +n12708654 +n12708941 +n12709103 +n12709349 +n12709688 +n12709901 +n12710295 +n12710415 +n12710577 +n12710693 +n12710917 +n12711182 +n12711398 +n12711596 +n12711817 +n12711984 +n12712320 +n12712626 +n12713063 +n12713358 +n12713521 +n12713866 +n12714254 +n12714755 +n12714949 +n12715195 +n12715914 +n12716400 +n12716594 +n12717072 +n12717224 +n12717644 +n12718074 +n12718483 +n12718995 +n12719684 +n12719944 +n12720200 +n12720354 +n12721122 +n12721477 +n12722071 +n12723062 +n12723610 +n12724942 +n12725521 +n12725738 +n12725940 +n12726159 +n12726357 +n12726528 +n12726670 +n12726902 +n12727101 +n12727301 +n12727518 +n12727729 +n12727960 +n12728164 +n12728322 +n12728508 +n12728656 +n12728864 +n12729023 +n12729164 +n12729315 +n12729521 +n12729729 +n12729950 +n12730143 +n12730370 +n12730544 +n12730776 +n12731029 +n12731401 +n12731835 +n12732009 +n12732252 +n12732491 +n12732605 +n12732756 +n12732966 +n12733218 +n12733428 +n12733647 +n12733870 +n12734070 +n12734215 +n12735160 +n12736603 +n12736999 +n12737383 +n12737898 +n12738259 +n12739332 +n12739966 +n12740967 +n12741222 +n12741586 +n12741792 +n12742290 +n12742741 +n12742878 +n12743009 +n12743352 +n12743823 +n12743976 +n12744142 +n12744387 +n12744850 +n12745386 +n12745564 +n12746884 +n12747120 +n12748248 +n12749049 +n12749456 +n12749679 +n12749852 +n12750076 +n12750767 +n12751172 +n12751675 +n12752205 +n12753007 +n12753245 +n12753573 +n12753762 +n12754003 +n12754174 +n12754311 +n12754468 +n12754648 +n12754781 +n12754981 +n12755225 +n12755387 +n12755559 +n12755727 +n12755876 +n12756457 +n12757115 +n12757303 +n12757458 +n12757668 +n12757816 +n12757930 +n12758014 +n12758099 +n12758176 +n12758250 +n12758325 +n12758399 +n12758471 +n12758555 +n12759273 +n12759668 +n12760539 +n12760875 +n12761284 +n12761702 +n12761905 +n12762049 +n12762405 +n12762896 +n12763529 +n12764008 +n12764202 +n12764507 +n12764978 +n12765115 +n12765402 +n12765846 +n12766043 +n12766595 +n12766869 +n12767208 +n12767423 +n12767648 +n12768369 +n12768682 +n12768809 +n12768933 +n12769065 +n12769219 +n12769318 +n12770529 +n12770892 +n12771085 +n12771192 +n12771390 +n12771597 +n12771890 +n12772753 +n12772908 +n12773142 +n12773651 +n12773917 +n12774299 +n12774641 +n12775070 +n12775393 +n12775717 +n12775919 +n12776558 +n12776774 +n12777436 +n12777680 +n12777778 +n12777892 +n12778398 +n12778605 +n12779603 +n12779851 +n12780325 +n12780563 +n12781940 +n12782530 +n12782915 +n12783316 +n12783730 +n12784371 +n12784889 +n12785724 +n12785889 +n12786273 +n12786464 +n12786836 +n12787364 +n12788854 +n12789054 +n12789554 +n12789977 +n12790430 +n12791064 +n12791329 +n12793015 +n12793284 +n12793494 +n12793695 +n12793886 +n12794135 +n12794367 +n12794568 +n12794985 +n12795209 +n12795352 +n12795555 +n12796022 +n12796385 +n12796849 +n12797368 +n12797860 +n12798284 +n12798910 +n12799269 +n12799776 +n12800049 +n12800586 +n12801072 +n12801520 +n12801781 +n12801966 +n12803226 +n12803754 +n12803958 +n12804352 +n12805146 +n12805561 +n12805762 +n12806015 +n12806732 +n12807251 +n12807409 +n12807624 +n12807773 +n12808007 +n12809868 +n12810007 +n12810151 +n12810595 +n12811027 +n12811713 +n12812235 +n12812478 +n12812801 +n12813189 +n12814643 +n12814857 +n12814960 +n12815198 +n12815668 +n12815838 +n12816508 +n12816942 +n12817464 +n12817694 +n12817855 +n12818004 +n12818346 +n12818601 +n12818966 +n12819141 +n12819354 +n12819728 +n12820113 +n12820669 +n12820853 +n12821505 +n12821895 +n12822115 +n12822466 +n12822769 +n12822955 +n12823717 +n12823859 +n12824053 +n12824289 +n12824735 +n12825497 +n12826143 +n12827270 +n12827537 +n12827907 +n12828220 +n12828379 +n12828520 +n12828791 +n12828977 +n12829582 +n12829975 +n12830222 +n12830568 +n12831141 +n12831535 +n12831932 +n12832315 +n12832538 +n12832822 +n12833149 +n12833985 +n12834190 +n12834798 +n12834938 +n12835331 +n12835766 +n12836212 +n12836337 +n12836508 +n12836862 +n12837052 +n12837259 +n12837466 +n12837803 +n12839574 +n12839979 +n12840168 +n12840362 +n12840502 +n12840749 +n12841007 +n12841193 +n12841354 +n12842302 +n12842519 +n12842642 +n12842887 +n12843144 +n12843316 +n12843557 +n12843970 +n12844409 +n12844939 +n12845187 +n12845413 +n12845908 +n12846335 +n12846690 +n12847008 +n12847374 +n12847927 +n12848499 +n12849061 +n12849279 +n12849416 +n12849952 +n12850168 +n12850336 +n12850906 +n12851094 +n12851469 +n12851860 +n12852234 +n12852428 +n12852570 +n12853080 +n12853287 +n12853482 +n12854048 +n12854193 +n12854600 +n12855365 +n12855494 +n12855710 +n12855886 +n12856091 +n12856287 +n12856479 +n12856680 +n12857204 +n12857779 +n12858150 +n12858397 +n12858618 +n12858871 +n12858987 +n12859153 +n12859272 +n12859679 +n12859986 +n12860365 +n12860978 +n12861345 +n12861541 +n12861892 +n12862512 +n12862828 +n12863234 +n12863624 +n12864160 +n12865037 +n12865562 +n12865708 +n12865824 +n12866002 +n12866162 +n12866333 +n12866459 +n12866635 +n12866968 +n12867184 +n12867449 +n12867826 +n12868019 +n12868880 +n12869061 +n12869478 +n12869668 +n12870048 +n12870225 +n12870535 +n12870682 +n12870891 +n12871272 +n12871696 +n12871859 +n12872458 +n12872914 +n12873341 +n12873984 +n12875269 +n12875697 +n12875861 +n12876899 +n12877244 +n12877493 +n12877637 +n12877838 +n12878169 +n12878325 +n12878784 +n12879068 +n12879527 +n12879963 +n12880244 +n12880462 +n12880638 +n12880799 +n12881105 +n12881913 +n12882158 +n12882779 +n12882945 +n12883265 +n12883628 +n12884100 +n12884260 +n12885045 +n12885265 +n12885510 +n12885754 +n12886185 +n12886402 +n12886600 +n12886831 +n12887293 +n12887532 +n12887713 +n12888016 +n12888234 +n12888457 +n12889219 +n12889412 +n12889579 +n12889713 +n12890265 +n12890490 +n12890685 +n12890928 +n12891093 +n12891305 +n12891469 +n12891643 +n12891824 +n12892013 +n12893463 +n12893993 +n12895298 +n12895811 +n12896615 +n12897118 +n12897788 +n12897999 +n12898342 +n12898774 +n12899166 +n12899537 +n12899752 +n12899971 +n12900783 +n12901724 +n12902466 +n12902662 +n12903014 +n12903367 +n12903503 +n12903964 +n12904314 +n12904562 +n12904938 +n12905135 +n12905412 +n12906214 +n12906498 +n12906771 +n12907057 +n12907671 +n12907857 +n12908093 +n12908645 +n12908854 +n12909421 +n12909614 +n12909759 +n12909917 +n12911079 +n12911264 +n12911440 +n12911673 +n12911914 +n12912274 +n12912670 +n12912801 +n12913144 +n12913524 +n12913791 +n12914923 +n12915140 +n12915568 +n12915811 +n12916179 +n12916511 +n12917901 +n12918609 +n12918810 +n12918991 +n12919195 +n12919403 +n12919646 +n12919847 +n12920043 +n12920204 +n12920521 +n12920719 +n12920955 +n12921315 +n12921499 +n12921660 +n12921868 +n12922119 +n12922458 +n12922763 +n12923108 +n12923257 +n12924623 +n12925179 +n12925583 +n12926039 +n12926480 +n12926689 +n12927013 +n12927194 +n12927494 +n12927758 +n12928071 +n12928307 +n12928491 +n12928819 +n12929403 +n12929600 +n12930778 +n12930951 +n12931231 +n12931542 +n12931906 +n12932173 +n12932365 +n12932706 +n12932966 +n12933274 +n12934036 +n12934174 +n12934479 +n12934685 +n12934985 +n12935166 +n12935609 +n12936155 +n12936826 +n12937130 +n12938081 +n12938193 +n12938445 +n12938667 +n12939104 +n12939282 +n12939479 +n12939874 +n12940226 +n12940609 +n12941220 +n12941536 +n12941717 +n12942025 +n12942395 +n12942572 +n12942729 +n12943049 +n12943443 +n12943912 +n12944095 +n12945177 +n12945366 +n12945549 +n12946849 +n12947313 +n12947544 +n12947756 +n12947895 +n12948053 +n12948251 +n12948495 +n12949160 +n12949361 +n12950126 +n12950314 +n12950796 +n12951146 +n12951835 +n12952165 +n12952469 +n12952590 +n12952717 +n12953206 +n12953484 +n12953712 +n12954353 +n12954799 +n12955414 +n12955840 +n12956170 +n12956367 +n12956588 +n12956922 +n12957608 +n12957803 +n12957924 +n12958261 +n12958615 +n12959074 +n12959538 +n12960378 +n12960552 +n12960863 +n12961242 +n12961393 +n12961536 +n12961879 +n12963628 +n12964920 +n12965626 +n12965951 +n12966804 +n12966945 +n12968136 +n12968309 +n12969131 +n12969425 +n12969670 +n12969927 +n12970193 +n12970293 +n12970733 +n12971400 +n12971804 +n12972136 +n12973443 +n12973791 +n12973937 +n12974987 +n12975804 +n12976198 +n12976554 +n12978076 +n12979316 +n12979829 +n12980080 +n12980840 +n12981086 +n12981301 +n12981443 +n12981954 +n12982468 +n12982590 +n12982915 +n12983048 +n12983654 +n12983873 +n12983961 +n12984267 +n12984489 +n12984595 +n12985420 +n12985773 +n12985857 +n12986227 +n12987056 +n12987423 +n12987535 +n12988158 +n12988341 +n12988572 +n12989007 +n12989938 +n12990597 +n12991184 +n12991837 +n12992177 +n12992868 +n12994892 +n12995601 +n12997654 +n12997919 +n12998815 +n13000891 +n13001041 +n13001206 +n13001366 +n13001529 +n13001930 +n13002209 +n13002750 +n13002925 +n13003061 +n13003254 +n13003522 +n13003712 +n13004423 +n13004640 +n13004826 +n13004992 +n13005329 +n13005984 +n13006171 +n13006631 +n13006894 +n13007034 +n13007417 +n13007629 +n13008157 +n13008315 +n13008485 +n13008689 +n13008839 +n13009085 +n13009244 +n13009429 +n13009656 +n13010694 +n13010951 +n13011221 +n13011595 +n13012253 +n13012469 +n13012973 +n13013534 +n13013764 +n13013965 +n13014097 +n13014265 +n13014409 +n13014581 +n13014741 +n13014879 +n13015509 +n13015688 +n13016076 +n13016289 +n13017102 +n13017240 +n13017439 +n13017610 +n13017789 +n13017979 +n13018088 +n13018232 +n13018407 +n13018906 +n13019496 +n13019643 +n13019835 +n13020191 +n13020481 +n13020964 +n13021166 +n13021332 +n13021543 +n13021689 +n13021867 +n13022210 +n13022709 +n13022903 +n13023134 +n13024012 +n13024500 +n13024653 +n13025647 +n13025854 +n13026015 +n13027557 +n13027879 +n13028611 +n13028937 +n13029122 +n13029326 +n13029610 +n13029760 +n13030337 +n13030616 +n13030852 +n13031193 +n13031323 +n13031474 +n13032115 +n13032381 +n13032618 +n13032923 +n13033134 +n13033396 +n13033577 +n13033879 +n13034062 +n13034555 +n13034788 +n13035241 +n13035389 +n13035707 +n13035925 +n13036116 +n13036312 +n13036804 +n13037406 +n13037585 +n13037805 +n13038068 +n13038376 +n13038577 +n13038744 +n13039349 +n13040303 +n13040629 +n13040796 +n13041312 +n13041943 +n13042134 +n13042316 +n13042982 +n13043926 +n13044375 +n13044778 +n13045210 +n13045594 +n13045975 +n13046130 +n13046669 +n13047862 +n13048447 +n13049953 +n13050397 +n13050705 +n13050940 +n13051346 +n13052014 +n13052248 +n13052670 +n13052931 +n13053608 +n13054073 +n13054560 +n13055423 +n13055577 +n13055792 +n13055949 +n13056135 +n13056349 +n13056607 +n13056799 +n13057054 +n13057242 +n13057422 +n13057639 +n13058037 +n13058272 +n13058608 +n13059298 +n13059657 +n13060017 +n13060190 +n13061172 +n13061348 +n13061471 +n13061704 +n13062421 +n13063269 +n13063514 +n13064111 +n13064457 +n13065089 +n13065514 +n13066129 +n13066448 +n13066979 +n13067191 +n13067330 +n13067532 +n13067672 +n13068255 +n13068434 +n13068735 +n13068917 +n13069224 +n13069773 +n13070308 +n13070875 +n13071371 +n13071553 +n13071815 +n13072031 +n13072209 +n13072350 +n13072528 +n13072706 +n13072863 +n13073055 +n13073703 +n13074619 +n13074814 +n13075020 +n13075272 +n13075441 +n13075684 +n13075847 +n13076041 +n13076405 +n13076643 +n13076831 +n13077033 +n13077295 +n13078021 +n13079073 +n13079419 +n13079567 +n13080306 +n13080866 +n13081229 +n13081999 +n13082568 +n13083023 +n13083461 +n13084184 +n13084834 +n13085113 +n13085747 +n13090018 +n13090871 +n13091620 +n13091774 +n13091982 +n13092078 +n13092240 +n13092385 +n13092987 +n13093275 +n13093629 +n13094145 +n13094273 +n13095013 +n13096779 +n13098515 +n13098962 +n13099833 +n13099999 +n13100156 +n13100677 +n13102648 +n13102775 +n13103023 +n13103660 +n13103750 +n13103877 +n13104059 +n13107694 +n13107807 +n13107891 +n13108131 +n13108323 +n13108481 +n13108545 +n13108662 +n13108841 +n13109733 +n13110915 +n13111174 +n13111340 +n13111504 +n13111881 +n13112035 +n13112201 +n13118330 +n13118707 +n13119870 +n13120211 +n13120958 +n13121104 +n13121349 +n13122364 +n13123309 +n13123431 +n13123841 +n13124358 +n13124654 +n13125117 +n13126050 +n13126856 +n13127001 +n13127303 +n13127666 +n13127843 +n13128278 +n13128582 +n13128976 +n13129078 +n13130014 +n13130161 +n13130726 +n13131028 +n13131618 +n13132034 +n13132156 +n13132338 +n13132486 +n13132656 +n13132756 +n13132940 +n13133140 +n13133233 +n13133316 +n13133613 +n13133932 +n13134302 +n13134531 +n13134844 +n13134947 +n13135692 +n13135832 +n13136316 +n13136556 +n13136781 +n13137010 +n13137225 +n13137409 +n13137672 +n13137951 +n13138155 +n13138308 +n13138658 +n13138842 +n13139055 +n13139321 +n13139482 +n13139647 +n13139837 +n13140049 +n13140367 +n13141141 +n13141415 +n13141564 +n13141797 +n13141972 +n13142182 +n13142504 +n13142907 +n13143285 +n13143758 +n13144084 +n13145040 +n13145250 +n13145444 +n13146403 +n13146583 +n13146928 +n13147153 +n13147270 +n13147386 +n13147532 +n13147689 +n13147918 +n13148208 +n13148384 +n13149296 +n13149970 +n13150378 +n13150592 +n13150894 +n13151082 +n13152339 +n13154388 +n13154494 +n13154841 +n13155095 +n13155305 +n13155611 +n13156986 +n13157137 +n13157346 +n13157481 +n13157684 +n13157971 +n13158167 +n13158512 +n13158605 +n13158714 +n13158815 +n13159357 +n13159691 +n13159890 +n13160116 +n13160254 +n13160365 +n13160604 +n13160831 +n13160938 +n13161151 +n13161254 +n13161904 +n13163553 +n13163649 +n13163991 +n13164501 +n13170840 +n13171210 +n13171797 +n13172923 +n13173132 +n13173259 +n13173488 +n13173697 +n13173882 +n13174354 +n13174670 +n13174823 +n13175682 +n13176363 +n13176714 +n13177048 +n13177529 +n13177768 +n13177884 +n13178284 +n13178707 +n13179056 +n13179804 +n13180534 +n13180875 +n13181055 +n13181244 +n13181406 +n13181811 +n13182164 +n13182338 +n13182799 +n13182937 +n13183056 +n13183489 +n13184394 +n13185269 +n13185658 +n13186388 +n13186546 +n13187367 +n13188096 +n13188268 +n13188462 +n13188767 +n13190060 +n13190747 +n13191148 +n13191620 +n13191884 +n13192625 +n13193143 +n13193269 +n13193466 +n13193642 +n13193856 +n13194036 +n13194212 +n13194572 +n13194758 +n13194918 +n13195341 +n13195761 +n13196003 +n13196234 +n13196369 +n13196738 +n13197274 +n13197507 +n13198054 +n13198482 +n13198914 +n13199717 +n13199970 +n13200193 +n13200542 +n13200651 +n13200986 +n13201423 +n13201566 +n13201969 +n13202125 +n13202355 +n13202602 +n13205058 +n13205249 +n13206178 +n13206817 +n13207094 +n13207335 +n13207572 +n13207736 +n13207923 +n13208302 +n13208705 +n13208965 +n13209129 +n13209270 +n13209460 +n13209808 +n13210350 +n13210597 +n13211020 +n13211790 +n13212025 +n13212175 +n13212379 +n13212559 +n13213066 +n13213397 +n13213577 +n13214217 +n13214340 +n13214485 +n13215258 +n13215586 +n13217005 +n13219422 +n13219833 +n13219976 +n13220122 +n13220355 +n13220525 +n13220663 +n13221529 +n13222877 +n13222985 +n13223090 +n13223588 +n13223710 +n13223843 +n13224673 +n13224922 +n13225244 +n13225365 +n13225617 +n13226320 +n13226871 +n13228017 +n13228536 +n13229543 +n13229951 +n13230190 +n13230662 +n13230843 +n13231078 +n13231678 +n13231919 +n13232106 +n13232363 +n13232779 +n13233727 +n13234114 +n13234519 +n13234678 +n13234857 +n13235011 +n13235159 +n13235319 +n13235503 +n13235766 +n13236100 +n13237188 +n13237508 +n13238375 +n13238654 +n13238988 +n13239177 +n13239736 +n13239921 +n13240362 +n13252672 +n13354021 +n13555775 +n13579829 +n13650447 +n13653902 +n13862407 +n13862552 +n13862780 +n13863020 +n13863186 +n13863473 +n13863771 +n13864035 +n13864153 +n13864965 +n13865298 +n13865483 +n13865904 +n13866144 +n13866626 +n13866827 +n13867005 +n13867492 +n13868248 +n13868371 +n13868515 +n13868944 +n13869045 +n13869547 +n13869788 +n13869896 +n13871717 +n13872592 +n13872822 +n13873361 +n13873502 +n13873917 +n13874073 +n13874558 +n13875392 +n13875571 +n13875884 +n13876561 +n13877547 +n13877667 +n13878306 +n13879049 +n13879320 +n13879816 +n13880199 +n13880415 +n13880551 +n13880704 +n13880994 +n13881512 +n13881644 +n13882201 +n13882276 +n13882487 +n13882563 +n13882639 +n13882713 +n13882961 +n13883603 +n13883763 +n13884261 +n13884384 +n13884930 +n13885011 +n13886260 +n13888491 +n13889066 +n13889331 +n13891547 +n13891937 +n13893786 +n13894154 +n13894434 +n13895262 +n13896100 +n13896217 +n13897198 +n13897528 +n13897996 +n13898207 +n13898315 +n13898645 +n13899735 +n13900287 +n13900422 +n13901211 +n13901321 +n13901423 +n13901490 +n13901858 +n13902048 +n13902336 +n13902793 +n13903079 +n13905121 +n13905275 +n13905792 +n13906484 +n13906669 +n13906767 +n13906936 +n13907272 +n13908201 +n13908580 +n13911045 +n13912260 +n13912540 +n13914141 +n13914265 +n13914608 +n13915023 +n13915113 +n13915209 +n13915305 +n13915999 +n13916363 +n13916721 +n13917690 +n13917785 +n13918274 +n13918387 +n13918717 +n13919547 +n13919919 +n13926786 +n14131950 +n14175579 +n14564779 +n14582716 +n14583400 +n14585392 +n14592309 +n14603798 +n14633206 +n14685296 +n14696793 +n14698884 +n14714645 +n14720833 +n14765422 +n14785065 +n14786943 +n14804958 +n14810561 +n14820180 +n14821852 +n14844693 +n14853210 +n14858292 +n14867545 +n14891255 +n14899328 +n14900184 +n14900342 +n14908027 +n14909584 +n14914945 +n14915184 +n14919819 +n14938389 +n14941787 +n14942411 +n14973585 +n14974264 +n14975598 +n14976759 +n14976871 +n14977188 +n14977504 +n14992287 +n14993378 +n15005577 +n15006012 +n15019030 +n15048888 +n15060326 +n15060688 +n15062057 +n15067877 +n15075141 +n15086247 +n15089258 +n15089472 +n15089645 +n15089803 +n15090065 +n15090238 +n15090742 +n15091129 +n15091304 +n15091473 +n15091669 +n15091846 +n15092059 +n15092227 +n15092409 +n15092650 +n15092751 +n15092942 +n15093049 +n15093137 +n15093298 +n15102359 +n15102455 +n15102894 diff --git a/testbed/huggingface__pytorch-image-models/results/imagenet_a_indices.txt b/testbed/huggingface__pytorch-image-models/results/imagenet_a_indices.txt new file mode 100644 index 0000000000000000000000000000000000000000..8e373bc707cb853e1b41ed35c6b21d078553a206 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/results/imagenet_a_indices.txt @@ -0,0 +1,200 @@ +6 +11 +13 +15 +17 +22 +23 +27 +30 +37 +39 +42 +47 +50 +57 +70 +71 +76 +79 +89 +90 +94 +96 +97 +99 +105 +107 +108 +110 +113 +124 +125 +130 +132 +143 +144 +150 +151 +207 +234 +235 +254 +277 +283 +287 +291 +295 +298 +301 +306 +307 +308 +309 +310 +311 +313 +314 +315 +317 +319 +323 +324 +326 +327 +330 +334 +335 +336 +347 +361 +363 +372 +378 +386 +397 +400 +401 +402 +404 +407 +411 +416 +417 +420 +425 +428 +430 +437 +438 +445 +456 +457 +461 +462 +470 +472 +483 +486 +488 +492 +496 +514 +516 +528 +530 +539 +542 +543 +549 +552 +557 +561 +562 +569 +572 +573 +575 +579 +589 +606 +607 +609 +614 +626 +627 +640 +641 +642 +643 +658 +668 +677 +682 +684 +687 +701 +704 +719 +736 +746 +749 +752 +758 +763 +765 +768 +773 +774 +776 +779 +780 +786 +792 +797 +802 +803 +804 +813 +815 +820 +823 +831 +833 +835 +839 +845 +847 +850 +859 +862 +870 +879 +880 +888 +890 +897 +900 +907 +913 +924 +932 +933 +934 +937 +943 +945 +947 +951 +954 +956 +957 +959 +971 +972 +980 +981 +984 +986 +987 +988 diff --git a/testbed/huggingface__pytorch-image-models/results/imagenet_a_synsets.txt b/testbed/huggingface__pytorch-image-models/results/imagenet_a_synsets.txt new file mode 100644 index 0000000000000000000000000000000000000000..6eeaaf66f3f471b7f622a608607a0c4e8409df33 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/results/imagenet_a_synsets.txt @@ -0,0 +1,200 @@ +n01498041 +n01531178 +n01534433 +n01558993 +n01580077 +n01614925 +n01616318 +n01631663 +n01641577 +n01669191 +n01677366 +n01687978 +n01694178 +n01698640 +n01735189 +n01770081 +n01770393 +n01774750 +n01784675 +n01819313 +n01820546 +n01833805 +n01843383 +n01847000 +n01855672 +n01882714 +n01910747 +n01914609 +n01924916 +n01944390 +n01985128 +n01986214 +n02007558 +n02009912 +n02037110 +n02051845 +n02077923 +n02085620 +n02099601 +n02106550 +n02106662 +n02110958 +n02119022 +n02123394 +n02127052 +n02129165 +n02133161 +n02137549 +n02165456 +n02174001 +n02177972 +n02190166 +n02206856 +n02219486 +n02226429 +n02231487 +n02233338 +n02236044 +n02259212 +n02268443 +n02279972 +n02280649 +n02281787 +n02317335 +n02325366 +n02346627 +n02356798 +n02361337 +n02410509 +n02445715 +n02454379 +n02486410 +n02492035 +n02504458 +n02655020 +n02669723 +n02672831 +n02676566 +n02690373 +n02701002 +n02730930 +n02777292 +n02782093 +n02787622 +n02793495 +n02797295 +n02802426 +n02814860 +n02815834 +n02837789 +n02879718 +n02883205 +n02895154 +n02906734 +n02948072 +n02951358 +n02980441 +n02992211 +n02999410 +n03014705 +n03026506 +n03124043 +n03125729 +n03187595 +n03196217 +n03223299 +n03250847 +n03255030 +n03291819 +n03325584 +n03355925 +n03384352 +n03388043 +n03417042 +n03443371 +n03444034 +n03445924 +n03452741 +n03483316 +n03584829 +n03590841 +n03594945 +n03617480 +n03666591 +n03670208 +n03717622 +n03720891 +n03721384 +n03724870 +n03775071 +n03788195 +n03804744 +n03837869 +n03840681 +n03854065 +n03888257 +n03891332 +n03935335 +n03982430 +n04019541 +n04033901 +n04039381 +n04067472 +n04086273 +n04099969 +n04118538 +n04131690 +n04133789 +n04141076 +n04146614 +n04147183 +n04179913 +n04208210 +n04235860 +n04252077 +n04252225 +n04254120 +n04270147 +n04275548 +n04310018 +n04317175 +n04344873 +n04347754 +n04355338 +n04366367 +n04376876 +n04389033 +n04399382 +n04442312 +n04456115 +n04482393 +n04507155 +n04509417 +n04532670 +n04540053 +n04554684 +n04562935 +n04591713 +n04606251 +n07583066 +n07695742 +n07697313 +n07697537 +n07714990 +n07718472 +n07720875 +n07734744 +n07749582 +n07753592 +n07760859 +n07768694 +n07831146 +n09229709 +n09246464 +n09472597 +n09835506 +n11879895 +n12057211 +n12144580 +n12267677 diff --git a/testbed/huggingface__pytorch-image-models/results/imagenet_r_indices.txt b/testbed/huggingface__pytorch-image-models/results/imagenet_r_indices.txt new file mode 100644 index 0000000000000000000000000000000000000000..e4ff6ffbbbf9ad310932b3df08863feaecdadeba --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/results/imagenet_r_indices.txt @@ -0,0 +1,200 @@ +1 +2 +4 +6 +8 +9 +11 +13 +22 +23 +26 +29 +31 +39 +47 +63 +71 +76 +79 +84 +90 +94 +96 +97 +99 +100 +105 +107 +113 +122 +125 +130 +132 +144 +145 +147 +148 +150 +151 +155 +160 +161 +162 +163 +171 +172 +178 +187 +195 +199 +203 +207 +208 +219 +231 +232 +234 +235 +242 +245 +247 +250 +251 +254 +259 +260 +263 +265 +267 +269 +276 +277 +281 +288 +289 +291 +292 +293 +296 +299 +301 +308 +309 +310 +311 +314 +315 +319 +323 +327 +330 +334 +335 +337 +338 +340 +341 +344 +347 +353 +355 +361 +362 +365 +366 +367 +368 +372 +388 +390 +393 +397 +401 +407 +413 +414 +425 +428 +430 +435 +437 +441 +447 +448 +457 +462 +463 +469 +470 +471 +472 +476 +483 +487 +515 +546 +555 +558 +570 +579 +583 +587 +593 +594 +596 +609 +613 +617 +621 +629 +637 +657 +658 +701 +717 +724 +763 +768 +774 +776 +779 +780 +787 +805 +812 +815 +820 +824 +833 +847 +852 +866 +875 +883 +889 +895 +907 +928 +931 +932 +933 +934 +936 +937 +943 +945 +947 +948 +949 +951 +953 +954 +957 +963 +965 +967 +980 +981 +983 +988 diff --git a/testbed/huggingface__pytorch-image-models/results/imagenet_real_labels.json b/testbed/huggingface__pytorch-image-models/results/imagenet_real_labels.json new file mode 100644 index 0000000000000000000000000000000000000000..2818c1f2f6113756498b40de12de3fafd05b7c05 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/results/imagenet_real_labels.json @@ -0,0 +1 @@ +[[], [970, 795], [230, 231], [809], [516, 850], [57], [334], [700], [674], [332], [109], [286], [370], [757], [595], [147], [327, 108], [21, 22], [478], [517], [334], [], [948], [727], [23], [619, 526, 846], [270], [167], [64, 55], [858], [324], [573], [150], [981], [586], [887], [], [398], [], [74], [516], [756], [129], [198], [256], [725], [565], [162, 167], [717, 581], [390, 467], [92], [29], [844], [591], [358], [468], [], [994], [872], [588], [608, 474], [183], [107], [40, 46], [842], [390], [101], [887], [870], [903, 841], [], [149], [21], [476], [80], [424], [159], [275], [175], [461], [970], [160], [788], [58], [479, 817], [498], [374], [28], [487], [50], [270], [383], [366], [484, 724], [373], [705], [330], [142], [949], [348, 349], [473], [159], [872], [878], [201], [906], [70], [889, 486], [632], [608, 774, 630, 636], [122], [720], [227], [], [162], [959], [638], [], [655, 851, 598], [645], [718], [483], [852], [397], [312, 988, 311], [457, 834], [352], [82], [934], [283], [802], [742], [276], [234, 236], [751], [342], [526, 528, 784], [328], [], [251], [163], [328], [771], [726], [977], [], [265], [], [590], [977, 978], [681, 810, 620, 508], [637], [39], [115], [937], [274], [277], [763], [905, 789], [646], [], [894], [647], [504], [937], [687], [781], [666], [583], [158], [825], [212], [659], [257, 222], [436], [199], [140], [248], [339], [230], [361], [909, 910, 926], [935], [638, 639], [654, 785], [289], [867], [], [103], [584], [243], [703], [449, 975], [771], [118], [396], [934], [16], [548], [993], [704], [841, 457], [233], [401, 593, 819], [827], [376], [146], [606], [922], [431], [284], [889], [475], [977, 978], [475], [984], [16], [77], [610, 453], [254], [636], [662], [473], [207], [25], [427, 463], [215], [230, 173], [35], [741], [125], [518, 652, 663, 465], [289], [425], [973], [], [167], [121], [445], [702], [], [366], [678], [764], [125], [349], [13], [179], [522], [], [989], [], [647, 438], [660], [801, 836, 837, 983], [533], [487], [27], [644], [750, 721], [865, 850], [1], [176], [694], [488, 664, 695, 508], [798], [809], [652, 413], [], [], [821], [421], [361], [920], [761], [27], [464], [92], [182], [897], [612], [610, 918], [283], [881], [906], [728], [426], [554], [], [531], [869], [730], [0], [866], [738, 580], [547], [43], [64], [69], [176], [329], [544, 926], [288, 290], [991], [591], [346], [1], [607], [934], [784, 828], [572], [], [888], [654], [546, 402], [390], [702], [24], [102], [949, 953, 954, 923], [810, 508], [361], [280], [65], [777], [359], [234], [21], [7], [525], [737, 886, 760, 894], [938], [254], [616, 733], [707], [463], [60], [], [531, 487, 623, 893], [380], [982], [305], [355], [503], [], [495], [472], [293], [816], [195], [738, 905], [475], [481], [431], [260], [130], [627], [977, 978], [622], [696], [300], [37], [133], [637], [867], [465], [592], [741], [908, 404, 895], [91], [109], [426], [694], [546], [208], [488, 649], [786], [959], [], [834, 906], [879, 568], [649], [228], [621], [630, 703], [107], [818, 598], [420], [], [133], [185], [471], [230], [974], [74], [76], [852], [383], [267], [], [359], [484], [510], [33], [177], [935], [310], [987, 998], [270], [598], [199], [998], [836, 837, 608], [14], [97], [856], [398], [319], [549, 681, 620], [92], [765], [840, 728, 412], [769, 945], [160], [265, 266], [638, 639], [846], [722], [183], [674], [468], [], [748, 636], [867], [636], [], [912], [721], [16], [199], [170], [], [946], [350], [557], [361], [361], [594], [861], [208], [606], [734], [767], [746], [788], [346], [153], [739], [414], [915], [], [152], [943], [849], [], [100], [546], [657], [764], [141], [39], [993], [758], [190], [888], [18], [], [341], [875], [359], [388], [894], [437], [987, 998], [517], [372], [286], [754, 662], [713], [915], [964], [146], [529], [416], [376], [147], [902], [26], [398], [175], [270], [335], [899, 559, 532, 505, 762, 923], [540], [607], [495], [257, 222], [801], [576, 879, 982, 472], [301], [166], [56], [868, 967, 968, 659], [], [], [567], [277], [], [651], [377], [684], [832], [39], [219], [863], [868], [794], [80], [983], [269, 347], [238], [781], [223], [521, 926], [830], [260], [491], [896], [220], [680], [48], [542], [], [820], [148], [113, 114], [99], [143], [691, 570], [796], [986], [346], [367], [939], [875], [625], [481, 482, 848], [464], [812], [705], [], [466], [781], [499], [617, 338], [679, 488], [858], [795], [437], [11], [625], [965], [874], [949, 954], [600, 517], [86], [133], [149], [865], [480, 582, 760, 886], [325], [499], [834], [506, 421], [298], [900], [905], [202], [740], [258], [762], [297, 295], [132], [240, 238], [833], [471], [386], [898], [162], [288, 290], [450], [850], [232], [273], [954], [965], [611], [643], [147], [290], [866, 977], [186], [156], [776, 683], [775], [987, 998], [333], [325], [572], [927], [744, 657], [777, 623], [833], [551], [301], [716], [485], [102], [791], [959], [404], [987, 998], [415], [455], [242, 852], [], [517], [16], [320], [632], [568], [], [216], [332], [769, 726], [923, 959], [861, 605], [134], [677], [288], [10], [919, 733], [852], [], [104], [712], [388], [261], [609, 479], [673, 681, 620, 526, 664, 508], [], [579], [450], [628], [217], [810, 878], [763], [208], [126], [442, 497], [864], [232], [776], [942], [336], [978], [681, 620], [512, 587], [78], [668], [699], [746], [46, 39], [968, 809, 618, 828], [330], [615], [], [62], [116], [127], [955], [306], [425], [190], [370], [187], [971], [897, 411], [396], [744, 657], [840, 463], [718], [116], [836, 837], [994], [419], [764], [214], [285], [641], [951], [882], [13], [829], [453], [216], [665], [521], [268], [468], [418], [728], [], [449], [194], [362], [928, 963, 948, 923], [924], [249], [524, 461], [992], [571], [283], [608], [129], [486], [859], [498], [21], [467], [591], [924], [556], [97], [898], [586], [10], [202], [67], [649], [141], [603], [727], [101], [995], [278], [964], [238, 240], [423, 424], [489, 634], [533], [424, 423], [451], [555], [732], [514], [803], [300], [551], [753], [411], [315], [963], [], [389], [559, 578, 601], [673, 742, 526, 527, 662, 664, 508], [839], [299], [578, 689], [112], [960], [632], [867], [], [61], [427], [367], [926], [465, 597, 413], [34], [773], [654], [131], [874], [281, 282], [891], [956], [201], [267], [], [200], [673, 508], [424, 423], [907], [57], [27], [906, 578, 834, 459], [7], [322, 946], [934], [663], [423, 424], [687], [836, 837], [958], [645], [119], [306], [930], [124], [694], [777, 524, 461], [205], [137], [849], [681, 620, 526, 508], [380], [586], [916], [478], [182], [874], [715], [487], [], [19], [161, 162, 785], [915], [730], [678, 487, 830], [822], [], [699], [689, 819, 578], [673], [], [], [624], [679], [887], [581], [665], [903], [746, 622], [585, 440], [800], [899], [669], [81], [746], [866], [935], [668], [295], [893], [265], [628], [987, 923], [367], [294], [727], [12], [435, 876], [192, 186], [589], [70], [129], [454], [17], [946], [204], [181], [163], [80], [940], [587], [21], [198], [25], [932], [339], [480], [465, 413], [883], [453, 619, 818], [807], [287], [], [614], [814], [591, 689, 601], [919], [508], [479], [452], [155], [41], [163], [606], [8, 7], [], [515, 808, 693], [858], [506], [23], [976, 447], [801, 397, 983], [856, 595], [753], [5], [186], [667], [305], [46], [303], [], [927], [91], [34], [675, 654], [406], [65], [76], [517], [806], [330, 331], [], [130], [103], [56], [], [78], [31], [372], [225, 235], [431], [159], [187], [930], [888], [96], [836, 837, 655, 879, 444], [994], [872, 622, 759], [302], [566], [33], [619], [694], [406], [20], [18], [371], [320], [780], [997], [730], [613], [105], [810, 878], [311], [883], [367], [243], [], [], [515, 39, 47], [412], [921], [332], [514, 464], [276], [629], [917], [77], [643], [556], [998], [328], [723], [161], [250], [1], [919], [392], [264], [652, 847, 465, 408, 413], [488, 633], [968, 495, 504], [188], [884], [335], [795], [241, 238], [842], [71], [862], [254], [27], [409], [444], [433], [324], [322], [688], [579], [562], [917, 335], [803], [863], [44], [719], [16], [384], [328], [348], [194], [678], [593], [9], [], [25], [913, 983], [260, 667], [104], [72, 815], [223], [268], [283], [784, 477], [53], [615, 465], [100], [543], [133], [159], [439], [151], [355], [392], [577], [72], [383], [619, 846], [145], [109], [988], [824], [293], [], [821], [484], [608, 806, 966, 572], [259], [344], [132], [128], [154], [210], [508], [638, 639], [138, 83], [256, 233, 252], [376], [720], [464], [960, 968, 504], [999], [455], [613], [314], [993], [17], [759], [843], [591, 721], [330], [681, 810, 620, 531], [432], [778], [489, 372], [468], [489], [375], [263], [], [418], [377], [878], [283], [838, 631], [442], [382], [641], [628], [592], [59], [223], [587], [724], [207], [228], [8], [962], [575], [988], [402, 889], [551], [990], [141], [120], [207], [118], [946], [828, 463], [786], [166, 167], [256], [986], [28], [283], [636, 834, 671], [720], [411], [80], [678, 211], [29], [606], [636, 748], [156], [91], [734], [569], [458], [84], [230], [274], [707], [75], [965], [260], [978], [709], [372], [717], [763, 764], [96], [958], [884], [327], [140], [88], [156], [137, 98, 99], [559, 836, 837, 842], [669], [492], [771], [653], [484, 871, 913], [], [787], [827], [644], [393], [386], [654], [137], [715], [906], [724], [633, 477, 823], [516], [64], [850], [321], [611], [392], [509], [207], [903, 655, 638], [397], [582, 949], [188], [652, 465, 830], [750], [259], [294], [450], [511], [477], [255], [814], [781], [177], [654], [806, 911], [680], [769], [830], [273], [24], [463, 977, 978], [321], [480], [331], [21], [556], [481], [420], [195], [216], [215], [152], [333], [646], [152], [635], [128], [993], [351], [928], [267], [830], [], [335], [319], [786], [816], [334], [509], [444], [155], [902], [526, 527, 664], [483, 581, 479, 817, 511], [346], [482], [173], [438], [], [], [374], [548], [552], [619, 607], [411, 478], [451], [277], [715, 652], [], [855], [694], [709], [611], [168], [113], [782, 851], [974], [147], [69], [546, 650, 402, 818, 819], [11], [543], [629], [127], [652, 465, 764, 413], [349], [975, 628], [922, 412], [484], [78], [204], [399], [192, 186], [543], [89], [423], [323], [764], [970], [829], [645], [542], [809, 925], [195], [732], [474], [741], [820], [238], [643], [977, 978], [234], [844], [717], [925], [57], [806, 911], [444], [], [245], [], [923, 868], [791], [401], [896, 804], [773], [977], [875], [637], [442], [652, 847], [873], [472], [977, 978, 608, 502], [926], [102], [810, 878], [784], [], [355], [643], [279], [92], [523], [50], [510], [765], [681, 620, 526, 664, 281, 508], [870], [748], [253], [749], [], [452, 911], [824, 775], [261], [562], [911], [289], [950], [456], [449], [117], [97], [101], [291], [346], [809], [997], [168], [896, 861], [714], [126], [593], [8], [432], [72], [158], [958], [662], [945], [47], [919], [427], [809, 762], [185], [685], [122, 124], [660], [449, 536], [434, 533], [178], [356], [128], [819, 517], [157], [404], [23], [939, 582, 943], [204, 155], [756], [797], [916], [254], [9], [471], [577, 904], [255], [882], [654], [261, 174], [923, 931], [950], [360], [246], [872], [578, 982], [675], [418], [556], [216, 220], [928, 923, 960], [402], [911], [601], [179], [975, 638, 639], [303], [709, 526, 470, 767], [778], [664, 553, 697, 851], [178], [500], [], [557], [745], [611], [401], [571], [621], [206], [89], [394], [481], [627], [333], [701], [644], [364], [450], [979], [203], [872], [795], [265, 267], [118], [705], [565], [519], [641], [75], [], [590], [749], [374], [986], [76], [83], [14], [945], [683], [770], [74], [211], [429], [269], [], [505], [150], [344], [858], [45], [959], [884], [333], [953], [86], [204], [62], [928, 960], [257], [178], [178], [274], [], [552, 37], [147], [919, 920, 555, 733], [566], [74], [248], [399], [281], [768], [296], [327], [502], [721], [310], [944], [377], [825], [404], [], [17], [356], [860], [750], [926], [345], [957], [488, 830], [843], [430], [656, 919], [871], [424, 610], [141, 142], [653], [930], [977], [744], [673, 681, 620, 526, 527, 782, 664, 508], [840], [471], [], [863], [122], [851, 981, 664], [803], [544], [365], [326], [80], [166], [304], [398], [821], [456], [738, 428, 580], [149], [505], [366, 367, 369], [872], [173], [944], [220], [780], [492], [437], [888], [185], [12], [33], [763, 764], [740], [522], [917], [921, 638, 639], [86], [193, 187, 852], [], [300], [741], [262], [839], [307], [673, 681, 620, 526, 632, 508], [859], [49], [658], [966], [], [215], [64], [867], [370], [690], [68], [403], [433], [313], [138], [868, 813], [968, 504], [966, 907, 572], [], [587], [862], [67], [328], [390], [81], [968, 187], [15], [872], [519], [494], [405], [786], [423], [593], [917, 454], [65], [149], [558, 541, 542], [], [868, 945, 923], [894], [454, 921], [651], [943], [559], [], [72], [921, 763], [567], [861], [687], [40, 47], [257], [766], [169], [578, 982], [889, 486], [87], [448], [654], [789], [790], [185], [798], [35], [275], [636], [783], [353], [81], [960], [139, 140], [586], [44], [254], [603], [533], [37], [489], [159], [30], [963], [551], [906], [374], [816], [951], [671], [724], [671, 535], [37], [219], [669], [532, 762], [482, 754], [42, 26], [898], [], [330, 331], [951], [810, 878], [874], [481], [641], [], [472], [92], [559, 846, 818], [890], [659, 828], [840], [684], [235], [559], [264], [673, 526, 527, 782, 664, 508], [979], [518], [840], [548], [956], [221], [548], [], [167], [157], [], [547], [470], [665], [251], [53], [897], [350], [], [607], [], [264], [], [209], [343], [681, 620], [], [786], [127], [323], [861], [836, 837], [], [361], [581, 479, 656], [715, 652, 439], [43], [872, 917], [968], [114], [27], [536], [740], [417], [100], [692], [902, 488], [], [779], [307], [482], [31], [327], [896], [299], [994], [122, 124], [387], [114], [390], [327], [90], [478], [16], [320], [654], [711], [486], [518], [], [219, 220], [816], [78], [494], [255], [308], [204, 243, 155], [], [78], [977], [263, 185], [401], [603, 653], [779], [556], [690], [399], [265, 181], [304], [167], [950], [152], [438, 647], [227], [157], [588, 790], [599], [924], [], [475], [877], [763], [809, 925], [358, 359], [785], [927], [434], [812], [642], [867], [884, 406], [785], [139], [779], [39], [786], [771], [466], [], [894], [170], [867], [492], [905, 831], [175], [], [631], [778], [25, 28], [884], [116], [], [860], [467], [965], [923, 960], [370], [171], [936], [602], [781], [], [142], [605], [894], [700, 999], [], [306], [546], [550], [761], [621], [595], [515, 583], [320], [939, 813, 909, 910, 567], [179], [840], [], [769, 798], [215], [954], [385, 101], [223], [], [729], [759], [559], [87], [116], [236], [554], [911, 636], [661, 479], [168, 211], [828], [520, 529, 516, 431], [719], [978, 437], [100], [538], [], [697], [21], [240, 241], [312], [634], [515], [309], [685], [783], [61], [998, 987], [886], [111], [427], [314], [350], [719], [71], [286], [588], [616], [132], [670], [], [], [877], [558], [591], [251], [788], [232], [908, 895], [471], [754], [959], [767], [8], [690], [496], [], [407], [767], [647], [715], [629], [13], [407], [268], [842], [738], [943], [320], [810, 878], [195, 202], [922], [262], [185], [184], [], [197, 199], [502], [40], [941], [106], [900], [6], [949, 954], [247], [30], [47], [505], [460], [40, 46], [921, 604], [216], [473], [590], [872, 759], [315], [], [39], [], [404], [765], [608, 678, 841], [155, 204], [124], [181], [386], [113], [575], [689], [557, 624, 436], [230], [499], [818], [726], [932, 415], [217], [727], [737, 455, 760, 886], [230, 231], [541], [732], [686], [395], [547, 565], [623], [732], [344], [670], [506], [650, 818, 819], [], [675], [120], [970, 979, 858], [74], [292], [831, 721, 745], [483, 460, 975], [529, 831], [212], [961], [715], [751, 479], [583], [], [706, 762], [893], [865], [749], [134], [131], [248, 249, 537], [], [], [314], [540], [565], [661], [382], [235], [750, 721, 697], [], [], [], [406], [768], [562], [452], [196, 198], [899, 968, 504, 505], [10], [171], [500], [716], [318], [357], [330, 331], [106], [22], [577], [573], [481, 482], [910, 438], [283], [542], [457], [897], [502], [72], [305], [541, 542], [915], [633], [755], [991], [333], [571], [524], [51], [16], [479], [932], [894], [644], [822, 542], [515, 467, 728], [175], [126], [483, 698], [402], [270], [352], [792], [248, 250], [828], [772], [], [340], [14], [285], [351], [77], [529], [356], [46, 47], [505], [162], [868], [859], [672], [959], [369], [832], [907, 440], [674], [783], [673, 526, 527, 664, 508], [146], [785], [883], [628], [871], [632], [586], [219], [951], [946], [93], [64], [877], [980], [497], [296], [61, 62], [673, 650, 664, 526, 527, 632, 508], [896], [489, 981], [677], [], [758], [653], [487], [507], [496], [], [417], [668], [471], [628], [847], [658], [90], [987], [135], [308], [], [], [724], [64, 55], [299], [810, 878], [730], [575], [835], [394], [0, 758], [988], [376], [300], [612], [546], [137], [412], [874], [277], [398], [392], [156], [581], [124], [992], [65], [552, 903], [781], [121], [447], [662], [845], [449], [847], [34], [792], [754], [148], [996], [23], [692], [141], [513], [89], [796], [636], [673, 681, 620, 664, 526, 527, 508], [190], [84], [], [952], [683], [], [610], [414], [958], [838], [974], [954], [], [532, 799], [], [10], [129], [682, 708], [184, 232], [613], [585], [], [614], [547], [332], [683, 889], [437], [637], [809], [741], [854], [5], [154], [594], [569], [538], [499], [867], [153], [727], [251], [956], [583], [442], [400, 667], [962, 923], [187], [640], [607], [320, 319], [933, 923], [449], [24], [679, 488], [104], [62], [37], [879], [241], [578, 982], [745], [842, 977, 978], [738, 580], [], [650, 819, 854], [1], [133], [123], [424, 423], [614], [162, 167], [229], [610], [534], [524], [840, 911], [932], [559], [560, 981], [333], [565], [821], [904], [269], [222], [114, 947], [], [91], [846], [139], [537], [252], [], [652, 413], [928, 927], [354], [556], [345, 690], [722], [601], [803], [241], [682], [300], [490], [721, 831], [386], [250], [5], [651, 659, 411, 813], [], [742, 713], [156], [981], [570], [608, 610, 841, 894], [662], [598], [217, 852, 239], [43], [], [212], [218], [763], [106], [839, 873], [238], [220], [744, 657], [301], [777], [356], [625], [98], [], [138], [545], [199], [574], [217], [614], [243], [200, 155], [247], [185], [984], [539], [211], [684], [173], [92, 95], [654], [174], [297], [246], [], [775], [799], [370], [808], [956], [500], [2], [358], [801], [686], [773], [936, 939, 940], [605], [749], [779], [618], [993], [805], [924], [589], [145], [215], [], [938, 37], [752], [12], [481], [906, 834], [769], [401], [918], [836, 837, 951], [], [275], [799], [369], [515, 40, 42], [504], [137], [761], [8, 765], [166], [677], [767], [430], [469, 616], [400, 834, 667], [325], [927], [390], [802], [84], [], [736], [745], [23], [92], [712], [630], [410], [474], [221], [793], [83], [309], [], [165], [843], [579, 881], [397], [222], [104], [426], [488, 479, 695], [195], [886], [401, 881], [265], [466], [194, 185], [949], [331], [551], [315], [221], [172], [550], [629], [806, 658], [889], [897], [769], [832], [10], [608, 774], [525], [111], [593], [968, 504], [704], [868], [347], [569], [], [596], [738], [763], [59], [953], [506], [585], [922], [22], [807], [676], [279], [363], [14], [378], [], [60], [608], [357], [872], [612], [154], [740, 587, 783, 477], [877], [265], [230, 220], [612], [785], [690], [433, 728], [423], [89], [275], [975], [653], [584], [292], [330], [580], [284], [976], [374], [669], [70], [6], [251], [443], [340], [263], [208], [59], [483], [140], [535], [947, 997], [140], [241], [707], [755], [977, 978], [121], [452], [571], [508], [677], [357], [122], [163], [150], [60], [979], [418], [701], [428], [], [24], [37], [977, 638, 639], [37], [122], [243], [766], [399, 786], [588], [652], [457, 158, 151], [102], [405], [29], [395], [467, 596], [449, 718, 975], [672], [752], [366, 367], [612], [825], [], [908, 404], [], [18], [996], [430], [82], [968], [834], [558], [370], [977, 978, 879], [700], [261], [513], [464], [185], [218], [274], [432], [831, 765, 799], [652], [768], [758], [289], [201], [209], [105, 106], [378], [560], [435], [632], [270], [309], [733], [57], [959], [106], [22], [289], [197], [861], [996], [192, 187], [632], [296], [382], [681, 810, 620, 508], [775], [320], [350], [704], [972, 977], [498], [442], [596], [525], [748], [514, 819, 638, 639], [578, 903, 689, 601], [536], [965], [], [470, 921], [899], [526, 527, 782, 664, 508], [880], [699], [292], [113], [820], [763], [925], [807], [681, 620, 526], [994], [247], [865], [423, 424, 762], [803], [760], [243], [590], [181], [489], [865, 850], [929, 443], [727], [415], [55], [690], [738, 703], [385], [527], [617], [117], [544], [200], [784, 507], [850], [141], [754], [255], [496], [847], [435], [515, 596], [291], [415], [808], [707], [956], [62], [289], [], [238], [316], [], [398], [383], [997], [393, 108], [17], [166], [617], [139], [284], [433], [628], [617, 823], [700], [80], [701], [773], [793], [11], [347], [785], [678], [], [806, 850], [538], [920], [851], [965], [147], [57], [545], [19], [836, 837, 975], [999, 153, 700], [119], [910], [962], [183], [73, 74, 815], [517], [579], [140], [346], [], [627], [863], [814], [665], [65], [529], [466], [155], [655], [540], [712], [726], [247], [894], [239], [34], [820], [], [528], [678], [189], [117], [695], [380], [293], [413], [114], [389, 391], [784], [332], [], [68], [690, 346], [313], [855], [842, 731], [10], [830], [472, 718], [456], [723], [668], [461], [558], [143], [745], [523, 975, 978], [221], [946], [123], [931], [654], [27], [406], [510], [261], [746], [981], [2], [933], [], [508], [754], [342], [456], [704], [905, 750], [545], [363], [113], [205], [187, 636], [945], [370], [647, 600], [155], [790], [589], [532], [191], [28], [532], [67], [456], [506], [143], [245], [433], [417], [35, 37], [203], [803], [273], [322, 769], [919], [340], [873], [87], [754], [816], [241, 238], [], [843], [844], [643], [563], [842, 978], [421], [87], [406], [946], [979], [806], [877], [619, 846], [417], [782], [105], [465], [624], [830], [352], [746], [33], [552], [863], [855], [688], [597], [281], [185], [15], [907], [221], [625], [506], [563], [], [37], [605], [679], [944], [58], [402], [80], [961], [763], [828], [577], [93], [315], [300], [81], [178], [1], [841], [729], [923], [66, 68], [409], [256], [76], [818], [369], [128], [391], [567], [591], [365], [387], [766], [], [], [], [458], [172], [769, 767], [457, 541], [20], [883], [356], [545], [405], [716], [212], [730], [635], [277], [256], [25], [868], [728], [540], [386], [864], [971], [53], [351], [], [320], [707], [691, 570], [978], [870, 903], [630], [62], [989], [128], [515, 775, 564, 669], [362], [311], [822, 542], [325], [642], [695], [473], [761], [365], [715], [], [559], [950], [999], [158, 151], [704], [647], [627], [227], [3], [676, 236], [349], [936, 923], [715], [993], [60], [17], [703], [290], [33], [225], [968], [323], [377], [462], [108], [51], [], [592], [870], [421], [126], [564], [801], [959], [270], [928, 923, 960], [578, 982], [651, 732], [577], [129], [958], [841], [350], [78], [259], [559, 721], [0, 391], [958], [877], [474], [650], [141], [755], [283], [907, 440], [2, 3], [744, 657], [104], [929], [991], [94], [772], [879], [628], [105], [975, 634], [619, 846], [828], [988], [936], [591], [], [738], [280], [156], [240], [708, 460, 975], [80], [546], [936], [196], [694], [340], [], [386, 101], [672, 792], [], [135], [191], [611], [472, 693], [624, 453, 454], [221], [894, 638, 639], [72], [535, 479], [824], [], [298], [286], [758, 472], [85], [555], [794], [961], [958], [936], [990], [180, 243], [71], [608, 514], [], [224], [868], [454], [611], [309], [539], [317], [393], [610, 443], [338], [520], [430], [276], [841], [93], [749], [281, 282], [892], [294], [457], [945], [923], [458], [817, 479], [555], [624], [855], [135], [110], [510], [], [783], [451], [866], [419], [206], [492], [265, 266], [94], [713], [720], [457, 459], [89], [], [47], [], [435], [908, 404], [119], [762, 766], [890], [215], [719], [844], [717], [358], [687], [643], [970], [394], [730], [618, 813, 659, 567], [914], [639], [475], [473], [18], [492], [396], [64], [679], [105], [], [450], [834], [49], [538, 668], [821], [697], [794], [781], [656], [748], [968], [624, 454], [909, 567], [], [915], [689, 601], [126], [13], [724], [379], [], [673, 508], [734], [80], [763], [43], [352], [], [697], [535], [532, 923, 868], [894], [504], [884], [], [688], [297, 295], [154], [984, 475], [629], [238], [268], [653], [681, 620], [63], [302], [914], [439], [144], [643, 819], [150], [1], [], [228], [144], [211], [57], [820], [28], [387], [], [37], [690], [567], [351], [785], [787, 524], [343], [488, 600], [135], [24], [369], [528], [271], [520], [585], [683], [225, 235], [912], [44], [265], [], [335], [74], [2], [793], [868], [573], [374], [590], [26], [911, 533], [306], [443], [387], [603], [602], [231], [877], [202], [652], [978, 510], [907], [337], [612, 670], [2], [169], [607], [681, 810, 620], [135], [518, 920, 671], [917], [761], [847], [362], [27, 455], [707], [647, 968], [524, 461], [479], [724], [], [351], [756], [342], [253, 703], [351], [873], [176], [956], [673, 681, 620, 526, 527, 664, 508], [724], [633], [199], [613], [479], [], [777], [], [419, 605], [320], [939, 567, 926], [669], [256], [223], [605], [880], [593], [469], [337], [630], [839], [752, 852], [846], [528], [105], [630], [514, 515], [125], [742], [94], [776], [], [512], [738], [968], [270], [455], [182], [58], [181], [674], [96], [118], [37], [453], [148], [203], [770], [894, 799], [11, 14], [101], [715, 671], [970], [601], [495], [786], [57], [33, 973], [990], [400], [716], [788], [337], [812, 908], [739], [292], [878], [9], [61], [361], [605], [218], [344], [232], [844], [832], [246], [596], [120], [950, 953], [896, 999, 648, 861], [975], [853], [921], [348, 349], [537], [866], [], [836, 837, 518, 898, 671], [39], [76], [], [449, 975, 472], [862], [138], [719], [], [262], [981, 429], [930], [22], [], [913], [617, 823], [821], [150], [825], [369], [474], [922], [], [343], [], [312, 937], [823], [951], [676, 235], [862], [92], [346], [28], [497], [549], [72], [195], [212, 251], [37], [112], [648], [107], [623], [139], [929], [170], [99], [475], [713], [], [264], [813], [432], [916], [475], [526, 664], [976], [44], [749, 526], [204], [121], [622, 759, 414], [194], [685], [283], [362], [555], [474], [17], [587], [368], [460, 718], [247], [885], [109], [737], [865], [783], [739], [462], [548], [136], [999, 876, 435], [579, 402], [351], [274], [641], [982], [426], [], [538], [354], [], [890], [954], [229], [824, 911], [728, 861], [932], [626], [681, 620], [107], [646], [69], [702], [987, 998], [607], [478], [], [792], [908], [898], [304], [73], [401], [], [923, 122], [268, 151], [593], [], [373], [503], [302], [402], [481, 482], [750, 721], [], [374], [446], [492], [755], [277], [91], [], [], [], [372], [531], [479], [763], [359], [595], [642], [654, 603], [499], [467], [346], [650], [804, 631], [111], [], [487], [693, 472], [929], [134], [661], [629], [117], [689], [743], [479, 817, 511], [447], [232], [321], [351], [621], [494], [200], [470], [316], [786], [981, 429], [258], [485], [960, 923], [144], [], [395], [506], [789], [325], [297], [384], [], [453], [614], [349], [645], [608], [673, 526, 527, 916, 664, 508], [807], [818], [830], [370], [754], [762, 923], [], [903], [213], [581], [6], [], [802], [579, 881], [242], [696], [683], [939], [139], [475], [749], [685], [219], [702], [661], [834, 906, 630], [495], [816], [945], [327], [386], [374], [298], [377], [979], [625], [888], [227], [176], [677, 587, 784], [11], [907, 953], [276], [105], [546], [650, 834, 906], [578], [739], [298], [14], [4], [526], [47], [281, 897], [791], [984], [839], [842, 625], [986], [411], [582], [52], [43], [778], [306], [820], [150], [], [822, 542], [78], [887], [], [], [948], [829], [259], [199], [804], [580, 315], [513], [84], [821], [814], [929], [928, 960], [957], [921], [608], [744, 657], [172], [392], [820], [937], [610, 114], [331], [51], [113], [7], [7], [60], [836, 837], [592], [396], [650], [687], [873], [431], [159], [182], [430], [837, 465, 597], [950], [566], [31], [440], [234], [726], [113], [980], [517], [221], [572], [598], [376], [913], [843], [531], [803], [125], [685], [255], [801, 983, 327], [417], [666], [891], [530], [57], [520], [166], [745], [450], [], [385, 386], [716], [650, 819], [564], [355], [353], [308], [883, 725], [775], [131], [939], [83], [116, 126], [], [955], [18], [628], [480], [841, 823], [594], [405], [162], [95], [605, 748], [430], [338], [86], [305], [648, 794], [403], [], [939, 943], [497], [236], [413], [350], [854], [93], [49, 50], [719], [187], [262], [405], [62], [955], [608, 679], [260], [331], [265], [938], [370], [497], [26], [581, 407], [660], [119], [67], [89], [683], [779], [801], [263], [368], [862], [], [486, 776, 683], [143], [689, 501], [], [541], [916], [530], [40], [], [634, 561], [177], [208], [425, 825], [836, 638, 639], [], [547], [198], [671, 518, 615], [110], [232], [1], [558], [911, 796], [126], [807], [619], [616, 618], [526], [88], [759, 474], [249], [33], [386], [30], [900], [898, 680], [472, 693], [370], [123], [608, 897, 651, 567], [], [482], [546, 631], [197], [566], [515], [721], [18], [774], [962, 923], [272], [361], [494], [55], [], [783], [946], [430], [293], [195], [100], [120], [619, 409, 442, 892], [578, 689, 982, 601], [751], [764], [336], [441], [448], [558], [55], [283, 435], [561], [291], [189], [465], [182], [179], [574], [257], [896], [487], [696, 738], [], [534], [856], [346], [], [416], [], [911], [391], [762], [258], [535], [230], [983], [378], [402], [310], [780], [11], [923, 891], [369, 379], [672], [329], [39, 44, 47], [147], [88], [582, 953, 954], [879, 638, 639], [687], [102], [97], [631], [917, 921], [969], [221], [85], [], [999, 435, 794], [246], [159], [76], [], [962, 923, 935], [155], [794], [951], [481, 482], [146], [31], [737, 519], [], [872], [563], [252], [300], [665], [317], [195, 245], [], [517], [126], [125], [29], [17], [230, 231], [25, 28], [453, 553], [48], [976], [767], [572], [201], [752], [352], [230], [671], [312], [110], [316], [711], [13], [670], [788], [551], [770], [903], [90], [771], [656, 468], [181], [464], [579, 881], [492, 588], [619, 846], [24], [957], [500], [582, 692], [817], [890], [], [986], [769], [785], [989], [13], [38], [165], [548], [457], [923, 934], [565], [263], [492], [858], [607], [744, 657], [417], [483], [466, 799], [795], [698], [747], [851], [161], [614], [627], [971, 542], [478], [896], [], [174], [934], [], [929], [332], [487, 457], [962, 923], [7], [], [245], [956], [286], [], [115], [], [481, 482], [609], [547], [356], [994], [637], [636, 748], [604], [448], [152], [163], [107], [], [97], [353], [898, 585, 631], [404], [946], [853], [292], [140], [860], [734], [739], [243], [964], [673, 526, 508], [519], [899, 968, 725, 504, 572], [341], [277], [759, 635], [707], [661], [286], [863], [401], [79], [299], [826], [274, 277], [31], [604], [88], [697], [755], [], [538], [], [305], [440, 441], [327], [867], [], [756, 412], [275], [521, 926], [138], [791], [365], [305], [976, 977, 978], [10, 858], [478], [489], [50], [896], [324], [86], [386], [789, 614], [699], [133], [408], [565], [], [611], [77], [998, 941, 987], [769], [248, 537, 250], [543], [255], [458], [573], [], [338], [944], [697, 819, 854], [797], [491], [714, 402], [578, 627, 982], [16], [382], [531], [58], [933], [269], [653], [960, 813], [200], [791, 922], [549], [522], [155], [632], [110], [453, 894], [182], [513], [396], [243], [], [368], [440, 455], [181], [438], [759], [], [331], [259], [728], [807], [596], [634], [517], [], [599], [157], [159], [324], [581, 479], [179], [115], [645], [816], [155], [604], [673, 526, 527, 664, 508], [29], [], [550], [972], [284], [403], [874], [315], [637], [393], [421], [], [459, 845], [221], [963], [558], [258, 222], [400], [40, 44], [991], [444, 670], [147], [50], [763], [502, 638, 639], [], [72], [648], [137], [119], [548, 851], [610], [475], [487], [72], [], [559], [858], [935], [731], [455], [], [973], [395], [786], [263], [734], [], [308], [728, 412], [256], [289], [79], [349], [461], [591], [72, 815], [], [408], [54], [634], [601, 578], [70], [671], [166], [439], [869, 841, 523], [72], [923], [327], [651], [856], [427, 756], [989], [834, 982, 906], [], [551], [389, 983], [559], [75], [75], [902], [830], [102], [369], [921], [551], [], [70], [366], [769], [107], [517], [314], [272], [434], [238, 239], [607], [58], [8, 7], [832], [220], [182], [975, 980, 703], [978], [869, 617], [378], [748], [248], [431], [866], [960], [659], [468, 919], [953, 954], [930], [873], [40, 46], [518], [466], [833], [408], [923], [937], [], [508], [314], [734], [696], [956], [615], [320], [946], [196], [914], [123], [903], [34], [56], [213], [512], [208], [99], [372], [205], [677], [786], [], [788], [772], [94], [99], [783, 677, 463], [], [555], [770, 836, 837, 733, 862, 610], [882], [937, 938], [592], [11], [], [13], [736, 515], [418, 767], [197], [914], [524], [233], [882], [], [], [315], [83], [788, 502], [124], [979], [320], [169], [491], [861], [784], [893], [517, 540], [340], [501], [52], [514], [72], [366], [961], [224, 208], [702], [275], [], [267], [421], [54], [963], [275], [756], [316], [945], [606], [198], [177], [928], [612], [497, 663], [587, 784], [], [375], [75], [709], [678, 638, 523, 818, 819], [360], [442], [330, 331], [566], [150], [], [], [], [8, 7], [557, 663, 442], [706, 421, 970], [458], [51], [2, 3], [702], [659, 923], [553], [8], [17], [17], [880], [734, 407], [273], [933], [953], [891], [464], [237], [860], [669], [857], [2], [448, 858], [869], [69, 110], [350], [273], [73], [], [727], [506], [306], [91], [], [181], [926], [17], [24], [545], [957], [845], [104], [513], [53, 759], [777], [847], [187], [378], [696], [940], [200], [155], [409], [229], [123], [850], [723], [578], [247], [847], [956], [51], [890], [907], [], [646], [182], [12], [958], [980], [172, 173], [999, 499, 700], [844], [21], [811], [830], [512], [531], [236], [581, 479, 511], [542], [565], [169], [], [453], [900], [], [740], [235], [192], [371], [475], [121], [126], [50], [161], [240], [421, 976, 978], [422], [172], [803], [], [673, 526, 527, 782, 664, 508], [230], [479], [978, 515], [841, 501], [545], [758], [4], [123], [329], [503], [966], [281], [209], [401], [687], [483], [30], [949], [873, 839], [974], [576], [514], [988], [173], [], [164], [991], [882], [609], [756], [], [], [936], [113, 125], [453], [], [471], [67], [242], [257, 850], [141], [571], [321], [631], [586], [902], [793], [378], [608, 421, 869], [520, 669], [748], [121], [84, 7], [396], [670], [453], [797], [917], [746], [608, 972], [421, 506], [997], [910], [257], [802], [688], [210], [905, 859], [337], [753], [519], [750], [625], [476], [651, 527, 664], [683], [962, 923], [800], [802], [874], [853], [625], [585], [93], [928], [407], [955], [807], [54], [], [402, 836, 837], [494], [87], [756], [], [244], [522], [347], [692], [886], [182], [864, 867], [884], [985], [914, 484, 780], [635], [304, 302], [18], [], [610], [440], [808, 968, 504], [850], [698, 483], [469, 926], [], [518, 568, 570], [575], [928, 762, 923, 927], [106], [977, 638, 639], [770], [519], [542, 559, 541], [456, 733], [], [617], [235], [200], [280], [842, 879, 977, 978], [191], [740], [553, 750, 831, 894], [576], [983], [962, 923], [217], [199], [7], [828], [656], [217], [23], [47], [439], [152, 155], [400, 667], [18], [197], [631], [442, 494], [235], [377], [], [966, 572], [777], [528], [238], [997, 947], [183], [133], [861], [394], [425], [389], [13], [294], [243], [69], [850], [56], [143], [360], [547], [462], [], [552], [611], [322], [572], [494], [197], [833], [708], [548, 851, 632], [918], [124], [459], [149], [361], [520], [458], [270], [186, 193], [667], [675, 850, 757], [453], [833], [716], [190], [], [30], [949, 954], [211], [834, 517, 906, 630, 671], [374], [], [670, 518], [450], [914], [39], [261], [], [463], [], [100], [488, 679], [995], [760], [230, 231], [110], [], [251], [], [814], [490, 600], [38], [683], [994], [553], [673, 508], [277], [839], [564, 669], [920], [483], [551, 629], [757], [217], [877], [60], [785], [533], [1], [401], [214], [853], [126], [295], [318], [892], [719], [462], [124], [240], [516], [535], [149], [521], [152], [393], [562], [195], [962, 933, 923], [419], [1], [103], [423], [824], [582], [780], [370], [228], [581], [456], [], [984], [], [997, 947], [114], [837, 841], [333], [490, 524, 461, 787], [889], [858], [93], [6, 983], [], [656], [986], [991], [812], [608, 465, 597], [857], [311], [652], [610], [445], [246], [231], [673, 664, 526, 527, 632, 508], [221], [], [149], [304], [], [], [285], [354], [966], [78], [], [31], [500], [617], [665], [946], [604], [130], [246], [464], [237], [339], [50], [809, 923], [859], [], [], [581], [550], [], [898, 585], [201], [701], [274], [12], [153], [12], [], [345], [], [368], [225], [], [9], [41], [527, 782, 916, 664], [932], [981], [776], [363], [239], [694], [], [232], [905], [669, 564], [850], [195], [179], [328], [849], [167], [539], [173], [166], [829], [680], [145], [37], [268], [523], [394], [718, 975], [779], [567], [377], [670], [965], [139], [472, 693], [355], [538], [167], [841, 501], [212], [788, 795], [918], [897], [610], [718, 888], [726], [158], [145], [868], [], [361], [654], [327], [869], [417], [305], [350], [578, 689], [879], [401], [241], [937], [600], [284], [537], [172], [494], [408, 414, 465, 608], [695], [696], [525], [805], [961, 909], [627], [949], [647], [35, 37], [911, 658, 824, 568], [944, 946], [923], [346], [457, 834], [349], [79], [], [612], [104], [104], [596, 284], [835], [614], [568], [322], [301], [265], [758], [866], [829], [358], [977, 978], [906], [24], [571], [334], [785], [694], [299], [], [654], [722], [511, 479], [272], [271], [409], [515], [6], [927], [337], [708, 557, 538], [997], [673, 664, 526, 527, 782, 632, 508], [895], [353], [], [385, 101], [236], [174], [214], [642], [932], [440], [904], [903], [766], [975], [11], [283], [416], [792], [36], [35, 37], [544, 521, 910, 926], [598], [578], [281], [990], [110], [391], [859], [], [959], [693], [688], [588], [497], [753], [350], [44], [529], [760], [945], [303], [985], [51], [], [111], [412], [708], [179], [52], [581], [852], [734], [884], [608, 610, 836, 837, 557], [625], [711], [960], [936, 938], [807, 637], [226], [], [276], [195], [863], [457], [88], [], [760], [180], [593, 650], [543], [654], [939, 943], [698], [956], [594], [841, 911], [], [694], [496], [544], [198], [693], [956], [243], [102], [118], [783], [248, 250], [189], [5], [479], [507], [438], [973], [168], [434], [814, 913], [214], [349], [817], [726], [821], [585], [9], [908, 895], [333], [334], [], [580], [201], [386], [985, 716], [195, 159], [430], [546, 776, 650, 819, 632], [207], [261], [209], [895], [358], [321], [681, 620, 951], [333], [711], [286], [445], [293], [880, 430], [], [818], [996], [327], [573], [526], [843], [713], [847], [179], [268], [248, 250], [337], [177], [968], [688], [652], [962], [383], [220], [815], [810, 878], [146], [39], [455], [52], [141], [463], [828], [981], [787], [497], [620], [786], [615], [240, 238], [893], [30], [486], [825], [418], [649], [64, 55], [779], [48], [621], [159], [570], [43], [539], [], [], [], [945], [392], [606], [208, 250], [538], [949], [91], [207], [985], [951], [580], [79], [259], [645], [826], [581, 751, 817, 479], [640], [47], [453, 454, 624], [896, 435], [725], [], [384], [121], [], [234, 214], [894], [991], [315], [374], [], [], [614], [569], [497], [605], [339], [], [378], [82], [], [576], [610], [905, 532, 441, 572, 834, 966], [416], [780], [129], [], [386], [573], [628], [853], [982], [786], [672, 970], [908], [325], [331], [380], [551], [487], [], [859], [882, 613], [125], [245], [379], [561], [840], [867], [437], [52], [646], [536], [382], [647], [323], [], [175], [874], [578, 903, 689, 885], [535], [937], [462], [433], [189], [654], [592], [357], [94], [341, 703], [468, 919, 920], [377], [148], [362], [14], [326], [319], [659], [857], [681, 620, 508], [849], [841, 608, 636], [289], [785], [779], [870], [], [302], [], [373], [29], [486], [201], [239], [735], [954], [143], [563], [48], [807], [430], [571], [345, 690], [690], [129], [399], [393], [181], [391], [907, 478], [400], [647], [544], [], [871], [697], [263], [774], [916], [708], [509], [135], [812], [385], [214], [285], [76], [261], [390], [590], [595], [397], [936], [168], [525], [], [502], [914], [449], [750], [471], [528], [19], [966], [879], [38, 44], [121], [837, 441], [801], [893], [], [793], [104], [873], [144, 977], [313], [267], [204], [868, 923], [488], [236], [334], [26], [427], [621], [715, 764], [692], [104], [627], [578, 903, 601], [595], [921], [785], [204], [759], [721], [214], [330], [564], [565], [59], [], [489], [515], [23], [191], [125], [629], [578], [514, 788], [580], [171], [444], [681, 620, 508], [215], [289], [742], [175], [821], [826], [93], [241], [], [719], [62], [11], [995], [497], [77], [162], [], [494, 442], [357, 337], [497], [977], [990], [363], [506], [264, 263], [103], [609], [671], [548, 905, 851, 831, 598], [306], [], [48], [445], [875], [387], [731], [361], [], [132], [82], [923, 924], [257], [945], [843], [819], [481], [147], [292], [968, 651, 504], [971, 815], [76, 568], [245], [82], [870], [671], [], [46], [463], [845], [944], [12], [602], [483], [4], [182], [958], [900], [301], [335], [734], [515], [428], [789], [481, 482], [902], [234, 852], [417], [802], [655, 500], [363], [354], [64, 55], [281], [796], [150], [668], [618, 284], [144], [159], [194], [345], [414], [482], [296], [446], [814], [516, 601], [425, 730], [534, 729], [729], [868, 923], [813], [475], [129], [740], [573], [437], [760], [792], [873], [644, 470], [279], [907], [434], [229], [610, 655], [795], [185], [521], [232], [672, 570], [165], [902, 769, 726], [649, 979], [400, 667], [788], [812], [715], [237], [747], [608, 728, 824, 630, 414], [39, 48], [424, 423], [590, 487], [291], [771], [15], [485], [836], [297], [38], [967], [329], [138], [739], [518, 652, 691, 570], [867], [608, 459], [804], [665], [515, 906], [9], [919], [111], [], [923], [5], [421], [533, 539], [674], [828], [836, 837], [963, 567], [399], [545], [892], [381], [597, 763], [473], [540], [248, 250], [80], [612], [806], [31], [965], [823], [446], [346], [921], [817], [195], [479], [725], [518], [467], [634], [968], [921], [141], [284], [546, 650, 402], [923], [454], [296], [171], [865], [276], [132], [970], [970, 980], [443], [653], [66, 68], [874], [259], [300], [445], [580], [738], [], [889], [904], [252], [], [897], [8, 7], [168, 205], [960], [235, 242], [510], [839], [752], [958], [436], [543], [797], [442, 538], [537], [7], [491], [160], [659, 438, 647], [872, 759, 622, 732, 414], [402, 703], [876], [465], [453, 454], [800], [757], [626], [912, 716], [880], [720], [880], [248, 249], [801], [452], [265], [379], [841, 447], [108, 991], [862], [394], [715], [812], [32], [452, 433], [678], [8], [79], [747], [316], [48], [], [232, 760], [648, 720], [588], [74], [563], [970, 518, 671], [972, 858], [566], [996], [596], [335], [476], [83], [513], [868], [], [968, 849, 725, 504], [892], [309, 984], [496], [673, 742, 664, 526, 527, 508], [632], [40, 46], [], [366], [570], [11], [691], [], [427], [850], [446], [434], [48], [], [20], [796], [467], [528], [363], [871], [112], [222], [872], [434], [561], [238], [962, 692], [713], [41], [338], [847], [943], [15], [774], [610], [148], [497, 442], [84], [836, 837, 678], [130], [885], [288], [340], [844], [654], [213], [974], [849], [419], [669], [35], [280], [323], [142], [157], [64], [553], [931], [379], [357], [466], [646], [967], [377], [256], [283], [289], [486], [], [722], [850], [], [92], [], [330], [378], [538], [151], [], [122], [6], [443], [670], [672, 471], [946], [38], [224], [752], [444], [420], [906], [372], [755], [480], [490], [613], [259], [578, 639], [646], [793], [235], [], [737], [777], [584], [553], [822], [388], [585], [862], [523], [745], [582, 941, 728], [82], [56], [195, 697], [801, 570], [787], [885], [740], [122], [618], [389], [153, 204], [563], [190], [610, 487], [967], [822, 542], [399], [565], [649], [696], [298, 299], [964, 951], [207], [26], [206], [635], [739], [523], [512], [385], [487], [500], [675], [38], [777], [934], [777], [624, 283, 453, 454], [805], [960, 967, 968], [859, 868, 521, 651], [337], [621], [169], [515, 680], [863], [886], [626], [406], [630], [45], [365], [512], [333], [415], [909, 926, 968, 504], [495], [886], [91], [105], [273], [421], [829], [153], [417], [510], [519], [979], [941], [569], [992], [263], [948], [819], [936], [], [84], [750], [139], [716], [395], [677], [949], [], [968, 504], [509], [378], [423], [305], [451], [59], [968, 504], [206], [32], [506, 421], [24], [140], [804], [715], [581, 656, 436, 479], [312, 311], [938, 923, 935], [684], [316], [94], [988], [42], [908, 404, 895], [908], [963], [252], [322], [900, 540, 812], [], [426], [180], [821], [], [502], [739], [261, 174], [650], [494], [581, 479, 817], [35], [90], [591], [432], [613], [626], [102], [489], [411], [168], [627], [834, 458], [903, 558], [], [372], [900], [49], [206], [766], [53], [783], [265], [71], [812], [136], [589], [61], [], [64, 59], [69], [710], [129], [], [496], [15], [911, 474], [659], [120], [], [432], [428], [140], [801], [217], [669], [994], [330, 332], [999, 281, 700], [958], [12], [608], [487, 402], [548, 851], [334], [750, 721], [241], [624, 453, 454], [392], [736], [301], [441, 572], [386, 101], [], [581, 436, 479], [810, 878], [568], [106], [482], [850], [28], [770], [1], [843], [655], [94], [464, 597], [741], [693], [468], [660], [917], [329], [], [654], [871], [390], [342], [], [572, 966], [950], [120], [146], [302], [], [519], [], [], [197], [505], [155], [825], [188, 189], [96], [237], [726], [325], [229], [507], [457, 834], [93], [], [260], [930], [510], [346], [983], [395], [317], [289], [554], [34], [713, 742], [992], [162], [211, 159], [401], [88], [559], [760], [484], [636], [309], [14], [78], [725, 901], [378], [431], [267], [223], [423, 424, 589], [973], [681, 810, 620], [618, 469], [], [167], [383], [117], [], [302], [479, 436], [389], [663], [346], [323], [822], [126], [432], [524], [994], [968], [], [355], [562], [420, 683, 875], [789], [847], [60], [842, 638], [720], [724, 536], [373], [398], [780], [673, 620, 664, 526, 527, 846, 632, 508], [540], [616], [104], [873], [417], [436], [277, 278], [668], [945], [184, 191], [682, 708], [225], [], [546], [674], [146], [580], [903], [665], [821], [682], [216], [684, 784], [571], [621], [287], [120], [774], [849], [223], [498], [608], [193, 194, 187], [982], [1], [771], [882], [469], [], [388], [344], [377], [610], [816], [621], [940, 463], [435], [515], [603], [402, 559, 836], [450], [], [800], [628], [865], [610], [], [15], [762], [775], [539], [531], [185], [579], [482], [398], [419], [976], [650], [771], [491], [910], [69], [207], [939], [], [100], [134], [506, 421], [249], [525], [171], [999, 861], [287], [497, 884], [], [249, 250], [600], [765], [609], [216], [788, 831], [210], [781], [923, 550, 967, 968, 762], [781], [198], [673], [235], [684], [429], [828], [86], [869], [215], [209], [435, 151], [397], [430], [791], [187], [436], [849], [603, 764], [144], [591], [808], [793], [909, 827, 926], [272], [], [80], [313], [923], [251], [53], [430], [119], [562, 825], [499], [919, 733], [359], [57], [820], [131], [330], [507], [781], [975, 703], [286], [761], [231], [841, 885, 630, 636], [128], [713], [780, 724], [604], [], [307], [880], [955], [910, 729, 828], [338], [928], [], [494], [340], [822, 577], [500], [859], [202], [975, 562], [633], [856], [210], [834, 836, 837, 650, 906, 819], [], [658], [366], [634], [160], [134], [277, 278], [155], [570], [102], [27], [421], [50], [401], [785], [906], [288], [487], [966, 572], [671], [788], [759], [377], [690], [816], [655], [72], [748], [592], [241], [893], [560], [18], [246], [901], [270], [782, 664, 830], [414], [819], [196, 198], [122], [839], [622, 759], [456], [278], [724], [333], [664, 971], [610, 841], [498], [965], [409], [241, 238], [136], [114], [453, 553, 894], [8], [869], [932], [587], [519, 950], [354], [648], [], [489, 22], [903], [442], [987, 998], [44], [795], [265], [933], [911], [748], [23], [396], [795], [1], [802], [], [], [479], [81], [525], [836, 837, 841, 978, 501], [626], [356], [610], [470], [666], [846], [91], [137], [], [529], [569], [993], [452], [616], [940], [293], [351], [604], [244], [551], [47], [354], [481], [800], [455, 440], [711], [23], [5], [700, 999], [148], [536], [886], [368], [246], [468], [672], [879], [171], [541, 62], [714], [28], [169], [993], [17], [442, 497, 858], [839], [679, 721], [160], [845], [251], [898], [423], [480], [581, 468], [500], [396], [883, 572], [431], [956], [361], [53], [817], [49], [729], [522], [], [939], [338], [391], [965], [625], [884, 406], [774], [546, 776, 158], [839, 718], [458], [213], [48], [950], [478], [431, 697], [34], [352], [703], [931], [830], [968, 504], [], [938], [320], [195], [121], [774, 977, 978], [437], [563], [26], [362], [16], [328], [841], [673, 526, 527, 782, 664, 508], [469], [13], [463], [14], [922], [231], [26], [921, 445], [], [22], [996], [222], [440, 737, 455], [232], [133], [607], [293], [117], [343], [476], [291], [565], [521], [825], [724], [295], [219, 220], [364], [258], [], [483], [], [710], [474, 911], [538], [64, 55], [], [539], [573], [603], [], [393], [923], [934], [922], [469], [871], [], [402], [474, 799], [616], [544], [50], [], [414], [595, 866], [825], [], [131], [515], [351], [297], [976], [577], [764], [903], [699], [335], [229], [666], [444], [168], [560], [847], [], [286], [], [6], [64], [218], [747], [669], [287], [825], [], [370], [957], [662], [875], [963], [165], [260], [646], [778], [197], [753], [996], [930], [453, 742, 681, 620], [677], [518], [63], [346], [517], [610], [672, 797], [276], [721], [383], [571], [787], [735], [75], [834, 681, 906, 526], [345, 346, 730], [54], [443], [597], [652], [770], [212], [116], [368], [388], [87], [690], [368], [854], [117], [], [105], [457, 834], [93], [], [342], [96], [834], [406], [17], [798], [866], [930, 415], [471], [574], [83], [698], [799], [24], [208], [459, 445], [946], [981], [887], [732], [687], [68], [966, 572], [999], [478], [263], [417], [244], [128], [974], [580], [515], [2], [893], [532], [56], [169], [714], [617, 691, 570], [366], [141], [38, 45], [309], [731, 861], [957], [845], [], [732], [411], [668], [850], [747], [565], [989], [508], [322], [547], [50], [752], [455], [806, 630], [103], [752, 852], [483], [845], [56], [427], [10], [881], [426], [300], [864], [184, 191], [316], [158], [557, 718], [253], [550], [260], [638, 639], [250], [842, 814, 977, 978, 693, 445, 639], [366], [], [388], [237, 158], [252], [743], [391], [816], [76], [399], [897, 285], [441], [], [6], [98], [289], [653, 493], [914], [696], [863], [701], [100], [825], [977, 978], [965], [384], [70], [605], [937, 962, 935], [979], [22], [669], [899], [64, 59], [640], [345, 347], [354], [491], [29], [141], [963], [27], [563], [250], [155], [236], [793], [969], [739], [780, 914], [125], [813, 567], [920, 779], [429, 463], [303], [665, 518], [12], [673, 810, 527, 664, 508], [370], [429], [793], [809], [16], [276], [], [679], [748], [323], [204], [201], [784], [286], [138], [303], [192, 185], [774], [], [669], [288], [489], [659], [588], [912], [735], [611], [99], [], [938], [358, 359], [218], [807], [907], [550], [36, 37], [834, 655], [904], [919], [699], [840], [698], [578, 819], [592], [767], [518, 670], [117], [258], [], [592], [256], [], [666], [265, 267], [833], [602], [474], [541], [614, 894], [760], [241], [507], [557, 22], [854], [418], [260], [673, 892, 681, 620, 526, 508], [269], [203], [277], [491], [48], [738, 999, 905, 700], [455], [239], [642], [236], [178], [403], [], [25], [546, 819], [834, 906], [776], [755], [816], [338], [778], [89], [], [560], [], [665], [939, 943], [914], [29], [683], [130], [0], [], [57], [335], [190], [971], [294], [175], [955], [524, 461], [583], [], [346], [157], [134], [], [112], [987, 998, 809, 923, 925], [90], [], [39], [345, 690], [678], [175], [749], [149], [813, 910, 926], [965], [101], [491], [122], [954], [434, 797], [311], [679], [597], [], [362], [786], [767], [27], [751], [724], [409], [694], [724, 536], [904], [197], [692, 790, 509], [], [901], [554], [928, 930, 923], [865], [65], [789], [958], [427], [927], [950], [274], [379, 381], [656], [320], [132], [855], [10], [41], [938], [553], [557], [897, 651, 760], [839], [811], [692, 760, 700], [616, 830], [593, 650], [610], [366], [885], [835], [291], [543], [448], [435], [486], [679], [750], [919], [734], [], [534], [964], [82], [], [287], [294], [714], [784], [991], [103], [925], [226], [63], [214], [578], [872, 681, 620, 622, 759, 414], [738], [135], [434], [610], [907, 440], [703], [112], [], [772], [606], [137], [162, 167], [744, 657], [], [277, 278], [321], [763], [104], [466], [303], [238], [726], [358], [216], [112], [], [32], [70], [594], [392], [159], [12], [206], [238, 216], [536], [791], [190], [674], [223], [610, 402], [44, 26], [539], [479], [81], [194, 203], [247, 215], [880], [17], [794], [], [], [420], [896], [], [409], [42], [114, 947], [433, 460, 975, 977], [710], [989], [745], [907, 440], [261], [303], [657], [518], [565], [614], [847], [607], [866], [371], [676, 199], [726], [478], [490], [700, 999], [978], [836, 837, 619], [305], [], [768], [648, 631], [265], [399], [523], [], [896, 648], [], [410], [913], [211], [512], [522], [336, 337], [681, 620, 632], [159], [307], [], [944], [808, 638, 639], [731], [796], [20], [392], [571], [576], [518, 830], [873], [789], [928], [311], [24], [858], [974], [422], [241], [729], [569], [494], [684], [387], [746], [45], [95], [582], [819, 854], [380], [521], [252], [504], [], [439, 541, 542], [174], [630], [937], [349], [542], [577], [465], [239], [378], [568], [218], [969], [610, 898], [844], [975, 703], [479, 817], [999, 434], [103], [865, 850], [210], [244], [881], [127], [426], [728, 790], [763], [903], [280], [744, 884], [301], [931], [822], [127], [256], [48], [418, 629], [352], [736], [343], [733], [405], [685], [638, 639], [808], [207], [722], [992], [985], [850], [506], [31], [], [739], [601], [344], [190], [876, 435], [810, 878], [673, 419], [850], [460, 975, 536], [874], [103], [852], [750, 242, 831], [176], [992], [895], [785], [281], [994], [378], [622], [374], [140], [414], [952], [678], [51], [321], [898], [586], [858], [602], [843], [440, 441], [285], [], [553, 493], [699], [109], [945], [948], [746], [293], [217], [223], [474], [42], [955], [332], [424, 423], [], [913], [678], [972], [131], [34], [850], [857], [619, 750, 846, 721], [769], [], [28], [742], [766], [836, 638, 639], [238], [744, 657], [233], [], [762, 923, 959], [], [135], [645], [964, 923], [559], [609], [78], [894], [800], [803], [636], [469], [167], [196], [247], [711], [275], [659, 959, 762, 923], [138], [730], [695], [992], [88], [], [407], [41, 44], [988], [239], [932], [152], [678], [156], [615], [601], [295], [925], [735], [639], [683], [822], [732], [], [665], [651], [859], [619, 846], [500, 825], [736], [388], [346], [183], [943], [152], [443], [479], [52], [150], [174], [911], [828], [281, 282, 539], [56], [595], [49], [699], [589], [817, 573], [80], [538], [130], [], [315], [917], [766], [498], [678], [617, 823, 153], [619, 846, 750, 721], [154], [930], [96], [289], [737, 455], [874], [308], [884], [898, 455, 680, 711, 968, 473, 826], [], [578], [795], [218], [693, 472], [375], [311], [137], [755], [566], [], [107], [606], [540], [774], [510], [911, 824], [392], [], [233], [570], [462], [88], [893], [763], [926], [142], [877], [371], [673, 681], [479], [975, 977], [163], [196, 837, 198, 836], [265], [416], [377], [256], [148], [397], [571], [876, 435], [380], [561], [243], [834], [932], [150], [585], [688], [382], [0], [322], [388], [946], [75], [473], [458], [375], [660], [687], [882], [583], [967, 968], [527], [255], [], [604], [937, 942], [], [249], [680], [250], [243], [62], [791], [62], [154], [73], [596], [754], [47], [], [488, 841, 843], [37], [18], [288, 290], [], [244], [224], [237], [12], [], [624, 453], [443], [727], [], [384], [327], [472], [257], [944], [787], [889, 486], [977, 978, 445], [334], [], [157], [412], [], [892], [26], [40], [815], [603], [265], [977, 978], [16], [547], [352], [49], [339], [608, 610], [349], [742], [401], [495], [], [509], [814], [146], [604], [341], [602], [578], [702], [996], [107], [95], [736, 515], [577, 641], [116], [44, 26], [276], [279], [558], [386], [748, 600], [133], [242], [616], [379], [850], [349], [552], [635], [384], [292], [798], [457], [995], [], [429], [109], [814], [895], [80], [], [723], [335], [810, 878], [449], [245], [159], [907], [209], [933], [80], [762, 959], [690], [728], [184], [522], [109], [208], [551], [984], [982], [138], [], [891], [], [428, 792], [51], [416], [636], [750, 721], [100], [114], [109], [670], [727], [511], [754], [300], [724], [703], [636], [481, 485, 632], [], [189], [460, 437], [621], [513], [150], [755], [875], [351], [759], [301], [202], [198], [324], [144], [119, 120], [171], [971], [620], [656], [305], [907], [113], [865], [270], [345], [706], [980], [], [479], [416], [180], [93], [199], [105], [94], [677, 587], [185], [], [394], [352], [550], [908], [31], [147], [884, 406], [928, 850], [557], [528], [148], [4], [278], [474], [919, 733], [650], [465], [279], [512], [841], [439], [56], [349], [], [747], [271], [740], [916], [112, 506], [100], [449], [319], [375], [513, 579, 881], [542], [300], [220], [496], [866], [645], [107], [816], [506], [32], [472], [850], [330, 331], [598], [653], [360], [179], [172], [175], [984], [806], [970, 915], [579], [544, 926], [226], [401], [117], [372], [], [335], [951], [750, 721], [491], [856], [165, 234], [743, 905], [898, 585], [566, 439], [488, 843], [987, 998], [987, 998], [899], [132], [571], [778], [543], [88], [924], [767], [569], [55, 59], [113], [542], [704], [44], [884, 532, 762, 923, 572], [459], [750], [29], [152, 157], [61], [], [437], [863], [875], [164], [722], [785], [927], [], [751], [364], [864], [250], [700], [554], [830], [794], [365], [219], [], [650, 558], [], [237, 180], [], [773], [295], [413], [177], [914], [563], [569], [303], [921], [], [670], [140], [738, 957], [274], [785, 180], [26], [311], [8], [945, 939, 943], [450], [754], [228], [239], [566], [561], [486, 889], [237], [874], [362], [264, 263], [662], [], [977], [199], [254, 262], [289], [304], [839, 718], [248, 250], [804], [900], [364], [182], [284, 861], [421], [65], [445], [916], [26], [709], [955], [135], [630], [421], [919], [217], [5], [790], [237], [997], [686], [31], [460], [88], [738, 421], [296], [45], [470], [825], [], [323], [956], [570], [352], [442, 494], [366], [311], [749], [87], [479, 817, 511], [894], [868, 470, 923], [213], [981], [347], [533], [483], [724, 536], [76], [395], [903], [367], [], [293], [780], [909], [342], [955], [803], [768], [], [948], [414, 478], [701], [777, 623], [758], [367], [543, 422], [708], [514], [488, 695], [692, 917], [836, 837, 977, 978], [915], [966], [437], [207], [85], [341], [232], [654], [263], [779], [394], [476], [], [367], [643], [741], [883], [412], [327], [758], [291], [936], [739], [560], [778], [141], [153], [890], [207], [734], [846, 619], [896, 999], [979], [570], [903], [109], [868, 987], [], [93], [890], [], [489, 15], [140], [570], [512], [770], [74], [529], [233], [669], [281], [72, 815], [312], [410], [440], [363], [231], [110], [992], [786], [765], [], [578], [619, 858], [351], [619], [905, 495], [857], [518, 652, 691, 570], [253], [799], [129], [686], [916], [100], [26], [299], [617], [745], [214], [], [577], [967], [963], [64, 55], [538], [22], [296], [709], [454, 652], [604], [428], [482], [53], [696], [544], [819], [546, 650, 819, 822, 542], [463, 925], [382], [], [], [362], [8], [726], [625, 554], [771], [717, 733], [767], [356], [554], [293], [396], [684], [235], [552, 733], [932], [3], [679], [507], [203], [398], [943, 945], [470], [50], [662], [936], [114], [508], [574], [846], [125], [628], [637], [358], [56], [], [576], [906], [309, 410], [873], [388], [728], [119], [864], [], [911], [239], [938], [745], [580], [576], [90], [405], [695], [215], [], [600], [519], [868, 532, 762, 923, 572], [436], [465], [372], [423], [878], [853], [696], [93], [976], [53], [360], [955], [942], [679], [], [252], [959], [426], [680], [585], [749], [393], [283], [601], [70], [], [448], [247], [711], [854], [638, 639], [507], [32], [805], [957], [795], [807], [838, 631], [960], [489], [820], [489], [204], [241, 238], [802], [364], [871], [228], [790], [174], [690], [], [540], [466], [], [886], [66], [481, 482], [734], [371], [785], [279], [636], [518], [167], [582], [679], [13], [915], [552], [878], [552], [489, 273], [617, 823], [111], [582, 790], [505], [314], [711], [419], [267], [719], [786], [838], [298, 357], [89], [13], [68], [441], [2, 3], [514], [64], [332], [547], [185, 186], [], [73], [643], [386, 101], [], [752], [647], [], [470], [343], [302], [181], [493], [831], [394], [39, 47], [232], [949], [638, 639], [745], [485, 761], [834, 630, 637], [224, 852, 205], [26], [992], [186], [769, 587], [579, 881], [850, 854], [507], [814], [19], [632, 851, 548], [875], [168, 159], [317], [912], [732], [747], [464], [566], [235], [105], [593], [71], [575], [218], [254], [167], [110], [267], [87], [172], [49, 50], [966, 572], [870], [64], [], [418, 767], [579], [538], [388], [651], [465], [166], [325], [574], [681, 810, 620, 508], [543], [978], [76], [538], [806], [993], [], [964], [104], [204], [696], [370], [610], [949, 647], [208], [558, 699, 541], [894], [164], [763], [428], [485], [514], [220], [211], [98], [399, 501], [114], [749, 542], [32, 30], [780], [395], [333], [626], [488], [841, 523, 412], [433], [566], [82], [], [145], [465], [652, 764], [66, 68], [643], [968], [896, 725], [122], [515, 869, 763], [967], [27], [121], [200], [308], [113], [243], [874], [997], [173], [444], [91], [145], [744, 812, 657], [43], [555], [555], [372], [545], [617, 823, 487], [368], [266], [419], [901], [470], [610], [608, 748], [3], [], [], [910], [386, 101], [486], [336], [760], [130], [513, 776, 875], [976], [132], [541, 542], [], [309], [407], [500], [101], [174], [535], [228], [794], [299], [906], [762, 554], [24], [115], [299], [], [809, 532, 923, 925, 926], [78], [652], [], [666], [451], [391], [784], [243], [924], [655], [], [609], [191], [607], [636], [318], [908, 404], [338], [57], [480], [374], [923], [505], [671, 898, 535], [682, 562], [70], [814], [548], [514], [478], [353], [185], [48], [328, 109], [436], [], [250], [398], [374], [385], [293], [48], [509], [462], [423], [981], [932, 415], [920], [796], [346], [247, 159], [550], [146], [652], [2], [497], [693], [256], [996], [277], [241], [423, 424, 831], [663], [462], [261], [213], [769], [440], [], [640], [228], [8], [450], [101], [948, 950, 957], [48], [57], [608, 523], [405], [805], [369], [133], [467], [], [232], [675, 208], [], [870], [322], [872, 622, 759], [123], [623], [49], [977, 978], [220], [877], [778], [22], [165], [719], [372], [95], [123], [337], [889], [776], [115], [574], [217], [938], [973], [887], [168, 178], [291], [888], [469, 919], [515], [669], [576, 693, 954], [913], [866], [375], [9], [236], [24], [369], [952], [923, 809, 947], [122], [584], [397], [806, 559, 463, 610], [84], [818], [], [394], [778], [619, 846, 721, 831], [212], [754], [245], [654], [244], [250], [156], [562], [933], [202], [910, 567], [764, 413], [], [427, 756], [155], [197, 199], [334], [24], [118], [110], [397], [420], [244], [640], [933], [228], [659, 952], [582, 680, 791], [463], [92], [138], [692, 960, 582], [77], [939], [38], [842, 433], [381], [174], [431], [987, 938, 923], [440], [971], [560], [424], [92], [508], [839], [698], [558], [729, 495], [711], [669], [811], [84], [744, 657], [426], [662], [26], [960, 931, 415], [799], [964, 813], [651], [813], [874], [366], [], [183], [738], [878], [975], [916], [149], [923], [451], [944], [761], [836, 837, 610, 870], [], [249], [960], [168], [610], [834, 588], [996], [747], [738], [892], [145], [426], [987, 998], [205, 213], [546, 650, 819], [239], [781], [673, 664, 526, 527, 782, 632, 508], [127], [726], [117], [562], [653], [404], [476], [9], [288], [531], [172, 177], [513, 715, 439], [586], [364], [453], [133], [180], [899, 619, 849], [553, 728], [836, 837, 885], [474], [858], [692], [252], [161], [525], [737], [487], [686], [73, 74], [339], [979], [810, 878], [230, 231], [426], [687], [835], [187], [998], [546], [37], [593], [990], [367], [508], [526], [36], [217], [], [473], [242], [207], [963], [750, 721], [563], [281, 282], [], [678], [742], [208], [18], [218], [212], [728], [367], [74], [520], [890], [570], [692], [275], [971], [428], [408], [442], [274], [702], [131], [849, 505], [994], [400, 667], [216], [501], [453, 624], [729], [844], [397], [], [], [], [987], [459], [173], [513], [650], [609], [581, 656], [865], [647, 659], [544], [870], [137], [522], [681, 620], [867], [817], [300], [675], [205], [463], [223], [], [52], [830], [443], [431], [893], [512], [461], [402], [41], [257], [750, 846, 721], [69], [127], [700], [759], [608], [384], [937], [298], [492], [362], [14], [958], [705], [827], [613], [427], [783], [673, 742, 526, 527, 782, 664, 508], [376], [577], [569], [894], [384], [262], [556], [162], [394], [898], [439], [48], [998], [188], [700], [459], [933], [985], [828], [399], [396], [], [801], [411], [769], [198], [829], [493], [632], [751, 479], [211], [222], [163], [979], [594], [189], [557], [927], [], [783], [114], [49], [885], [490, 524, 461], [893], [872], [128], [488], [], [472, 693], [694], [150], [796], [22], [608, 873, 414], [867], [372], [711], [900], [204], [175], [606], [392], [283], [692], [821], [], [20], [255], [880], [105], [710], [], [737], [183], [], [399], [462], [963], [816], [783], [38], [20], [809, 925], [18], [489], [809, 659, 925], [619, 846, 818], [916], [257], [526, 539, 588, 738, 883], [815], [360], [128], [608, 515], [138], [415], [987, 998], [499], [216], [], [804], [878], [505], [242], [598], [56], [865], [166, 167], [544], [677], [370], [558, 889], [487], [28], [678], [713, 742], [324, 946], [73, 74], [747], [584], [96], [382], [578], [161], [291], [940], [129], [929], [749], [], [349], [454], [44, 634], [362], [473], [552], [798], [87], [953], [676, 597], [643], [47], [915], [968, 534, 504], [896], [435, 876], [879], [563], [871], [], [59], [682], [645, 735], [], [487], [737], [], [43], [210], [103], [584], [595], [834, 906], [274], [], [860], [], [], [977, 978], [928, 659, 949, 927], [574], [630], [901, 725], [555], [503], [397], [414], [717], [727], [503, 828], [631], [783], [795, 862], [457], [27], [447], [365], [342], [], [48], [216], [724], [840], [934], [111, 114], [255], [544, 909, 469, 926], [93], [245], [563], [347], [814], [278], [810, 878], [185, 193], [313], [465], [358], [752], [457], [204], [5], [601], [937], [818], [490], [632], [559], [67, 54], [987, 998], [787], [741, 885], [], [221, 206], [252], [52], [546], [666], [749], [196], [724], [963], [], [955], [321], [223], [63], [759], [442, 497, 409], [47], [42, 44], [412], [637], [974], [388], [328], [162], [292], [825], [692], [192], [], [519, 478], [375], [918], [147], [992], [29], [173], [61, 62], [709], [889], [685], [109], [321], [580], [754], [], [315], [159], [772], [693], [349], [607], [], [699], [118], [305], [126], [], [], [606], [769], [387], [220], [55], [], [516], [470], [75], [48], [947], [126], [361], [494], [392], [780, 914, 536], [277], [268], [635], [274], [397], [394], [586], [703], [458], [402], [], [143], [949, 923], [177], [892], [478], [500], [451], [820], [958], [15], [113], [532, 762, 923, 572], [5], [995], [530], [258], [974], [661], [731], [140], [975], [421, 825], [863], [180], [739], [709], [548], [821], [653, 535], [866], [555], [765], [448], [336], [147], [207], [], [229], [67], [720], [822, 541, 542], [750], [651], [], [703], [953], [652, 847, 471], [612], [481, 485, 592, 605], [18], [681, 810, 620, 508], [59], [113], [524, 461], [977, 978], [261], [819, 541, 542], [153], [403], [910], [541, 542], [316], [558, 917, 921], [845, 638], [509], [768], [477], [704], [72], [470], [41], [775, 842, 977, 978, 445], [236, 237], [55], [153], [], [184, 202, 191], [442], [], [20], [920], [328], [23], [77], [994], [654], [489], [547], [], [308], [], [472], [64], [991], [], [723], [649], [99, 100], [970, 795], [681, 620], [727], [785], [486], [106], [], [137], [102], [705, 466, 799], [647], [], [395], [2], [61], [845], [894], [647], [843], [823], [472, 693], [336], [127], [], [153, 204], [], [685], [149], [851, 532, 831], [929, 227], [781], [329], [987, 998], [387], [721], [119], [502], [802], [373], [523], [398], [896, 999, 281, 700], [372], [369], [675], [261], [944, 946], [725, 572], [785], [461], [229], [568], [130], [59], [367], [752, 852], [358], [456], [555], [320], [957], [716], [846], [9], [], [630], [149], [], [515, 836, 559], [839], [198], [], [103], [696], [774, 614, 879], [157], [841, 825], [895], [], [476], [415], [509], [], [552], [70], [856], [36], [294], [225], [649], [840], [184], [489], [210], [133], [196], [307], [896, 876, 435], [694], [136], [710], [336], [400, 857, 667], [271, 277], [228], [699], [], [536], [347], [216, 716, 220], [203], [822, 542], [275], [714], [828], [571], [137], [6], [654], [679, 459], [663], [187], [260], [464], [670], [72], [612], [985], [5, 6], [471], [], [206], [244], [524], [971], [659], [642], [598], [264], [714], [156], [420], [420, 650, 402, 818, 819, 889], [129], [223], [903, 501], [479, 511], [612], [713], [720], [452], [283], [120], [836, 453, 837], [521, 962], [748, 636], [919], [251], [972, 23], [481], [594], [579], [171], [859], [769, 767], [26], [625], [306], [913], [236], [679], [], [152], [611], [], [490], [476], [376], [840], [249], [953], [938], [872], [507], [202, 189], [947, 997], [464], [627], [326], [865], [388], [], [870], [777, 596, 597, 763], [971], [197, 183], [811], [], [181], [1], [51], [194], [566], [855], [805], [635], [452], [58], [716], [752], [264], [345], [143], [619, 846], [441], [39], [179], [193], [917, 921], [538], [231], [466], [169], [776], [64], [484], [258], [275], [977, 978], [706, 423, 532, 923], [173], [277], [361], [536], [718, 510], [587], [859], [430], [977, 978, 853], [506, 733], [337], [986], [351], [679], [533], [666], [337], [350], [50], [968, 504], [852], [837, 703, 921], [674], [215], [755], [311], [88], [15], [253], [553], [616], [790], [963], [717], [822], [23], [], [786], [], [403], [], [732], [725], [72, 815], [], [394], [0], [333], [339], [461], [145], [903], [500], [977, 978], [23], [561], [921], [607], [708], [291], [292], [682], [617], [278], [957], [206, 221], [668], [40], [293], [594], [655], [344], [475], [142], [160], [469], [108], [780], [641], [229], [96], [88], [411], [249, 537], [849], [773], [108], [740, 587, 784, 477], [600, 823], [554], [770, 898, 649], [280], [248, 249, 250], [288], [340], [809, 943, 499], [972], [626], [255], [479], [396], [592], [169], [912, 339], [57], [586], [665], [687], [533], [850], [243], [], [956], [702], [408], [], [622], [778], [657], [429], [138], [455], [927], [985], [242, 703], [204], [0], [78], [677], [87], [421], [567], [706, 879], [140], [141], [159], [754], [113, 125], [790], [453, 740], [355], [491], [524], [5], [290], [139], [708], [917], [], [698], [609], [73], [545], [399], [7], [166], [347], [970], [790], [253], [440], [86], [954], [444], [902], [652], [37], [71], [338], [661], [790], [802], [], [455], [12], [394], [871], [828], [531], [852, 187], [740], [681, 810, 620, 508], [152], [834, 457, 906], [659], [], [122], [785], [484, 871], [362], [647], [199], [770], [859], [937], [440], [421], [424], [984], [660], [443], [759], [353], [665], [701], [137], [61], [784], [391], [], [311], [890], [812], [580], [571], [385], [237], [307], [376], [], [670], [821], [352], [328], [913], [836, 703, 796], [216], [908], [], [160, 177], [136], [650], [290], [604], [529], [581], [805], [595], [368], [462], [953], [456, 341], [395], [437], [567], [492], [972], [271], [158], [273, 274], [], [548, 598], [112], [798], [524], [412], [314], [995], [512], [261], [681, 620, 527, 664, 508], [176], [522], [521], [52, 60], [286], [344], [309], [763], [173], [209], [278], [752], [], [900], [721], [923], [992], [891], [339], [569], [0], [190], [168], [380], [401], [13], [426], [342], [276], [673, 681, 526, 527, 664, 508], [770], [500], [489, 507], [375], [855, 828], [409, 892], [198], [335], [682], [835], [606], [499], [916], [907, 499, 411], [701], [640], [797], [815], [19], [520], [36, 37], [956], [169], [0], [54], [365], [962], [878], [854], [568], [629], [300], [744, 657], [728, 545], [366], [572], [923], [132], [992], [650, 558], [0], [218], [], [953], [496], [20], [502], [154], [482, 605], [165], [343], [991], [434], [], [518], [977, 978], [965, 440], [892], [677], [24], [249], [818, 437], [288], [91], [6], [619, 818], [612], [688], [601], [819], [923, 868], [], [221], [514], [631], [957], [489, 695], [731, 861, 999], [753], [262], [744, 657], [753], [919], [156], [147], [683, 699], [730], [771], [464], [315], [121], [962, 935], [209], [715, 524, 461], [779], [970], [263], [556], [995], [606], [673, 526, 527, 664, 508], [140], [540], [390, 973], [4], [379], [76], [957], [267], [626], [992], [526, 720], [158], [640], [211, 159], [405], [383], [879, 412], [272], [], [213], [828], [252], [570], [223], [112], [602], [457], [37], [125], [398], [477], [201], [903, 689, 601], [799], [375], [57], [515, 413], [744, 657], [], [697, 610], [538, 668], [674], [69], [428], [131], [508], [278], [898], [205], [643], [], [37], [964], [7], [379], [588], [834, 532, 505], [755], [548, 553, 527], [986], [484], [5], [370], [696], [], [400, 667], [267], [3, 147, 149], [186], [], [890], [416], [681, 620, 916, 664], [], [651], [116], [396], [235], [17], [92], [404], [504], [270], [492, 519], [643], [162], [939, 582], [643, 903], [720], [175], [197, 233], [927], [987], [834, 906], [342], [660], [], [136], [869, 655, 630, 539], [239], [795], [74, 815], [701], [374], [], [87], [], [134], [82], [420], [464, 763], [758], [485, 685, 475, 511], [158], [645], [396], [465], [366], [438], [154], [437], [271], [948], [53], [335], [995], [83], [915], [245], [251, 246], [546], [], [198], [95], [488], [763], [911, 701], [283], [837, 978, 890], [979], [], [650], [831], [], [990], [251], [887, 501, 439], [269], [852], [540], [573, 479], [959], [487], [99], [376], [], [550], [763], [8], [25], [396], [263], [424], [455], [965], [320], [731], [172], [320], [302], [896, 804, 434], [557, 472], [906], [644], [485, 592], [640], [340], [195], [310], [181], [127], [386], [842, 977], [721], [], [195, 179], [145], [191], [375], [931, 950, 954, 923], [], [847], [92], [601], [566], [851], [705], [113], [897], [360], [615], [666], [526, 765], [230], [606], [849, 505, 859], [7], [162], [112], [176], [148], [467], [725, 872], [26], [103], [141, 976], [268], [25], [935], [836, 837, 630], [27], [438], [82], [625], [], [382], [281], [599], [570], [479], [41], [230], [742], [], [788], [62], [769, 398], [146], [515], [453, 606], [39, 119], [211], [520, 669], [583], [377], [481, 482], [191], [804], [497], [843], [229], [343], [], [550], [241, 238], [195], [170, 177], [419], [139], [79], [565], [], [355], [310], [564], [898, 414, 608], [], [741], [406], [816], [846], [349], [669], [112], [131], [824], [888, 705], [6], [673, 526, 527, 782, 664, 508], [776, 650], [721, 831], [], [134], [182], [689, 819, 578, 488, 885], [888], [449], [140, 142], [760], [628], [984], [396], [913], [915], [], [100], [801], [], [204], [746], [216], [532], [501], [456], [473], [716], [], [200], [588], [997], [690], [697, 443, 828], [904], [669], [621], [170], [706, 879, 401, 762], [247], [259], [929], [220], [516], [25, 28], [569], [91], [398], [810, 508], [229], [957], [], [], [554], [192], [700], [570], [492], [675, 478], [614], [654], [736], [869, 671], [652], [372], [948], [403, 536], [674], [805], [454, 921], [445], [363], [16], [], [928, 949, 927], [253], [88], [208], [561], [839], [629], [842, 445], [252], [], [36, 37], [], [571], [], [815], [528], [354], [615], [81], [980], [884], [371], [11], [380], [], [606], [713], [207], [544], [796], [102], [902], [99], [24], [387], [886], [773], [370], [836, 970, 414], [201], [455], [764, 413], [917], [84], [246], [72], [534, 729], [780, 724], [88], [178], [321], [911, 533], [426], [232], [632], [], [154], [133], [729], [630], [809], [89], [218, 156], [804], [701], [644], [775], [832], [238], [362], [797], [584], [359], [668], [977, 978, 445], [622, 759], [], [], [958], [268], [34], [62], [443], [618, 809, 659], [547], [133], [208], [597], [810, 664, 527, 782, 508], [346], [283, 750], [735], [760], [440, 574], [866], [], [549], [802], [44, 60], [146], [260], [115], [40], [313], [930], [769, 587], [474], [957], [734], [301], [469], [807], [281], [289], [143], [290], [368], [589], [814], [792], [722], [], [583], [], [265], [], [666], [970, 795], [621], [26], [223], [451], [196, 198], [707], [313], [619, 750, 846, 721], [305], [29], [382], [340], [180, 243], [739], [977, 638, 639], [546], [514], [352], [368], [347], [770], [425], [608, 610], [685], [514], [811], [], [], [582], [], [475], [952], [928], [784], [607], [336], [945], [96], [785], [297, 295], [545], [4], [64], [44], [18], [801, 397, 983], [609, 977, 978], [208], [252], [358], [423], [130], [704], [424], [143], [286], [107], [93], [], [350], [866], [100], [719], [], [137], [129], [958], [], [809, 762, 923, 926], [170, 177], [855], [543], [803], [626], [248], [717], [450], [895], [178], [971], [423, 424], [923, 960], [530], [384], [422], [24], [370], [400, 667], [324], [984], [654], [302], [690, 345], [640], [184], [722], [805], [860], [100], [971], [141, 142], [159], [], [273], [394], [826], [709], [784], [428], [198], [286], [969], [221], [851, 548], [120], [358], [645], [939, 940, 943, 950], [543], [132], [159], [537], [234], [602], [51], [], [639], [330], [261], [533], [], [500], [926], [191], [346], [173], [], [3], [280, 278], [393], [374], [386], [791], [143], [422], [237], [755], [127], [410], [913], [176], [], [509], [85, 86], [927], [903, 526, 528, 782], [891], [971], [], [112], [985], [156], [188], [332], [487], [326], [744, 657, 403], [933], [556], [134], [206], [786], [235], [685], [957], [965], [255], [862], [827], [25], [779], [637], [542], [896, 434, 861], [495], [112], [344], [750, 564], [201], [632], [417], [19], [822], [441], [758], [352], [681, 620, 508], [840, 463], [], [144], [105], [134], [867], [57], [], [548, 851, 598, 632], [985], [628], [37], [648], [405], [332], [938], [319], [320], [71], [944], [936], [48], [95], [568], [441], [810], [586], [396], [8], [156], [45], [685], [734], [775], [167], [203], [822], [39], [111], [288], [638, 639], [463], [140], [802], [604], [975], [769, 418, 709], [896], [375], [247], [860], [52], [561], [], [943], [744], [814], [411], [317], [516, 520, 431], [63], [510], [386], [], [313], [859], [576], [469], [251], [863], [592], [447], [], [420], [8], [697], [873], [798], [365], [], [117, 62], [726], [606], [560], [], [692, 567], [969], [267, 852], [177], [410], [781], [77], [580], [670], [234, 165], [974], [61], [310], [828], [], [749], [607], [831], [954], [153], [577], [17], [280], [386, 101], [618], [263], [247], [984], [575], [105], [166], [912], [634], [581, 479, 656], [203], [26], [226], [774, 470], [788], [90], [516], [548], [220], [], [556], [725], [328], [525], [667], [238, 240], [875], [892], [934], [17], [612], [474, 911], [57], [914], [419], [490], [], [997], [294], [909], [214], [778], [772], [922], [30], [136], [153], [195], [415], [315], [435], [178], [828], [383], [277, 278], [207], [821], [712], [408], [], [536], [681, 620, 603], [898], [0, 133], [868, 415], [135], [209], [12], [251], [113], [537], [], [857], [541], [351], [806], [], [614], [553], [45], [382], [790, 998], [510], [497, 663], [526, 782, 851], [206], [536], [786], [257, 222], [939, 945], [838], [547, 820], [470], [701], [576], [917], [14], [], [736], [464], [966, 907], [979], [688], [548], [562], [686], [], [553, 851], [628], [14], [487], [215], [145], [183], [227], [839], [408], [968, 504], [988], [870], [1], [627], [905], [922], [766], [529, 692], [271], [852], [546, 453], [237], [259], [629], [655], [998, 987, 575], [515], [229], [427, 756], [869, 433], [650, 851, 541], [205], [757], [940], [159], [37], [845], [97], [284], [773], [140, 142], [838], [10], [373], [49], [492, 786], [91], [177], [146], [614], [779], [], [976], [458], [275], [791], [883], [775], [11], [334], [303], [120], [935], [], [792], [441], [285], [387], [568], [162], [83], [132], [678], [305], [684], [127], [701], [421, 818, 506], [836, 837, 552], [327], [325], [623], [189], [712], [638, 639], [284], [697], [838, 680, 631], [958], [758], [355], [907, 692], [12], [896, 434], [113], [137], [939, 943], [803], [245], [], [886], [986], [808], [377], [581, 479], [13], [24], [758], [898], [417, 701], [750], [28], [434], [991], [66], [257], [], [543], [617], [937], [904, 969], [289], [419], [472], [609, 586, 652], [333, 335, 760], [88], [393], [941], [697], [759], [368], [417, 869, 501], [320], [834, 977, 978, 982], [438], [275], [877], [520], [208], [245], [], [514, 664, 655], [311], [987], [462], [418], [399], [794, 799], [249], [18], [957], [76], [400, 667], [986], [653], [80], [615], [1], [458], [690], [468], [], [], [591], [152], [409, 826], [563], [750, 721], [939, 943], [196], [58], [48], [203], [], [518], [922], [35], [714], [], [665], [754], [210], [687], [851, 548], [198], [210, 852], [890], [127], [559, 594], [689, 443], [832], [487], [444], [55], [430], [663], [425], [808, 977, 978, 445], [], [377], [387], [916], [533], [], [], [828], [611], [884, 538], [697], [758], [316], [197], [392], [557, 751, 733, 479], [425], [715], [769], [410], [94], [391], [638, 639], [581, 479, 661], [821], [267], [365], [587, 499], [476], [584], [939], [820], [923], [39], [187], [947], [62], [654, 656], [884], [338], [557, 562], [938], [345], [32], [9], [438], [801], [404], [894, 281, 285], [896], [809, 659, 762], [346], [51], [4, 394], [586], [861], [455], [433, 793], [947], [874], [307], [445], [267], [493, 526], [953], [328], [949], [], [141], [386], [234, 165], [955], [967, 968], [667], [], [583], [162, 167], [180], [944], [959], [], [729], [], [106], [51], [646], [670], [698], [586], [120], [], [980], [730], [159], [973], [383], [713], [635], [], [281], [734], [938], [528], [315], [911], [598], [747], [178], [480], [925], [946], [546, 402, 819], [629], [], [970], [174], [689], [431], [867], [34], [760, 827], [466], [530], [131], [972], [28], [511], [475], [305], [712], [512], [950], [], [], [623], [605], [253], [809, 909, 926], [267], [10], [555], [466], [668], [225], [700, 999], [610, 823], [801, 973, 983], [259], [825], [683], [116], [350], [209], [327], [510], [406], [844], [644], [743], [], [519], [281], [681, 620], [385], [85], [759], [747], [766], [196, 198], [224, 223], [198], [559], [188], [66, 68], [744, 657], [], [919], [761], [756], [244], [543], [703], [719], [628], [217], [529, 219], [210], [487], [393, 108], [190], [127], [443], [296], [940], [431], [296], [744, 655, 657], [387], [804], [], [790], [710], [785], [531], [984], [667], [766], [], [218], [560], [843], [49], [638, 639], [9], [553], [233], [890, 445], [15], [], [484], [497], [782, 664, 810], [154], [787], [314, 861], [621], [730], [321], [673, 681, 810, 620, 526, 664, 508], [760], [706], [494], [591], [906], [382], [182], [239], [792], [487], [514], [696], [632], [], [946], [803], [792], [450], [], [185], [489], [], [73, 77], [529, 728], [501, 887], [650], [155], [629], [888], [132, 211], [425], [98], [77], [852], [645], [637], [956], [687], [25], [446], [632], [579], [505], [697], [286], [751], [390], [49, 50], [907], [450], [349], [], [782, 664], [456], [406], [274], [489], [331], [476], [825], [756], [877], [926], [], [], [523], [143], [254], [587], [868, 415], [387], [452], [716], [592], [366], [371], [594], [864, 586], [831, 967, 968, 608, 504], [885], [247], [546, 650, 819], [19], [355], [994], [769, 709], [223], [467], [584], [411, 678, 868], [107], [58], [698], [532, 762, 923, 572], [974], [], [521], [399], [412], [384, 375], [841], [20], [654], [844], [271], [331], [522], [499, 600], [39, 44], [314], [22], [736], [715], [759], [840], [729], [905, 532], [177, 490], [841], [845, 470], [], [640], [850, 823], [160], [471], [35, 49], [906], [79], [717, 581, 479], [801], [430], [934], [40, 44], [992], [894], [873], [37], [711], [80], [543], [107], [], [721], [732, 759], [330], [597, 763], [319], [548, 556, 851], [573], [224], [194], [538], [686], [384], [160], [414], [721], [392], [874], [], [592], [645], [280], [948, 949, 923], [260], [868, 923], [228], [881], [982], [771], [748], [], [740], [307], [708], [489, 852], [], [210, 211], [515], [531], [], [824], [], [680], [215], [984], [572], [148], [967, 923], [654], [707], [253], [], [746], [684], [333], [584], [185], [747], [635], [411], [455], [893], [165], [581, 518, 479, 661], [963], [], [877], [768], [139], [410, 309, 599], [137], [], [681, 620, 526], [644], [], [283], [113], [320], [283], [10], [], [632], [385], [664, 526, 527, 632], [631], [850], [537], [451], [611], [268], [634], [96], [149], [318], [61], [281], [427], [642], [291], [604], [], [991], [437], [254], [212], [995], [488, 439], [946], [857], [750], [840], [644], [378], [778], [], [108], [150], [59], [554, 628], [369], [993], [61], [342], [343], [44], [977, 978], [833], [451], [721, 831], [258], [544, 813, 910, 926, 469, 827], [750, 915], [143], [72], [612], [181], [12, 957], [198], [379], [745], [749], [206], [145], [515, 204], [486, 889], [668], [533], [573], [678, 518], [608, 872], [121], [119], [582], [43], [322], [562], [], [216], [697], [315], [921], [618, 623, 499], [188], [], [277], [305], [997], [], [977, 978], [889], [], [768], [359], [48], [840, 463], [356], [136], [981, 429], [780], [904], [145], [372], [555], [531], [332], [868], [832], [814], [548], [616], [423], [134], [317], [168], [616], [653], [439], [825], [421], [828], [27], [883], [987, 998], [397], [793], [418, 709], [], [542], [102], [40], [103], [27], [743], [448], [414], [], [664], [119], [199], [420], [940], [255], [281], [323], [383], [483], [255], [550], [874], [], [36], [637], [193, 187], [696, 806], [721], [471], [819, 822], [868, 923, 118], [40], [488, 843], [919], [325], [825], [341], [184], [264], [737, 907, 760, 440], [309], [404], [55], [491], [8], [531], [575], [912], [324], [168], [134], [13], [992], [683], [307], [855], [706, 559, 976], [], [618], [407], [842, 433], [269], [724], [90], [161], [715], [780, 914], [474], [476], [143], [736], [238], [476], [759, 475], [558], [268], [849], [151, 158], [232], [866], [526], [801, 691, 983, 570], [], [57], [279], [197], [103], [248], [418], [360], [403], [385, 386], [361], [], [109], [89], [721], [358, 359], [706, 846, 789, 765], [816], [360], [391], [256], [470], [748], [952], [0], [], [387], [71], [92], [674], [707, 709, 528], [], [936], [240, 241], [49, 50], [905], [369], [854], [385], [438], [19], [520], [223], [588], [363], [780], [150], [374], [333], [204], [939], [553], [716, 853], [47], [821], [918], [504, 957], [781], [], [731], [], [978], [755], [641], [], [432], [673, 681, 620], [813], [581, 479], [25], [737, 455], [784], [368], [], [506], [753], [917, 921], [773], [71], [894], [], [610, 543], [331], [554], [991], [44, 26], [66], [260], [849], [135], [785], [732, 759], [869], [418], [32], [630], [796], [970], [421], [], [659], [454, 917], [170], [563], [802], [500], [95], [255], [23], [852], [982], [39], [397], [239], [225], [701], [], [903, 691], [759], [72, 815], [518, 671], [518, 671], [16], [948], [], [484], [711], [615], [779, 414], [984], [38], [197], [855], [162], [908], [117], [944], [487], [953], [949], [243, 254], [431], [333], [560], [364], [354], [748, 667], [171], [127], [626], [640], [959], [535], [446], [], [904], [999, 700], [776, 683, 889], [774, 655, 831, 502], [263], [132], [441], [802], [342], [865, 207], [323], [96], [652, 413], [615], [], [815], [140, 142], [159], [317], [107], [], [182], [961], [], [4], [702], [673, 453, 526, 527, 664], [178], [923], [308], [191], [747], [], [911, 735], [453], [86], [170], [133], [142], [7], [], [403], [135], [884], [944], [336], [945], [], [880], [927], [24], [520], [27], [407], [492], [393], [16], [531], [970], [209], [489], [365], [647, 659], [958], [64], [727], [116], [574], [8], [225], [161], [489, 919], [99], [440], [], [872, 622, 759], [755], [956], [896], [941], [33], [], [17], [294], [42], [449, 975], [794], [874], [683], [121], [706], [742], [911, 533], [264], [963], [752], [564], [734], [561], [280], [932], [227, 805], [906], [], [147], [721], [540], [965], [673, 526, 527, 846, 831, 664, 508], [156], [29], [425], [325], [359], [535], [834, 906], [527, 664, 508], [], [366], [], [109, 973], [838], [570], [433], [143], [92], [735], [239], [874], [957], [170], [254], [400, 667], [721], [229], [405], [766], [18], [330], [515, 752], [422], [956], [], [], [74, 815], [888], [908, 895], [265], [363], [770], [670], [323], [794], [39], [40], [20], [369], [944], [831], [368], [942], [121], [406], [525], [284], [731], [37], [66, 68], [392, 973], [738, 427], [652, 413], [317], [918], [286], [822], [704], [860], [42], [808], [288, 290], [390], [577], [453, 799], [697], [638, 639], [868, 964], [783], [44], [], [773], [89], [151], [158, 263], [], [957], [902], [888], [732], [825], [472], [323], [490], [708, 682], [974], [530], [647], [258], [818], [], [224], [418], [528, 707], [396], [896], [667], [429], [298], [574], [540], [180], [137], [119], [167], [588], [920], [324], [291, 292], [459], [365], [235], [987, 998], [612], [147], [363], [103], [631], [339], [318], [476], [384], [603], [95], [805], [832], [818, 729], [683, 579, 731], [534], [670], [153], [726], [688], [433], [359], [310], [627], [659, 949, 923], [513, 776], [80], [583], [], [277], [874], [423], [789], [811], [79], [436, 581, 479], [350], [969, 504], [392], [69], [821], [625], [393], [74, 815], [], [411], [833], [144], [108], [644], [291], [14], [602], [653], [901], [26], [115], [612], [618, 666], [800], [], [609], [28], [441], [859], [449], [25], [806], [752], [30], [896], [], [867], [738], [310], [429], [748], [552, 716], [195], [601], [952], [691], [73], [641], [410], [192], [547], [210], [578, 982], [634], [467], [83], [451], [44], [210, 164], [692], [566], [737, 582], [710], [395], [966, 948, 923, 572], [104], [], [232], [211], [570], [232], [40], [862], [79], [345, 690], [213], [749], [812], [828], [358], [], [76], [291], [], [654], [351], [826], [245], [162], [425], [72], [550], [825], [32, 31], [971], [295], [493], [151], [], [327], [872, 759], [286], [834, 906], [342], [833], [292], [352], [284], [563], [607], [433, 793, 638, 639], [629], [513, 543], [306], [889], [222], [513], [223], [152], [273], [157], [443, 841], [532], [155], [363], [353], [235], [868], [406], [453, 454], [906], [781], [712, 719], [205], [273], [35], [797], [153], [554], [748], [320], [321], [908, 895], [810, 878], [524, 461], [], [], [97], [766], [324], [880], [5], [318], [999, 247], [558], [682, 708, 562], [81], [485], [371], [], [685], [167], [221], [827], [429], [591], [723], [168], [841], [771], [715], [249], [426], [776], [], [913], [209, 703], [77], [487], [], [280], [410], [453, 589], [869], [604], [702], [748], [993], [755, 733], [317], [444], [82], [440], [763], [876, 435, 282], [909], [646], [650, 402], [399], [488], [], [802], [732], [839, 718, 821], [572], [832], [], [], [581], [696], [13], [494], [434], [946], [301], [389], [863], [127], [735], [], [82], [534], [73], [522, 281], [791], [341], [329], [514], [614], [107], [79], [645], [964], [911, 735], [483, 975], [922], [716, 309, 599], [518], [155], [779], [888], [909, 987], [223], [98], [839], [480], [360], [164], [900], [703], [944], [418], [715], [102], [532, 411, 931, 933], [449], [841], [317], [313], [757], [336], [341], [], [94], [200, 155], [690], [], [], [306], [], [989], [348], [435], [376], [], [652, 764, 413, 734], [642], [883], [597], [780], [322], [649], [], [1], [13], [475], [], [130], [709], [529, 631], [242], [120], [681], [652, 413], [179], [447], [48], [673, 681, 526, 527, 664, 508], [888], [789], [584], [], [896, 414, 487], [528], [447], [762], [963], [893], [818, 610], [460], [774], [], [84], [891], [942], [673, 681, 526, 782, 664, 508], [240], [769], [884], [777], [491, 477], [285], [869], [732], [475], [918, 281], [892], [684], [800], [577], [641], [470], [424], [453], [602], [301], [294], [755], [731], [759], [61, 62], [794], [769, 302], [422], [708], [450], [350], [124], [326], [982], [479, 660, 511], [662, 719], [574], [887, 501], [152], [579], [627], [578, 689], [235], [538, 668], [548, 851, 632], [418, 845], [0], [596], [403], [34], [831], [376], [561], [359], [733], [890], [87], [202], [397], [67, 68], [205], [777], [992], [631], [295], [868], [582], [684], [205], [], [], [639], [432, 683], [462], [928, 960], [716], [419], [80], [432], [189], [577], [888, 821], [796], [792], [637], [606], [], [524, 461], [], [849, 883], [], [709], [265], [857], [213], [460], [162, 164], [377], [], [588, 790], [942, 952], [406], [994], [417], [798], [719], [258], [557, 914], [], [611], [773], [13], [752], [263], [621], [432], [471], [932], [706], [625], [213], [916], [813, 567], [597, 763], [747], [533], [632], [156], [862], [829], [971], [557], [289], [638, 639], [413], [], [400], [906], [234], [759], [37], [595], [99], [676], [843], [475], [116], [638, 639], [780], [518, 958, 671], [910], [812], [543], [489, 207], [616], [310, 301], [956], [692, 968], [938], [317], [950], [960, 868, 845, 927, 415], [63], [823], [403], [993], [329], [484], [486], [704], [113], [344], [953], [44], [552], [84], [80], [316], [987, 998], [918], [489, 315], [749], [459], [], [954], [17], [638], [613], [352], [612], [323], [397], [905, 846, 725], [241], [557, 497], [873], [125], [492], [911], [333, 151], [883], [527, 916, 664], [173], [798], [30], [320], [935], [625], [219], [39], [198], [652], [666], [387], [157], [919], [960, 967, 968, 504, 923], [560], [102], [138], [365], [923], [888], [208], [253], [650], [284], [783, 784], [556], [150], [0, 389, 758], [22], [395], [763], [199], [], [122], [740], [704], [537], [146], [], [429], [90], [772], [443], [196, 198], [531], [], [195], [796], [984], [], [696], [709], [543], [235], [375], [481], [690], [639], [593], [337], [544, 469], [957], [878], [141], [825], [941], [949], [857], [655, 721, 831], [410], [66, 68], [253], [624], [85], [116], [583], [248, 249], [707], [578], [45], [273], [32], [948], [922], [398], [167], [645], [224], [392], [664, 526, 782], [938, 923], [395], [544], [963], [903], [248, 249], [537], [382], [31], [828], [274], [270], [971], [367], [797], [267], [737], [843, 977, 978], [357], [6], [723], [975], [172], [404], [421], [452], [588, 790], [38], [315], [324], [987], [105], [770], [679], [], [66, 68, 54], [888], [553], [476], [910, 948], [673], [836, 837, 906], [497], [361], [860], [893], [440], [759], [218], [744, 657], [513, 875], [153], [928, 960, 923], [], [309], [724], [], [352], [156], [197, 199], [714], [56], [209], [110], [45], [815], [738], [91], [248, 537], [137], [599], [673, 453, 553, 526, 527, 664], [634], [947], [90], [324], [274], [824], [222], [542], [984], [748, 636], [545], [871], [], [444], [216, 219, 214], [581, 511], [3], [61], [150], [52], [108], [564], [681, 620, 284], [341], [570], [749], [133], [281, 282], [340], [752, 852], [48], [291], [723], [865], [346], [712], [621], [579, 881], [432, 566, 683], [174], [844], [291], [66], [266], [574], [], [69], [382], [649], [992], [774, 655], [416], [199], [42], [903, 786], [652, 764, 413], [616], [658, 533], [631], [949, 990], [106], [672, 797], [654], [133], [851, 548, 453], [281], [], [930], [256], [104], [746], [531], [382], [528], [403], [457, 834], [955], [976, 972, 437], [424], [849], [707], [349], [155], [184], [430], [782, 916, 664, 508], [586], [288], [460], [273], [133], [472], [289], [869, 454, 824], [334], [601, 578], [417, 971], [352], [81], [288], [873], [413], [334], [418], [171], [474], [113], [425], [137], [783], [422], [291], [603], [475], [673, 526, 527, 664], [525], [31], [71], [248, 250], [], [508], [57], [841], [137], [138], [252], [677], [578, 846, 689, 601], [956], [27], [], [466], [954], [576], [252], [724], [], [795], [544], [953], [954], [923, 470], [933], [123], [984], [286], [992], [733], [193], [810, 508], [893], [321], [703], [519], [443], [687], [482], [967], [252], [190], [756], [475], [461], [135], [755], [636], [299], [416], [112], [966, 883, 572], [172], [873], [], [800], [175, 184], [498], [98], [914], [656], [689], [172], [572], [479, 661], [657], [366], [949], [340], [107], [322], [721], [421], [0], [571], [532, 762], [899, 725], [295], [95], [486], [443], [147], [553], [939], [606], [188], [184], [479], [139], [909, 827, 926], [652, 465], [69], [641], [145], [218], [898], [320], [194], [8, 7], [778], [148], [529], [806], [636], [], [882], [645], [931], [], [643], [737, 455], [333], [388], [694], [455, 440], [836, 837], [4], [801], [], [201], [814], [281], [542], [42], [132], [645], [], [323], [30], [677], [454], [836, 837, 869, 636], [933], [419], [110], [969], [208], [545], [375], [467], [], [455], [899, 868], [550], [437], [723], [735], [224], [56], [724], [336, 337], [917], [953], [468], [183], [], [88], [622], [25], [899, 505], [543], [896], [484], [972], [966, 907, 572], [592], [882], [540], [936, 939, 943, 945], [], [436], [96], [632, 605], [297], [250], [182], [433, 445], [930], [173], [472, 693], [265], [187], [466], [578], [702], [23], [497, 442, 858], [256], [129], [337], [383], [954], [935, 910, 659], [40, 46], [726], [905, 526, 664, 831, 846, 721, 851, 750, 894], [792], [439], [955], [316], [996], [853], [985], [], [574], [], [934], [496, 245], [907, 532, 762], [128], [327], [588, 606], [752], [503], [172], [949], [177], [738, 580], [448], [729], [759], [15], [949, 950, 923, 957], [873], [833], [133], [434, 435], [248], [796, 911], [852], [], [688], [361], [], [370, 375], [517, 718], [968, 969], [444, 518], [575], [82], [], [], [112], [472], [], [476], [], [312], [191], [447], [486], [320], [417, 575], [676, 263], [806], [358, 359], [521], [429], [987, 335], [217], [238], [724], [875], [612], [453, 881], [199], [915], [], [330, 331], [950], [529], [870], [164], [864], [822, 541, 542], [931], [194], [73], [225], [448], [471], [379], [718, 839], [121], [729, 534], [476], [601], [259], [43], [903], [349], [], [], [374], [413], [572], [388], [], [949, 950], [716], [698], [67], [35], [241], [884], [298], [213], [655], [], [251, 212], [240, 241], [141], [244], [370], [944], [515, 238, 596], [34], [460, 975], [769], [277], [807], [912], [273], [957], [988], [710, 767], [696], [942, 658], [984], [39], [535], [988], [400, 667], [656, 475], [321], [27], [875], [497], [64], [530, 479], [414], [559], [888], [223], [314], [912, 425], [655], [107], [622, 784], [119], [673], [139], [], [], [850], [930, 934], [874, 757], [136], [112], [], [651], [193, 187], [603], [382], [780], [776, 819], [904], [], [283], [920], [270], [846], [637], [889], [976], [579, 881], [609], [961], [508], [436], [582], [802], [449, 718], [515, 552, 834], [10], [], [], [223], [288, 290], [47], [43], [], [866], [435], [797], [307], [995], [192], [70], [37], [367], [965], [702], [276], [194], [343], [19], [654], [496], [241, 238], [269], [515], [646], [], [143], [302], [111], [740, 783], [557], [629], [488, 679], [476], [464], [26], [464], [388], [234], [279], [685], [841, 728], [362], [174], [862], [720], [88], [763], [451], [512], [434], [585, 589], [16], [968, 659], [848], [908, 895], [787], [167], [591], [927, 928], [], [4], [1], [196], [455], [215], [545], [620, 499], [603], [451], [], [], [532], [737], [338], [296], [300], [73], [692, 772], [815], [106], [235], [660], [240], [343], [886], [265], [320], [172], [], [438], [886], [688], [546, 650, 402, 819], [884], [829], [610], [969, 508], [555], [697], [759, 872], [426], [610, 841, 697], [592], [277], [457], [235], [569], [614], [481], [215, 216], [752], [229], [204], [908], [572], [952], [249], [703], [832, 979], [364], [14], [383], [971], [821], [572, 415], [405], [615], [826], [634], [218], [42], [533], [645, 733], [206], [654], [207], [420], [489], [705], [287], [775], [235], [995], [121], [630], [558], [709], [697], [832], [898, 455], [813], [856, 958], [453], [], [454], [340], [281], [101], [148], [278], [908], [387], [570], [389], [], [412], [115], [305], [830], [836, 837], [802], [60], [915], [747], [872], [282], [154], [], [296], [295], [], [608, 882], [964], [288], [582, 791], [150], [402, 546], [446], [229], [87], [779], [651], [581], [752], [421], [871], [381], [633], [507], [646], [769], [438], [506], [401], [613], [832], [147], [573], [608, 824], [999], [987], [923], [487], [881], [236], [276], [], [40], [228], [486], [452, 245], [987, 998], [398], [632], [688], [854], [268], [62], [428], [884, 406], [376], [203], [800], [297], [235, 172], [581, 479], [764], [405, 839], [583], [798], [669], [759], [315, 462], [331], [402], [393], [616], [], [603], [], [28], [492], [522], [49], [713], [800], [973], [381], [962, 923], [307], [109], [], [759], [975, 703], [267], [962, 935], [94], [], [808], [449], [813], [248, 250], [500], [548], [128], [211], [21], [637], [], [478], [870], [993], [69], [715], [493], [925], [263], [861], [292], [485, 592], [789], [129], [86], [623], [566], [939], [868, 923, 572], [218], [57], [658], [870], [349], [393], [453, 463], [543], [554], [330], [220], [218], [855], [293], [665], [420], [788], [237, 158], [875], [673, 681, 526, 782, 664], [654, 757], [959, 762, 923], [448], [921], [499], [289], [884], [488], [], [198], [985], [777], [921], [], [250], [369], [], [], [443], [245], [608, 474], [685], [280], [255], [261], [673, 681, 620, 697], [], [413], [410, 309, 599], [750, 721, 414], [21], [338], [524, 461], [946], [353], [488, 695], [987, 567, 923], [154], [27], [382], [769], [583], [584], [726], [659], [484], [103], [120], [756], [453, 624, 765], [475], [326], [441], [397], [345], [472], [141], [385, 386], [836, 837, 678, 977, 978], [158], [873], [651], [968], [883], [], [31], [486], [871], [221], [867], [496], [496], [934], [11], [632], [244], [21], [806], [512, 623], [53], [619, 846], [756], [842, 445], [523], [87], [748], [463], [20], [443], [797, 282], [463], [962, 935], [335, 703], [85], [687], [679], [434], [783], [957], [887], [221], [9, 876, 435], [987, 998], [292], [359], [672], [431], [199], [409], [], [739], [907, 572, 966], [225], [], [321], [211], [346], [656], [942], [322], [129], [725], [581, 479, 817], [518, 570], [218], [429], [968], [244], [373], [22], [878], [421, 428, 834, 869, 501], [453, 454, 624], [38], [11], [28], [48], [837, 806], [836, 837], [649], [808, 978], [13], [254], [261], [375], [403], [972], [148], [383], [692, 478], [942], [248], [679], [240], [792], [608, 799], [560], [568], [492], [874], [721], [234, 236], [579], [36], [808, 822], [769, 35], [518], [227], [976], [940], [628], [], [578], [264], [77], [619, 846], [916], [589], [305], [595], [686], [868], [], [867], [892], [9], [326], [181], [851], [836, 837, 608], [819, 608], [740], [331], [416], [], [538, 668], [427], [149], [117], [923], [840, 462], [662], [383], [517, 488, 600], [515], [644], [748], [98], [435], [832], [912], [899, 725], [519], [933, 923], [853, 762], [305], [330], [], [32], [263, 264], [638, 639], [688], [270], [], [], [177], [515], [325], [], [805], [554], [12], [275], [275], [185], [631], [547], [513, 683], [666], [809, 967, 968], [219, 836, 837], [119], [84], [113], [578], [162], [727], [354], [607], [721, 831], [322], [256], [79], [871], [294], [560], [46, 47], [21], [35], [315], [659, 809], [59], [211], [169], [54], [834], [145], [780], [810, 878], [986], [682], [10], [617, 845], [], [281], [342], [], [587, 677], [246], [938], [784], [353], [54], [242], [381], [613], [303], [119], [199], [362], [911, 796], [924], [533], [921], [208, 211], [441], [21], [794], [94], [717], [571], [421], [619], [545, 846], [326], [829], [179, 180], [916], [723], [754], [579, 881], [234], [197], [978, 445], [953], [748], [517, 554, 625, 536, 510], [999], [513], [654], [12], [129], [650, 401, 402, 818, 819, 632], [833], [92], [], [], [191], [484], [344], [654, 656, 479], [235], [659], [536, 403], [402], [804, 631], [959], [849], [], [246], [665], [650, 593], [672], [248], [662], [], [294], [], [3], [975, 693, 472], [676], [261], [507], [223], [699], [422], [65], [932], [283, 478], [836, 837, 869, 650, 818, 819], [645], [992], [502, 539], [949], [], [258], [409, 437], [113], [13], [336], [265], [170], [179], [139], [836, 837], [579], [13], [1], [794], [143], [784], [609], [856], [937], [691], [89], [748], [896], [252], [120], [428, 954], [557, 701], [333], [517, 536], [640, 562], [677], [299], [40, 44], [896], [313], [792], [683, 432], [192], [], [546], [380], [518], [419, 617, 823], [661, 479], [], [610, 602], [721], [345], [], [721, 831, 281], [770, 703], [300], [354], [272], [890], [813], [264], [325], [738], [859], [747], [], [221], [100], [], [647], [735], [830], [821, 839], [348], [], [652, 764], [203], [68], [715], [723], [545], [414, 893], [955], [121], [952], [38, 44], [232], [275], [711], [], [513], [], [132], [], [589], [939, 943, 945], [525], [981], [565], [52], [655], [128, 127], [454, 919], [78], [836, 837], [689, 601], [139], [237], [913], [504], [8], [806, 655], [], [94], [207], [996, 109], [650, 402, 819], [580], [641], [673], [72], [118], [983], [524, 652, 465], [612], [254], [1], [790], [401], [941], [591], [3], [474], [570], [634], [554], [574], [488, 975], [357], [], [], [930], [431], [184, 189, 191], [81], [612], [950], [879], [957], [877], [992], [650, 402, 819], [386, 101], [630], [161], [983], [254], [194], [6], [293], [960], [822, 541], [874], [140], [393], [994], [434], [510], [590], [341], [968, 725, 504], [515, 610, 714, 402], [905, 532, 831, 799], [254], [836, 837, 445], [37], [281], [930, 868, 441, 762, 923], [36], [992], [915], [512], [615], [653, 728], [459], [852, 219], [], [281], [966, 503], [407], [532, 789], [812], [103], [910], [610], [940, 942], [485, 553, 632], [575], [450], [], [274], [66], [269, 272], [364], [740], [22], [237, 158], [717], [494], [416, 602], [951, 122], [307], [399], [998], [968, 849], [256, 205], [489, 93], [271], [257, 222], [0], [809], [801], [605], [695], [916], [760], [434, 756, 793], [757], [63], [483], [185], [201], [528], [61], [905], [529], [385], [136], [482], [448], [], [36], [90], [275], [800], [764], [602], [], [504], [], [762, 559], [245], [650, 558, 819], [353, 351], [53], [221], [17], [], [940, 941, 942], [193], [544], [699], [], [626], [372], [220], [225], [], [441], [527, 782, 664], [530], [88], [834, 906], [826], [62], [67, 68], [478], [978], [582], [673, 664, 527], [], [427], [899], [714, 402], [107], [884, 406], [412], [676], [283], [810, 878], [800], [281], [122], [390], [988], [918], [], [99], [325], [546, 650, 402, 819], [74], [287], [169], [160], [771], [53], [610], [169], [479, 751, 879, 817], [525], [440], [141], [150], [136], [708], [], [823], [239], [123], [740], [170], [166], [982], [58], [106], [89], [728, 858], [685], [581], [661], [569], [250], [564], [713], [401], [301], [533], [775], [478], [752, 852], [785], [423, 424, 585, 589, 526, 782, 851, 664], [769, 515], [], [866, 595], [695], [206, 221], [513, 875], [106], [476], [4], [251], [939], [921], [437], [774], [359], [248], [880], [242], [748], [63], [837, 639], [174], [251], [265], [89], [469], [547, 565], [420], [988], [763, 597], [254], [42], [857], [54], [181], [967, 504], [107], [240, 239], [705], [154], [524], [993], [696], [905], [45], [640], [247], [835], [661], [551, 629], [700], [778], [118], [910], [488, 778, 600], [417], [690, 984, 345], [128], [530], [677, 783], [25], [876, 589, 207, 435], [146], [11], [271, 277], [593], [791], [673], [589], [794], [660], [518, 830], [270], [399], [], [862], [566], [832], [78], [282], [412, 335], [705], [474], [106], [557], [311], [569], [234], [215], [788], [133], [252, 262], [825], [], [513], [677], [777], [985], [204], [532, 572], [955], [29], [896, 804], [781], [367], [724], [13], [738, 580], [], [794], [], [487, 761], [314], [716], [541, 542], [699], [20], [], [389], [569], [923, 965], [608, 770], [554], [166], [225], [244], [62], [], [478], [463], [732], [595], [211], [584], [943], [30], [917], [726], [838], [808], [932, 478], [855], [541], [], [], [781, 557], [271], [803], [656], [445], [], [336], [210], [53], [609, 479], [319], [521], [415], [244], [11], [119], [233], [], [485, 754, 632], [776, 819], [462], [134], [419], [22], [309], [27], [511], [502], [681, 620, 526, 527, 782, 664, 508], [524, 461], [417], [610], [975], [951], [755], [510], [192, 463], [53], [603], [84], [161], [877], [971], [855], [343], [297], [168], [318], [214], [881], [453, 454, 624], [892], [717], [497], [320], [695], [104], [406], [991], [363], [825], [], [16], [], [791], [833], [155], [818], [515, 870], [684], [757], [367], [413], [], [194], [327], [306], [886], [752, 852], [954], [993], [382], [807], [311, 312], [96], [53], [827], [330], [338], [865], [694], [588], [25], [483, 979, 825], [848], [807], [525], [195], [22], [136], [774], [816], [231], [866], [567, 827], [], [7], [982], [343], [933], [958], [700], [921], [143], [709], [157], [680, 805], [223], [574], [133], [932], [721, 831], [], [852], [13], [155], [407], [535], [629], [750], [67], [245], [68], [220, 213], [521, 813, 909, 910, 567, 926], [625], [71], [809, 762, 923, 926], [673, 810, 508], [], [342], [217], [71], [785], [325], [990], [114], [589], [118], [277], [304], [738], [866], [572], [994, 116, 126], [13], [654], [], [529], [973], [696], [252], [899], [268], [190], [911], [544], [256], [426], [488, 826], [805], [624], [612], [], [142], [148], [720], [974], [748, 636], [376], [628], [92], [501], [866, 595], [110], [409, 892], [328], [341], [417, 616], [896, 861], [155], [711], [424], [939, 940, 941, 942, 943], [526], [411], [8, 84], [63], [485], [582], [547], [827], [928, 659], [321], [962, 923], [252], [488], [751], [22], [749], [723], [397], [21], [695], [609, 660], [803], [966], [640], [514], [252], [756], [489], [373], [500], [581, 479, 511], [923, 964], [430], [370], [971], [412], [917], [898], [283], [128], [302], [385], [], [655], [856, 958], [144], [653], [182], [988], [], [474], [433, 639], [905, 283], [583], [25], [333], [161], [348], [495], [836, 837, 906, 656, 785], [873], [405], [309, 599], [616, 843], [814], [645], [604], [223], [248, 250], [576], [102], [729], [275], [43], [64, 59], [523], [387], [991], [93], [246], [], [], [517], [453], [908, 812, 404], [835], [618], [107], [129], [575], [462], [765], [208], [311, 312], [960], [403, 536], [814], [376], [713], [991], [302], [329], [217], [], [40, 46], [199], [105], [753], [670], [482], [363], [516, 520], [777], [484, 871], [479, 817], [208], [604], [230], [381], [474], [909], [984], [799], [441], [76], [669], [339], [441], [380], [924], [40], [825], [323], [950], [45], [800], [617, 720, 823], [234], [33], [39], [182], [832], [234], [287], [481, 626], [698], [431], [666], [806, 911, 658], [349], [9], [641], [57], [335], [253], [774], [865, 850], [473], [15], [506], [450], [372], [344], [832], [230, 231], [361], [783], [387], [92], [732], [936], [995], [], [625], [33], [892], [346], [712], [308], [175], [970, 795, 796], [75], [235], [132], [15], [287], [448], [302], [555], [118], [590, 605], [339], [339], [759], [498], [950, 951], [252], [433, 638, 639], [134], [535], [236], [740], [934], [701], [430], [970], [940], [555], [160], [505], [624, 453, 454], [337], [308, 309], [361], [418], [470], [977], [347], [750, 533], [708], [249], [643], [928, 960], [778], [373], [260], [432], [947], [865], [558], [307], [162], [109], [455, 760, 440], [440], [450], [], [423, 424, 585], [97], [79], [], [580], [469], [435], [545], [387], [673, 526, 527, 782, 664, 508], [404], [737, 898], [467], [], [709], [251], [660], [], [829], [518], [532, 470], [508], [357], [465], [42], [], [199, 588], [904], [393], [470], [546, 841], [212], [479], [489], [586, 652], [143], [797], [260], [858], [814], [674], [], [63], [106], [788], [905], [572], [424, 423], [695], [628], [62], [], [2], [38], [], [711, 721], [232], [64], [769], [794], [608, 610, 559], [833], [190], [98], [898], [862], [456], [798], [319], [892], [228], [108], [706], [402], [208], [233], [810, 508], [], [], [705], [828], [744, 657], [378], [75], [795], [254], [916], [690, 958, 345], [852], [553], [369], [232, 264], [793, 794], [269], [649], [532], [534], [431], [874], [114], [392], [562], [453], [432], [797], [756], [903, 585], [573], [722], [748], [553], [243], [750, 721], [499], [297], [897, 971], [645], [275], [666], [], [780], [773], [567], [286], [347], [77], [581, 479, 656], [250], [847], [910], [106], [937], [261], [355], [625], [149], [656], [505], [959], [808], [712], [996], [493], [421], [915], [264, 263], [], [487], [869], [867, 675], [615, 890], [467], [739], [833], [594], [618], [226], [313], [219], [399], [59, 64], [295], [683, 558, 432, 566], [371], [742], [242], [809, 532, 762, 923, 959], [424, 423], [520], [340], [], [205], [78], [712], [246], [327], [914], [605], [], [144], [788, 502], [879], [408, 575], [63], [136], [601], [447], [760], [486], [568], [178, 282], [], [963], [499], [560], [858], [779], [134], [572], [673, 526, 527, 664], [879, 689], [576], [803], [514, 515, 655], [993], [63], [894], [896, 495], [940, 942], [610], [964], [899], [869], [738], [453, 917, 921], [333], [163], [590], [976], [752], [619, 846, 721, 892, 831], [205], [806, 655, 630, 502], [918], [308], [], [776], [483], [570, 518], [354], [192], [407], [544], [443], [], [], [195], [973], [171], [580], [205], [344], [291], [568], [75], [734], [483], [298, 63], [712], [70], [704], [530], [417], [388], [602], [659], [422], [524], [20], [560], [529], [422], [544, 909, 762], [16], [559], [990], [101], [562], [313], [522], [658], [387], [932], [330], [610], [854], [914], [759], [], [373], [539], [439], [533], [252], [912], [261, 174], [248, 250], [689, 594, 601], [535], [431], [724], [656, 784], [100], [332], [416], [259], [268], [142], [962, 923], [982], [121], [961, 923], [592], [483], [180], [836, 837], [], [685], [408], [322], [695], [843], [], [], [195], [182], [133], [581, 479, 436, 535, 511], [420], [269], [30], [739], [811], [191], [], [352, 351], [5], [529], [928], [214], [214], [994], [153], [729], [], [936], [125], [37], [123], [550], [243], [541, 542], [849], [659], [125], [902], [936], [], [], [898, 918], [767], [], [971], [40, 46], [55], [130], [6], [879], [284], [214], [222], [402], [392], [], [215, 218], [237], [556], [380], [342], [757], [], [881, 486], [175], [330], [749], [38], [669], [993], [597], [48], [826], [923, 926], [52], [277], [479], [347], [966], [946], [544, 827], [691], [137, 146], [384], [663], [95], [197, 183], [185], [957], [784], [283], [535], [292], [238], [80], [466], [148], [705, 547], [673, 526, 527, 782, 664, 508], [883], [808], [300], [279], [432], [323], [53], [481], [836, 638, 639], [102], [821], [357], [393], [471], [447], [838], [451], [766], [950], [586, 977], [652], [724, 733], [705], [268], [897], [831], [804], [60, 62], [], [953], [740, 359], [926], [480], [993], [950], [867], [79], [486], [831, 282], [277], [], [255], [919], [799], [647], [168], [899, 901], [108], [228], [348], [805], [884], [934], [53], [426], [268], [994], [8], [849, 504, 505], [338], [110], [130], [354], [427], [711], [161], [156, 285], [505], [84], [839], [512], [884], [545], [118], [546], [715, 524, 787], [], [886], [514], [388], [41, 44], [91], [915], [916], [513, 650, 819], [563], [], [324], [909, 926], [152], [158], [170], [383], [831], [909, 849], [8], [375], [414], [], [119, 120], [69], [230, 231], [912, 716], [325], [59], [46], [268], [951], [666], [106], [], [685], [588], [992], [721], [798], [715], [458], [], [402], [95], [53], [560], [440, 441, 455], [], [374], [327], [128], [478], [513, 439], [746], [510], [526, 844, 742], [483], [280], [265], [932], [518], [499], [62], [203], [212], [318], [], [310], [291], [815], [695], [635], [70, 904], [485, 592], [803, 228], [293], [267], [917], [141], [52], [812], [351], [545], [24], [796], [485, 530], [480], [608], [530], [744, 657], [724], [498], [143], [570], [693, 472], [560], [194], [999, 281, 700], [783, 784], [676], [919], [727], [550], [573], [109, 973], [327], [], [787], [963], [425], [505], [368], [74, 815], [], [498, 854], [822], [258], [731], [861], [138], [626], [551], [312], [305], [372], [393], [321], [806, 831], [345], [185], [972], [269], [520, 669], [550], [379], [], [532], [818], [592], [697], [107], [21], [377], [445, 638, 639], [831], [472], [6], [], [852], [779], [472, 693], [224], [809, 659, 923], [732], [842, 638, 639], [155], [650], [303], [582, 519, 950], [731, 861], [34], [], [801, 445], [822], [155], [366], [815], [376], [593], [311], [55], [895], [750], [105], [839], [545], [626], [179], [423], [561], [596, 639], [636], [352], [152], [774], [371], [991], [844], [688], [840], [914], [850], [28], [640], [389], [137], [929], [204], [632], [245], [868, 923], [970, 795], [876], [762], [418, 487, 620], [996], [424], [803], [21], [409], [849], [158], [452, 911], [307], [331], [377], [651], [215], [658, 911], [867], [201, 254], [118], [914], [343], [894], [340], [925], [364], [], [279], [410], [424], [907], [146], [612], [669], [], [196], [674], [476], [4], [389, 391], [72], [927], [975], [157], [148], [], [476], [367], [970, 795], [494, 497, 442, 858], [658], [192], [332], [69], [497], [601], [2, 814], [978], [], [165], [673, 526, 527, 782, 664, 508], [807], [639], [199], [642], [340], [135], [446], [541], [363], [451], [309], [], [104], [487, 810, 590], [218], [492], [862], [905], [529, 977, 978], [333], [194], [], [650, 401, 402, 546, 559, 818, 819, 889], [545], [307], [609], [517], [205], [45], [477], [716], [36], [940], [17], [677], [244], [581, 479, 511], [409, 892], [656], [791], [777], [147], [195], [219], [516], [546], [735], [954], [227], [359], [902], [216], [783], [471], [13], [161], [938], [427], [2], [66], [393], [10], [45], [783], [257], [520], [], [354], [479], [415], [771], [977, 978], [448], [502], [350], [741], [513], [361], [480, 886], [741, 884], [7], [329, 973], [161], [31], [936], [631], [738], [160], [403], [248, 250], [165], [979], [346], [847], [635, 767], [374], [739], [350], [763], [927], [813, 567], [700], [20], [292], [9], [960, 470, 923], [], [19], [], [318], [434], [803], [28], [879], [502], [554], [484], [630], [532, 923], [390], [123], [872], [678], [782, 664], [655], [851], [767], [479, 511], [519], [97], [144], [302], [231], [407], [602], [629], [96], [103], [805], [332], [865], [214], [384], [753], [895], [214], [951], [699], [255], [625], [421, 841], [292], [948], [731], [823], [728], [937], [118], [714], [551], [98], [903, 617], [87], [190], [878], [410], [611], [230, 231], [274], [513], [578, 834, 982], [234, 805], [154], [572], [983], [650, 541], [190], [379], [963], [], [794], [419], [445], [447], [408], [719], [900], [206], [260], [552], [859], [750], [928], [242], [602, 638, 639], [511, 479], [183], [462], [806], [962], [351], [756], [729], [416], [], [910], [778, 467], [570], [498], [427], [283, 284], [6], [65], [673, 526, 527, 782, 664, 508], [555], [175], [281], [236], [], [626], [508], [824], [535], [900], [673, 526, 527, 916, 664, 508], [980], [964], [910], [765], [920, 733], [141], [479], [466], [254], [411], [430], [404], [586, 437, 408], [956], [284], [121], [], [567, 411], [161], [953], [211], [116], [416], [13], [], [279], [866], [395], [203], [13], [466], [843], [254], [603], [572], [707], [507], [523, 830], [176], [388], [198], [709], [439], [258], [11], [367], [513], [189], [736], [573], [936], [969, 440, 572], [69], [836, 837, 842, 445], [846], [778], [107], [693], [495], [270], [942], [593], [742], [134], [336], [344], [572], [929, 912], [132], [341, 342], [416], [418], [361], [], [704, 656, 479], [891], [999, 692], [173], [269], [568], [858], [35, 37], [847], [314], [977, 978], [673, 526, 527, 782, 664, 508], [807], [747], [37], [706], [987], [612], [500], [867], [], [355], [444], [670], [873], [940, 941, 942], [927], [53], [567], [422], [650, 402], [900], [318], [778], [509], [726], [583], [404], [217], [124], [767], [383], [759], [720], [129], [], [408], [679], [968], [133], [], [], [], [766], [], [22], [425], [220], [897, 651, 760], [385], [346], [924], [62], [576], [454], [118], [832], [492, 493, 495], [379], [997], [192], [712], [842, 433, 638, 639], [697], [300], [720], [736], [400, 667], [965], [843], [779], [566], [682, 562], [403], [4], [], [291], [968], [5], [132], [405], [], [545], [843, 445], [6], [898], [140], [286], [559], [102], [], [172], [798], [78], [81], [70], [523, 869], [33], [], [194], [191], [973, 983], [891], [576], [977, 978], [], [525], [386], [76], [692], [905], [986], [587], [488, 679], [652, 413], [776], [328, 108], [658], [], [880], [842], [198], [915], [400], [457, 834], [], [328, 116], [588, 790], [649], [307], [570], [774], [303], [754], [274], [317], [192], [322], [435, 876], [183], [525], [770], [976], [743], [721], [535], [749], [444], [756], [473], [518], [67], [56], [95], [], [974], [780], [754, 605], [840], [583], [3], [], [784], [923, 964], [963], [908, 404], [411], [586], [456], [774], [79], [577], [61], [52], [991], [559], [582, 851], [700], [813], [111], [436], [483, 958], [967], [571], [917, 413], [522], [243], [992], [952], [145], [973], [798], [473], [749], [94], [], [47], [841, 918], [374], [152, 155], [680], [698, 538], [96], [417], [99], [738, 559], [912], [809, 923, 924], [499], [416], [616], [699], [332], [743], [233], [64], [489], [751, 468, 479], [701], [91], [964], [], [967, 968], [217], [452], [], [836, 837], [64], [357], [874], [236], [789], [187], [365], [195], [9], [778], [484], [28], [170], [], [753], [90], [684], [681, 620], [144], [106], [601], [141], [688], [46], [756], [195], [896], [148], [691], [309], [763], [307], [152], [], [5], [836, 837], [543], [], [732], [323], [219], [91], [879, 977, 978], [282], [154], [941], [351], [], [22], [503], [992], [122], [891], [74], [390], [43], [126], [304], [69], [71], [407], [195], [488, 600], [935], [56], [825], [975, 977, 979], [903], [271, 280], [182], [594], [23], [331], [879], [597], [987, 998], [199], [977, 978, 728], [300], [943], [834, 906], [], [792], [280], [811], [914], [545], [288], [179], [701], [411], [120], [448], [607], [506, 421], [687], [59], [74], [733], [767], [], [87], [278], [], [304], [], [174], [936], [408], [153], [245], [551], [156], [934], [606], [657], [791], [], [716], [142], [315], [], [409], [270], [434, 794], [57], [532], [979], [502], [774], [917], [616], [12], [39], [923], [594], [421], [77], [836, 837, 844], [494], [824, 474], [518, 665], [962, 923], [735], [148], [876, 435], [844], [158], [903], [763], [178], [439], [540], [992], [], [431], [94], [48], [909], [849], [233], [588, 790], [310], [354], [829], [11], [789], [712], [650, 819], [975, 671], [348], [889], [694], [892], [354, 349, 350], [880], [117], [901], [365], [842, 879, 977, 978], [224], [581, 479, 717], [587, 677], [679, 435, 578], [969], [856], [478], [168], [688], [], [274], [], [749], [984], [492], [128], [361], [453], [473], [292], [283], [100], [668], [644], [34], [11], [859], [416], [995], [945], [140], [366], [7], [345], [695], [24], [450], [699], [994], [675], [564], [731], [260], [658], [20], [184], [33], [460, 718, 150], [375], [360], [366], [810, 878], [735], [576], [116], [145], [670, 518], [405, 839], [309, 917, 599], [567, 827], [588], [712], [], [595], [988], [820], [451], [110], [490], [565], [442], [918], [200], [786], [261], [573], [521], [294], [448], [71], [386, 101], [548], [760], [585], [587, 784, 596, 477], [896, 804, 999, 794, 861], [12], [681, 620], [563], [185, 186], [595], [867], [474], [332], [215, 218], [], [661], [301], [500], [337], [997], [435, 876], [888], [43], [], [309], [567], [], [101], [85], [410], [758], [160], [896], [993], [939], [802, 518], [], [435], [332], [552], [76], [282, 797], [256], [834, 906], [692, 950], [658], [978, 824], [732], [709], [905, 589, 740], [875], [636], [406], [947], [896], [487, 681, 620, 916, 508], [334], [130], [513, 776, 683, 875], [0], [980], [411], [417], [871], [141], [], [558], [876, 435], [], [523], [210], [71], [59], [535], [726], [675, 580, 608, 889], [862], [490], [914], [858, 445], [603], [854, 406], [], [849], [638, 639], [610, 836, 837], [680, 750, 697], [349], [813], [689, 887], [545], [295], [589], [7], [656], [888], [], [194], [573], [164], [332], [420], [994, 114, 947], [939], [439], [729], [440], [66, 67], [356], [474], [], [696], [387], [842, 977, 978], [778], [261], [836, 837], [200], [867, 864], [677], [419], [990], [98], [739], [72], [359], [214], [977], [682], [836, 837], [], [407], [734], [224], [219], [], [52], [103], [716], [717], [916], [140], [912], [663], [911], [270], [335], [], [659, 923], [240], [759], [832], [975], [990], [427], [756, 792], [345], [799], [381], [287], [529, 823], [333], [681, 620, 526], [819], [617], [58], [239], [134], [666], [846, 750], [673, 664, 526, 527, 632, 508], [265], [418, 709], [297], [], [134], [349], [194], [538], [114], [458, 703], [755], [934, 692, 948], [559], [396], [57], [64, 55], [226], [977, 978], [208], [562], [306], [780], [135], [997], [481], [500], [406, 892], [237], [494], [738], [314], [424], [], [688], [139], [881], [217, 215], [564], [2], [256], [115], [302, 306], [225], [366], [578, 552, 689, 982], [547], [696], [31], [619, 846], [934], [619, 846], [308], [950, 954], [26], [411], [668], [400, 667], [], [545], [673, 526, 527, 782, 664, 508], [371], [227, 232], [350], [70], [283], [99], [365], [405], [12], [844], [107], [964], [360], [765], [596], [784], [418], [515, 230], [867, 569], [896, 804, 794, 861], [941], [], [922], [624], [761], [386], [641], [575], [693], [658, 760], [358], [615], [], [352], [714], [417], [111], [], [563], [297, 295], [417], [565], [674], [973], [967, 968, 504, 923], [561], [398], [449], [], [390], [281], [628], [531], [270], [533], [153], [608, 474], [807], [737], [528], [548], [742, 681, 620, 526], [11], [522], [277], [166], [893], [], [51], [4], [597], [422], [145], [232], [138], [884], [978, 611], [769], [625], [880], [392], [476], [658], [327], [524, 461], [557, 538, 698], [312], [358], [557], [904, 905], [431], [269, 272], [560], [299], [582, 948, 951], [784, 587, 477, 740], [606], [67], [970], [518], [809], [787], [24], [], [239], [888], [462], [431], [], [242], [], [708], [263], [261], [46], [617], [390], [921], [42], [881], [999], [447], [351], [418], [660, 436], [218], [349], [782, 664, 508], [70], [270], [728], [53], [593, 650], [103], [975, 693, 472], [94], [], [863], [770, 539], [989], [947], [5], [864], [121], [610], [215], [720], [511], [670], [915], [296], [778, 485], [183], [516, 905, 526, 493], [330], [788, 630], [741, 399], [4], [333], [733], [89], [536, 913, 724], [63], [706, 519, 428, 716], [283], [301], [617], [44], [899], [925], [907], [772], [987, 998], [959], [670], [435], [425], [354], [299], [257, 222], [248, 250], [581], [517], [750, 564, 669], [193], [729], [793], [800], [424], [41], [475], [659], [149], [659], [605], [277], [354], [701], [528], [281], [720], [349], [54], [710], [286], [4], [460], [418, 709, 767], [272], [281], [611], [236], [548, 493, 851], [423], [162, 676], [720], [], [520], [], [233], [867], [213], [827], [634], [489, 638, 639], [690, 345], [242], [494], [646], [672], [224], [85], [44], [553], [583], [103, 395], [344], [135], [770], [892], [251], [656, 879], [365], [578, 689], [605], [441], [772], [347], [900, 756], [746], [915], [390], [960], [269], [210], [489, 219], [959], [896], [625], [], [432], [236], [645], [670], [738], [911], [], [652, 465, 597, 413], [900], [296], [333], [468, 603], [239], [], [40, 46], [], [], [309], [576], [66, 68], [54], [277], [910], [551], [860], [581], [10], [829], [974], [414], [316], [520], [638], [281], [], [773], [343], [83], [], [], [], [670], [656], [122], [179], [506], [839, 405], [335], [964], [237], [543, 433, 445, 638, 639], [500], [529], [577], [497, 538], [690], [895], [70], [188], [791], [159], [757], [294], [297], [29], [911], [716, 757], [116], [292], [32], [234], [838, 720, 631], [991], [], [956], [758], [209], [656, 479], [481, 482], [615], [640], [889], [444], [941], [857], [452, 911], [514, 515], [28], [11], [519], [198], [725], [497], [], [996], [170], [935], [176], [10], [779], [35, 37], [728], [203], [962, 923], [119], [349], [384], [110], [805], [365], [75], [536], [938, 935], [496], [], [554], [721], [202], [125], [109], [854], [146], [232], [476], [498], [309], [504], [237], [686], [244], [145, 148], [236], [217], [], [878], [482, 548, 851, 598, 632], [354], [292], [744, 908], [213], [697], [877], [965], [863], [302], [642], [220], [923], [283], [], [722], [618], [661], [152], [64, 55], [851], [822], [525], [363], [186], [654, 757], [], [338], [511], [963], [608], [831], [926], [106], [655], [523], [294], [44], [94], [], [840], [638, 639], [936], [148], [69], [563], [97], [552], [37], [311], [], [583], [42, 44], [817], [589], [576], [318], [198], [400, 667], [979], [468, 479], [109], [897], [632], [357], [555], [372], [447], [762, 853], [976], [554], [162], [834, 652, 906], [225], [797], [708], [978], [624], [819, 541], [319], [], [275], [707], [184], [250], [681, 810, 620], [737], [939], [265, 267], [700], [570], [728], [13], [156], [907, 966], [952], [685], [422], [301], [131], [437], [865], [411], [259], [], [553], [433], [809], [208, 243], [299], [311], [744, 657], [267], [], [986], [34, 977], [548, 850, 851], [632], [64], [548], [110], [372], [828], [996], [611], [355, 489], [184], [64, 55], [273], [], [419], [], [193], [425], [562], [289], [359], [160], [513], [124], [937], [452], [610], [908, 895], [281], [965], [396], [973], [335], [387], [875], [642], [52], [486], [698], [], [361], [189], [901], [635], [238], [94], [25], [574], [639], [492], [], [822], [643], [], [505], [709], [157], [406], [194], [488], [610, 759, 794], [541, 542], [985], [105], [291], [744, 657], [832], [217], [989], [307], [147], [592], [170], [612], [108], [306], [314], [41, 46], [34], [324], [7], [31], [239], [753], [557, 733], [902], [324], [336], [720], [703], [378], [650], [652], [18], [578, 216], [763, 597], [827], [], [769], [673, 526, 527, 782, 664, 508], [683], [488, 616, 887], [681, 620], [234, 214], [267], [341, 342], [], [], [670, 655, 414], [298], [322], [681, 810, 620, 508], [939, 945], [576], [671], [], [806, 630], [27], [805], [149], [62], [518], [308], [615], [393, 973], [455], [422], [487], [379], [276], [17], [497], [217, 212], [4], [], [109], [779], [713], [841, 731], [16], [153], [988], [507], [40], [400, 667], [65], [679], [982], [428], [728], [522], [100], [358], [497], [594], [667], [], [42], [2, 3], [], [722], [247], [915], [26], [981], [79, 630], [298], [], [501], [614, 584], [], [79], [497], [733, 557], [589, 639], [553], [594], [821], [896, 910, 608], [670], [375], [524], [211], [983], [892], [172], [85], [318], [409, 892], [256], [405], [682], [517], [744, 652, 657, 471], [], [], [56], [992], [579], [917], [499], [195], [823], [966, 572], [67], [661], [], [205], [20], [632], [272], [582, 937, 938], [193], [596], [870, 825], [912, 348], [688], [285], [234, 236], [725], [944, 946], [184], [957], [453], [401], [320, 319], [657], [975], [139], [900], [948], [787], [756], [32, 26], [], [75], [460], [518], [501, 885], [564], [643], [635], [529], [77], [627], [378], [119], [858], [497], [575], [241, 238], [], [334], [976], [989], [774], [433], [617], [552], [248, 250], [961], [1], [884], [262], [438], [641], [686], [486], [239], [625], [533], [879], [193, 201], [423, 424], [421], [186], [208], [786], [968], [693], [140], [422], [713], [953], [623], [360], [958], [2], [263], [251], [169], [839], [72, 815], [672], [404], [169], [919], [215], [933], [550], [], [43], [162, 168], [136], [664], [244], [418], [396], [756], [604], [636], [28], [208], [942], [39, 43], [951], [19], [591, 850], [358, 359], [701], [512, 907, 950, 951, 954, 572], [111], [518], [17], [986], [554], [634], [20], [88], [882], [903], [128], [570], [421], [667], [210], [513], [], [122], [866], [177, 170], [663], [160], [378], [512, 473], [], [932], [149], [955], [], [548, 651, 831], [195], [765], [], [560], [], [199], [836, 837, 748], [578, 689, 885], [742], [51], [619, 818], [329], [853], [586], [], [41], [84], [129], [485, 592], [933], [926, 544], [309, 599], [987, 998], [243], [952], [662], [834, 906], [395], [996], [], [624, 453], [429], [298], [488, 858], [841, 823], [185], [745, 851, 598], [529], [525], [], [176], [608], [847], [429], [950], [385, 386], [816], [108], [326], [691], [977], [671], [219], [2], [], [166], [605], [52], [], [246], [243], [164], [362], [315], [584], [224], [], [542], [770, 841, 970], [679], [583], [528], [543], [742], [], [879], [664], [327], [301], [800], [209], [], [], [829], [608, 514, 610, 655], [119], [31], [316], [387], [487], [638, 639], [80], [950, 954], [348], [966, 720, 572], [171], [761], [531], [507], [255], [717, 479], [70], [797, 765], [], [212], [118], [187], [890], [781], [202], [123], [551], [273], [797], [448], [821], [769], [321], [463], [407], [144], [911], [44], [818], [554], [966, 907], [138], [427], [865, 610], [660, 799], [568], [529, 478], [951, 725], [27], [284], [332], [254], [281, 282], [422, 747], [521], [516, 520], [805, 261], [2, 3], [192], [5], [146], [406], [264, 263], [], [458], [854], [500], [608, 514, 515], [991], [778], [100], [293], [479], [996], [936], [340], [781], [765], [64, 55], [800], [453, 454, 624], [520], [287], [821, 839], [311, 312], [37], [376], [940], [535], [163], [182], [29], [768], [337], [], [973], [420], [], [596], [990], [536], [611], [396], [682], [932], [87], [], [801], [315], [743], [478, 722], [910], [929], [518, 414], [94], [92], [81], [47], [740], [593], [], [492], [164], [668], [332], [487], [596], [304], [244], [], [968], [155], [59, 916, 55], [330], [697], [904], [295], [29], [225], [746], [77], [238], [880], [100], [], [581], [521], [805], [67], [469], [172], [271], [937, 938], [370], [575], [495], [430], [75], [514], [557], [524], [563], [312, 311], [], [], [745], [374], [706], [621], [565], [428], [492], [644], [16], [269], [619], [273], [882], [334], [140, 142], [850, 282], [937], [770], [587, 784], [205], [983], [], [540], [284], [198, 199], [], [187], [399], [582, 948, 949, 950, 954], [215], [976], [], [783], [869], [539], [930, 582, 415], [39, 26], [337], [435], [361], [325], [677], [618, 926], [910], [57], [425], [912], [908], [578, 982, 571], [], [900], [371], [931], [940], [920], [505], [339], [], [581, 479, 717], [386, 101], [939], [280], [536, 628], [454, 655], [], [868, 951, 923], [892], [752, 852], [217], [952], [29], [448], [341], [211], [677, 587], [], [409, 892], [120], [186, 193], [62], [], [], [20, 13], [539], [744, 657], [413], [], [351], [], [11], [470], [326], [799], [849, 850], [567], [430], [301], [316], [222], [919], [969, 470, 923], [425], [182], [443], [301], [566], [299], [55], [299], [822], [842], [554], [575], [101], [994], [337], [309], [736, 762], [], [238], [518, 665], [313, 315], [875], [845], [816], [943, 953], [769], [393, 108], [83], [113], [557], [453], [242], [713], [], [133], [751, 979, 479], [211], [5], [100], [210], [567], [278], [333], [755], [765], [613, 810, 508], [942], [892], [740], [852], [181], [82], [], [310, 504], [956], [373], [49, 50], [635], [485, 754], [522], [], [458], [684], [571], [995], [], [571], [209], [755], [0], [226], [612], [540], [197, 198], [785], [572], [379], [], [833], [546, 650, 819], [626], [903], [806, 610], [282], [], [484], [943], [39], [801, 983], [888], [365], [926], [256], [897], [48], [718, 821], [220], [861], [433], [849], [854], [711, 631], [31], [682], [381], [81], [190], [442, 663], [218], [522], [926], [986], [185], [726], [362], [539], [638, 639], [581, 479], [863], [343], [697], [925], [565], [940], [618, 923], [641], [], [972, 825], [], [339], [992], [], [185], [914], [197], [717], [], [832], [76], [93], [], [718], [294], [844], [753], [], [668], [838], [232], [303], [176], [224], [125], [319], [64, 59], [75], [360], [204], [42], [913], [552], [909], [330], [471], [758], [156], [265, 267], [898], [857], [51], [145], [374], [928], [509], [12], [525], [894], [946], [], [840], [923], [804], [886, 440, 860], [661], [606], [789], [909, 987, 926], [841], [519], [176], [316], [177], [66, 68], [808, 515], [531], [388, 872], [243], [135], [684], [242, 159], [872], [606], [296, 427, 756], [678, 487, 854], [883], [904], [803], [520, 529], [581, 656, 479], [], [754], [749], [764], [372], [693], [549], [], [447], [143], [463], [25], [922], [160], [726], [992], [453, 454, 624, 402], [], [302], [765, 706], [812], [645], [140], [301], [159], [488], [307], [142], [449, 858, 733], [41], [836, 747], [272], [659], [177], [236], [664], [18], [772], [679], [654], [565], [549], [383], [728, 478], [970], [959], [735], [952], [15], [434], [687], [871], [217], [825], [358], [109], [495], [30], [853, 645], [805], [207], [165, 234], [894], [536], [215], [312], [392], [776], [610, 47], [505], [75], [393], [173], [720], [531], [], [487, 681, 590], [942], [129], [886], [284], [409], [298], [928], [724], [737], [604], [0], [0], [640], [232, 151], [410], [591], [680], [], [421], [717, 733, 479], [], [363], [210], [13], [219], [755], [263], [147], [287], [115], [491], [448], [780], [249, 250], [926], [], [761], [692], [303], [972], [836, 837, 958], [40, 46], [710], [293], [979], [173], [257], [681, 620], [749], [488], [288], [916], [941], [], [792], [154], [691], [], [], [640], [759], [611], [118], [], [63], [193, 235, 852], [871], [19], [400, 667], [896, 804, 999, 905, 861], [80], [433], [608, 414], [245], [880], [185], [292], [169], [85], [902], [], [567], [962], [649, 977, 978], [269], [427], [482], [382], [488, 723], [638], [505], [959], [364], [805], [497], [587, 596], [457, 834], [977, 150], [], [743], [145], [73, 77], [578, 689, 601], [168, 159], [830], [109], [766], [130], [763], [448], [993], [788], [491], [738, 944], [375], [435], [700, 999], [79], [146], [447], [269], [622], [420], [510], [578, 689], [283], [417], [673, 508], [186], [619, 846], [], [925], [467], [468], [180], [879, 912], [578, 601], [688], [102], [553], [483], [218, 156], [387], [196, 198], [487], [738, 428], [689], [323], [591], [], [9], [871], [749], [950, 951], [466], [615], [314], [615, 597], [609], [316], [488], [184], [128, 856], [669], [615], [249], [56, 472], [], [520], [189, 190], [822], [361], [537], [394], [417], [527], [242, 243], [], [385], [697], [158], [732], [172], [755], [], [132], [984], [550], [453, 454, 526], [910], [230], [771], [278], [31], [536], [586], [715], [909, 926], [97], [327], [122], [759], [157], [162], [], [732], [933], [649], [763], [788], [29], [598], [568], [422], [896, 804, 838, 585, 631], [822], [192, 193], [713], [586], [807], [75], [322], [120], [472], [737, 455], [588], [173, 958], [19], [349], [286], [701], [692], [194], [649], [769], [390, 395], [987, 935, 923], [47], [62], [570], [983], [130], [100], [519], [619, 846], [619, 846], [161], [768], [214], [254], [90], [234], [694], [311], [720], [], [780], [], [397], [], [349], [704], [628], [332], [337], [793], [757], [865, 850], [270], [], [989], [], [], [51], [49], [187], [254], [178], [], [245], [424], [13], [766], [584], [409, 892], [116], [17], [19], [613], [454], [751], [157], [994], [951], [111, 52], [997], [672], [77], [345], [581, 479], [30], [476], [587], [189], [550], [22], [0], [456], [200], [], [704], [49], [532, 923, 572], [], [313], [379], [420], [], [258], [28], [253], [606], [968, 504], [915], [950], [403], [535, 671], [378], [376], [565], [495], [], [414], [303], [546], [406, 887], [113], [105], [518], [164], [789, 539], [990], [938], [347], [740], [53], [172], [90], [59], [466], [906], [933], [53], [444], [140], [769, 709, 710, 767], [193], [230, 231], [561], [306], [], [614], [439, 764], [118], [808], [], [268], [577], [652, 413], [529], [367, 369], [], [492], [24], [681, 620], [137], [978], [627], [549], [136], [], [777], [182], [362], [329], [671], [1], [112], [883], [987], [703], [], [], [786], [536], [867], [104], [928], [235], [862], [828], [427], [929], [23], [958], [549], [43], [342], [971], [814], [140], [575], [552], [301], [676, 197], [430], [608, 977, 978], [303], [235], [544], [645], [807], [110], [114], [836, 976], [454], [419], [642], [581, 479, 817], [591], [79], [856], [177], [930, 844], [765], [496], [478], [231], [773], [97], [674], [991], [375], [102], [486, 650, 558, 819], [85], [109], [573], [78], [479], [401], [846], [268], [301], [892], [466], [], [497], [908], [577, 488], [308], [506], [497], [939, 943], [455], [977], [988], [89], [508], [554], [128], [30], [316], [12], [687], [423, 424], [], [553, 493], [19], [52], [76], [690], [872, 841], [553], [514], [548, 851], [374], [878], [896], [238], [45], [989], [763], [418, 720, 872, 759, 622], [18], [590], [684], [957], [673, 681, 526, 527, 782, 664, 508], [270, 279], [985], [895], [535], [129], [653], [932], [90], [331], [131], [346], [495], [495], [386, 101], [167, 212], [109, 828], [59], [293], [765], [217], [668], [653], [352], [118], [], [652, 413], [698], [568], [793], [932], [413, 670], [641], [822], [620, 508], [], [743], [202], [480], [981], [569], [61], [701], [417], [958], [535], [293], [], [753], [352], [609], [355], [553], [976], [292], [], [910], [509], [716, 637], [468], [858], [85], [511], [18], [692], [351], [382], [844], [939], [816], [], [704], [678], [342], [425], [194], [386], [153], [118], [799], [600], [452], [287], [630], [309], [613], [87], [647], [721], [578, 982, 703], [755], [475], [721], [19], [548], [869], [959], [57], [886], [453], [411], [302, 305], [923], [696, 463], [123], [109], [982], [818], [611], [152], [406], [745], [592], [950, 951], [442, 494], [593], [297, 295], [671], [42, 44], [994], [538], [556], [584], [92], [269], [938], [278], [64], [670], [364], [0], [], [844], [958], [813, 910, 954], [749], [881], [725], [743], [171, 172], [168], [372], [931, 790, 415], [908, 404], [251], [], [369], [58], [436, 479], [762, 532], [], [951], [30, 31], [715], [894], [867], [716], [], [440, 412], [513, 875, 822], [], [970, 795], [347], [937, 567], [427], [595], [915], [344], [679], [572, 966], [234], [288], [338], [654], [221, 206], [37], [986], [883], [312], [663], [387], [435], [294], [577], [], [649], [769], [837], [308], [570], [913], [779], [753], [955], [277], [363], [], [547, 820], [608, 597, 763], [850], [], [62], [287], [413], [], [155], [80], [908, 895], [407], [489, 781], [], [53], [435, 876], [460], [731], [558], [], [601], [186], [502], [140, 142], [535], [514], [489], [542], [87], [], [37], [319], [655], [339], [894], [579, 432, 819], [582], [173], [360, 337, 357], [340], [939, 943], [568], [932, 868], [865], [87], [916], [41], [387], [981], [818, 884], [849], [116], [352], [292], [147], [72], [536], [515, 764], [614, 966, 532, 762, 923, 572], [892], [715], [], [424], [327], [670], [673, 664, 526, 527, 508], [39, 46], [732], [383], [], [550], [320], [62], [], [617], [], [186], [963], [660], [96], [446], [393, 108], [3], [512], [709], [294], [], [295], [760], [561], [650, 479, 608, 609, 610], [839], [704], [117], [971], [188], [162], [30], [515], [547, 820], [439], [112], [521, 926], [797], [738], [129], [748], [], [821], [438], [], [939, 940], [355], [824], [629], [], [147], [472], [376], [782], [884], [639], [424], [981], [69], [701], [608, 824], [130], [30], [737, 920, 762], [526, 786], [666], [571], [132], [709, 696], [430], [758], [261], [428], [], [], [550], [], [875, 819], [644], [222], [221], [490], [101], [457, 617, 712, 633], [616], [311], [178], [430], [495], [995], [492], [], [512], [996], [537], [771], [894], [], [860], [709], [187], [264], [225], [483], [478], [933], [218], [915], [190], [754], [980], [], [405], [68], [557], [650], [496], [795], [779], [511], [138], [344], [748], [157], [], [184], [769, 418, 767], [240, 241, 238], [147], [893], [360], [391], [298], [806, 655], [156], [573], [], [410, 309, 599], [929], [240, 238], [619, 846], [617, 823], [625], [108, 991], [718], [626], [219], [691, 570, 958], [867], [512, 473], [638, 639], [439], [99], [926], [242, 243], [112], [397], [708, 682, 458, 439], [962, 659, 923], [719], [542], [853], [802], [107], [725], [132], [404], [420], [44], [373], [825], [583], [61], [475], [793], [920], [82], [67], [722], [168, 159], [298], [502], [861], [815], [311], [599], [111], [893], [908, 895], [371], [332], [557], [192], [346], [87], [25], [737], [534], [], [167], [937], [607], [156], [663], [169], [], [144], [899], [974], [684], [24], [575], [], [682], [286], [], [49, 50], [420], [635], [], [435], [806, 630], [16], [118], [352], [42], [14], [673, 674], [548], [755], [16], [145], [673, 742, 526, 527, 782, 664, 508], [979], [615], [404], [], [867], [259], [906], [800, 903, 552], [806], [15], [969], [807], [153], [625, 724], [852], [624], [12], [717], [261], [445], [203], [872, 759], [], [228], [711], [948], [825], [], [796], [861], [518, 842], [278, 280], [466], [327, 123], [363], [548, 851, 632], [588], [756], [579], [263], [577], [52], [722], [715], [554], [45], [110], [546, 714, 402], [922], [902], [608], [673, 968, 526, 504, 508], [], [255], [173], [986], [382], [568], [496], [87], [293], [468, 919], [608], [416], [372], [979], [376], [121], [815], [451], [768], [32, 30], [265], [715, 744], [114], [405], [652, 413], [704], [427], [229], [977, 775], [853], [809, 618, 659, 925], [750, 917, 697, 921], [171], [654], [951], [480], [], [973], [894], [354], [52], [341], [738], [793], [241], [96], [742], [677], [849], [396], [996], [572], [215], [295], [395], [679], [274], [245], [118], [816], [435, 631], [21], [892], [560], [], [144], [834, 906], [914], [533], [199], [576], [432], [71], [982], [186], [641], [165], [293], [391], [], [251], [902], [937, 939, 943, 950, 951, 954], [510], [290], [399, 728], [278], [587], [600], [397], [951], [248], [216], [625], [676], [], [840], [215], [900], [47], [167], [391], [698], [787], [302], [165], [604], [496], [290], [801], [715], [508], [516, 520], [39], [624, 453, 454], [903], [788], [373], [801, 329, 842], [679], [110], [430], [], [301], [289], [942], [705], [206], [810, 508], [985], [979], [246], [922], [820], [485, 754], [], [146], [269], [591, 434], [570], [], [49], [310], [455], [31], [658, 911], [198], [259], [943, 931, 933], [525], [438], [513], [691], [744, 657], [649, 487], [193], [535], [809, 909, 923, 926], [814], [635], [135], [953, 954], [465], [260, 232], [242], [685], [610, 836, 837], [516], [948], [373], [797], [], [61], [912], [897], [763], [], [191], [532], [931], [975], [162], [494], [644], [737], [629], [791], [801], [466], [532, 762], [716], [], [525], [339], [542], [521], [175], [339], [999, 159], [267], [326], [892], [880], [561], [131], [836, 837, 841, 610], [953], [218], [4], [581], [432], [470], [208], [4], [831], [668], [113], [107], [690], [579], [995], [106], [407], [425], [405], [538], [118], [368], [78], [434], [808, 642], [], [], [967], [331], [], [267], [234, 165], [199], [387], [444], [892], [883], [899], [41], [978], [104], [211], [51], [608, 630], [488], [648], [873], [199], [], [630], [127], [], [88], [363], [536], [888], [239], [802, 621], [483], [752], [532], [218], [564], [884], [655], [637], [38], [877], [877], [170], [611], [969, 659], [214], [320], [808], [692], [419], [591], [132], [167, 173], [434], [99, 100], [927], [95], [], [112], [449], [], [301], [74, 815, 309], [332], [508], [116], [20], [632, 851, 548], [81], [916], [15], [725], [194], [208], [77], [677], [355], [136], [779], [375], [298], [135], [212], [866], [410], [867], [190], [349], [507], [199], [140], [356], [222], [614], [615], [391], [964], [792], [353, 343], [851], [37], [831], [390], [980], [693], [93], [986], [471], [419], [], [371], [353], [238], [744], [], [192, 185], [729], [103], [768], [264, 171], [589], [994], [673, 664, 526, 527, 632, 761, 508], [286], [305], [733], [], [483], [237], [67], [], [379], [33], [7], [476], [378], [588], [746], [726], [234], [664, 851], [428], [116], [914], [759], [], [983], [172], [27], [410, 309, 599], [148], [285], [234, 177], [], [792, 834, 630], [89], [158], [752, 852], [741], [836, 837], [158], [378], [152], [669], [369, 381], [601], [231], [648, 720], [690], [608], [210], [344], [733], [610], [699], [512], [481], [], [340], [569], [], [], [731], [316], [44], [239], [455], [261, 230], [765], [], [945], [808], [], [662], [206, 221], [661], [650], [247], [810, 878], [606], [886], [208], [44], [], [133], [248], [679], [188], [], [587, 784, 477], [638, 639], [908, 404], [389], [503], [428], [303], [9], [994], [995], [162, 167], [501], [688], [974], [693], [923, 982, 762], [445], [563], [402, 546], [997, 947], [406], [144], [476], [354], [], [307], [518, 671], [65], [459], [831], [707], [15], [159], [129], [79], [207], [716], [483], [198], [171], [896, 804], [392], [223], [197], [961, 659], [258], [672, 797], [834, 457, 527, 664, 508], [410], [205], [775, 699], [486], [510], [806, 911, 496], [183], [524], [893], [829], [376], [11], [317], [976, 977, 978], [272], [529], [161], [727], [904], [474], [314], [780, 724], [935], [354], [863], [987, 998], [95], [], [948, 949], [], [836, 638, 639], [571], [49], [342], [178], [], [195], [292], [801], [515, 808], [191], [879], [235], [574], [593], [66], [505], [225], [], [907, 966], [625], [180], [466], [639], [380], [426], [945], [37], [161, 162], [103], [751], [611], [936], [759], [701], [943], [629], [714], [389], [224], [815], [601], [819], [655], [301], [408], [740], [831], [282], [984], [], [389], [564], [25], [960], [474], [688], [957], [97], [312], [443], [846], [941], [262], [492], [985], [414, 608], [507], [578, 495, 601], [275], [205], [588], [193, 187], [89], [224], [890], [497], [583], [239], [990], [367], [], [543], [480], [989], [520], [484], [249], [593], [349], [344], [897, 799], [968, 504], [901], [829], [508], [821], [364], [165], [871], [480], [212], [], [499], [617], [400, 667], [222], [338], [413], [], [290], [], [897], [], [397], [286], [721, 831], [952], [112], [582], [558, 541, 542], [], [483], [449], [], [980], [332], [136], [56], [716], [690, 345], [835], [768], [558, 432, 889], [141], [444], [270], [637], [749], [123], [572], [538, 727], [952], [600], [33], [419], [286], [186], [397], [797], [495], [997], [692, 623], [805], [2], [609], [793], [698], [991], [217], [259], [583], [273], [900], [500], [857], [461], [996], [7], [3, 147], [110], [752], [355], [757], [99], [646], [719], [378], [293], [773], [2, 3], [531], [896, 651, 827], [774, 608, 610], [288, 290], [716], [], [673, 526, 527, 782, 664, 508], [418], [803], [768], [348], [640], [365], [220], [402], [378], [], [948], [], [635], [291], [944], [730], [1], [308], [112], [165], [616], [254], [707], [532, 762, 572], [305], [209], [], [679], [733, 858], [], [965, 923], [800], [604], [104, 489], [441], [436], [465, 597, 734], [280], [164, 165], [480], [997], [402], [70], [767], [454], [171], [391], [282, 539], [474], [526, 527, 664, 508], [533], [595], [573], [511], [908], [176], [915], [197, 199], [530, 409], [], [810, 878], [783], [666], [538], [435], [850], [609], [71, 119], [], [], [671], [411], [535], [395], [231], [234], [249], [666], [888], [610], [997, 947], [314], [167], [557], [315], [473], [968, 504], [502], [3], [409, 892], [335], [859], [238], [581], [748], [450], [397], [737, 455, 440], [], [284], [727], [556, 827], [171], [480], [35], [384], [556], [940], [611], [447], [806], [463], [994], [594], [909, 567, 478], [999], [226], [35, 876], [73, 77], [127], [889], [69], [435], [237, 158], [466], [766], [308], [759], [994], [774, 655, 825], [698], [124], [538], [731], [484, 871], [30], [561], [441], [161], [832], [769], [898, 836, 837], [880, 518], [392], [51], [659, 923, 928, 945, 959], [280], [207], [429], [314], [566], [451], [547], [686], [972], [442], [473], [851, 633], [882], [235, 676], [157], [927], [972], [658, 824], [206], [960], [597], [], [620, 508], [460], [473], [718, 975, 437], [947], [615], [336], [815], [974], [707], [858], [849], [398], [780, 914], [363], [239], [908], [514, 788], [147], [25], [547], [697], [131], [600], [354], [165], [772], [572], [175], [399], [719], [338], [300], [655, 630], [968], [337, 943], [581, 479], [899], [815], [424], [330, 331], [48], [515, 420], [952], [288], [771], [341], [842], [562], [989], [], [730], [892], [324], [268], [974], [571], [550], [651, 412, 60, 868, 616], [770], [233], [758], [863], [618], [730], [842], [404], [264], [453], [272], [342], [294], [239], [114], [487], [824, 678], [608], [927], [969], [642], [], [542], [453], [880], [436], [355], [787], [128], [999, 700], [627], [581, 479, 817], [], [614], [873], [548], [543], [858], [465], [57], [29], [442, 858], [233], [988], [323], [255], [90], [630], [738], [170], [456], [7], [52], [868, 651, 659], [560], [685, 785], [], [383], [273], [339], [425], [609], [624], [968, 911, 849, 505], [74], [617], [966, 572], [317], [289], [610], [517, 600], [788], [989], [171], [11], [911, 658], [334], [187], [791], [458], [86], [], [333], [288], [949], [619, 846, 851], [641], [248], [733], [180], [667], [674], [639], [667], [230], [75], [479], [231], [747], [591], [157], [172], [410], [906], [677], [766], [], [420], [483], [], [26], [902], [113], [989], [270, 272], [597], [799], [86], [19], [456], [857], [396], [962, 923], [952], [500], [321], [526], [41], [679], [467], [334], [460], [573], [892], [607], [841], [470], [382], [918], [879], [133], [316], [581, 751, 468, 895, 479], [591], [545], [806, 459], [289], [784], [582], [130], [311], [214], [259], [932], [251], [358, 359], [], [470], [], [], [], [], [804, 503], [606], [32], [703], [612], [407], [305], [602], [681, 810, 620, 526, 508], [900], [], [339], [418], [433], [765], [], [618], [609], [932], [937], [535], [869], [981], [610], [122], [627], [], [118], [542], [175], [295], [692, 487], [56], [599], [793], [765], [23], [323], [551, 748, 629], [801, 570], [], [342], [69], [540], [259], [998], [], [797], [252], [568], [834], [], [96], [82], [486], [471], [320], [702], [921], [525], [], [690], [51], [113], [865], [919], [498], [], [325], [297], [606], [611], [496], [858], [136], [740, 756], [681, 620, 664, 526, 527, 632, 508], [548, 851], [652, 413], [886], [423], [857], [218, 156], [925], [], [353], [236], [216], [786], [488], [171, 172], [], [], [116], [666, 924], [649], [615], [686], [296], [242], [228], [668], [940], [891], [819], [279], [712], [459], [822], [777], [276], [702], [898], [884], [326], [472], [630], [932], [453], [130], [917], [], [555], [173], [973], [225], [931], [683, 594], [], [380], [192], [966], [138], [908], [53], [], [74], [144], [814], [516], [73], [845], [770, 608, 610], [298], [618], [104], [289], [850, 855], [484], [579], [57], [708, 887], [320], [929], [603], [109, 973], [5], [73], [668], [615, 652, 465, 413], [568], [649], [], [869], [105], [531], [135], [963], [366], [852], [468], [701], [740, 519], [985], [332], [524], [346], [336], [178], [2], [506], [300], [83], [251], [435, 151, 156], [853], [196], [434], [405], [911], [789], [251], [660, 557], [143], [306], [428], [], [619], [978, 638, 639], [156], [622], [387], [928, 960], [908], [508], [850], [436], [822], [298], [952], [408], [], [47], [573], [79], [168, 159], [633], [], [297, 295], [], [22], [], [], [512], [308], [433, 638, 639], [177], [32], [], [51], [105], [], [908], [189], [453, 454, 624], [816], [626], [975], [170], [825], [801, 838, 570], [749], [480], [510], [270], [476], [941], [900], [972, 437], [474], [170], [703], [330], [617, 823], [648], [910, 567], [953], [306], [104], [548, 453, 553, 851], [458], [309, 599], [273, 274], [341], [727], [149], [956], [477, 868, 623], [495], [792], [899], [674], [676], [677], [7], [], [72, 74], [90], [860], [677], [779], [750, 211], [868], [78], [189], [527], [253], [291], [385], [434], [687], [146, 147], [41], [548], [110], [757], [221], [692], [812, 908, 404], [834, 806, 630], [257, 222], [611], [831], [983], [281], [354], [650, 526], [355], [281], [33], [652, 465, 570, 413], [515], [385], [547], [614], [], [144], [169], [845], [915], [244], [], [40, 46], [662], [184], [958], [355], [304], [212], [63], [722], [819], [308], [882], [533], [467, 341], [659, 923], [250], [852], [], [979], [212], [939], [999, 905, 700], [610, 678], [226], [14], [99], [30], [751, 479], [453], [318], [830], [971, 502], [777, 524, 461, 596], [978, 445], [646], [911], [744, 657, 812], [257], [898], [275], [131], [547], [], [626], [335], [981], [410], [266], [343], [783], [434], [140, 142], [], [445], [557, 468, 733], [592], [738], [364], [508], [877], [448], [377], [233], [376], [627], [], [973], [997, 947], [575], [], [], [277], [351], [746], [836, 837, 605], [788], [284], [996], [542], [487], [550], [508], [69], [886], [528], [83], [583], [841], [673, 681, 620, 526, 527, 664, 508], [197], [540], [774, 977], [902], [863], [], [], [205], [], [881], [729], [463], [968, 504, 505], [271, 274], [191], [864], [], [264], [901], [], [762], [843], [853], [822, 541, 542], [], [214], [69], [264], [706], [418], [56], [53], [383], [504], [869, 445, 638], [461], [213], [], [709], [879], [554], [93], [333], [308], [958], [738], [479], [50], [861], [615], [833], [], [987, 998], [805], [870], [], [700], [611], [], [279], [492, 630], [487], [293], [460, 975, 437, 733], [685], [], [410], [854], [196, 198], [594], [656], [677, 587], [450], [858], [561], [773], [407], [691], [32], [490], [343], [769], [276], [144], [621], [452, 911, 658], [453, 885], [169], [308], [296], [407], [595], [453, 454, 921], [816], [476], [933], [576], [563], [369], [615], [842, 977, 978], [290], [440], [347], [206, 221], [785], [20], [919, 920], [488, 679, 714], [967, 968, 504], [814], [317], [681, 810, 620], [41], [190], [791], [431], [315], [766], [294], [942], [563], [788], [784, 923], [207], [113], [722], [111], [756], [475], [573], [520], [170, 177], [], [847], [929], [200, 155], [227], [674], [734], [52], [537, 248], [], [296], [738], [515], [760], [709], [928, 868, 923, 927], [253], [26], [611], [835], [], [305], [642], [188], [482], [], [852], [167], [352], [652], [379], [464], [649], [531], [446], [677], [887], [744, 657, 733], [], [330], [953], [589], [4], [831], [808], [616], [324], [457, 834, 906], [851], [838], [733], [], [155, 204], [794], [29], [709], [249], [364], [421], [583], [1], [820], [151], [341], [521], [296], [], [94], [572], [683], [536], [591], [532, 760], [383], [858], [7], [801, 983], [38, 44], [312, 314], [383], [79], [651], [323], [642, 542], [161], [494, 7], [70, 123], [556], [315], [990], [610, 750, 564, 697], [443, 411], [161], [19], [741], [586], [660], [263], [265], [400], [111], [610, 836, 837], [990], [976, 978], [709], [279], [295], [555], [158], [768, 610], [554], [408], [261], [211], [664], [502], [394], [439], [], [12], [893], [880], [338], [349], [656, 791], [79, 988], [574], [925], [604], [653], [966], [71], [], [721, 750], [], [], [265], [243], [89], [354], [], [], [260], [812], [298], [617], [427, 509], [792], [511], [365], [450], [503], [852], [851], [404], [757], [655], [756], [546, 650, 819, 542], [161], [118], [406], [42], [65], [484], [672], [825], [53], [914], [937], [756], [941], [769, 777], [498], [241, 238], [311], [90], [162], [534], [952], [185], [647], [393, 973], [141], [590], [433], [862], [394], [309], [987], [274], [616], [884, 406], [68], [617, 823], [324], [981, 429], [949, 951], [72], [973], [797], [920], [127], [363], [659], [], [132], [550], [705, 547], [46, 47], [50], [81, 82], [514], [239], [484, 871], [890], [932], [219], [284], [673, 664, 526, 527, 508], [48], [802], [68], [], [777], [954], [425], [775], [696], [450], [834, 906], [846], [544], [599, 951], [15], [835], [136], [205], [929], [931, 587, 792], [328], [829], [919], [984], [976], [453, 409], [396], [547], [683], [565], [260], [116], [187], [423], [697], [671], [54], [], [544], [308], [938], [190], [887, 406], [910], [649], [893], [367], [564], [327], [672], [441], [], [839], [313], [584], [203], [304], [560], [364], [948], [929], [309], [799], [565], [19], [630], [445], [607], [125], [746, 622], [634], [49], [362], [854], [840], [538], [869, 636], [817, 511, 479], [491], [118], [231], [519, 478], [230], [177], [141], [185], [791, 582], [80, 136], [286], [441], [], [517], [284], [421, 539], [83], [985, 324], [], [395], [21], [650, 822], [44, 26], [705, 489], [701], [351], [183], [771], [757], [679], [739], [992, 947], [565], [147], [270], [982], [21], [892], [], [745], [449], [776], [287], [163, 168], [965], [904, 981], [694], [777, 531, 587, 487], [835], [460], [604], [480], [72], [367], [260], [771], [20], [742], [814], [815], [476], [572], [67], [213], [824], [168], [163], [556], [761], [23], [90], [745], [619, 846], [80], [241], [96, 904], [709, 767], [532, 953, 762, 923], [471], [759], [407], [429], [419, 741], [390], [581], [908, 895], [834], [245], [162], [424, 423], [40], [283], [215], [446], [435], [126], [785], [997], [29], [183], [139], [428], [453, 526], [483], [909], [119, 120], [981], [574], [513], [], [154], [1], [248, 249, 250], [835], [], [557, 762, 733, 670], [280], [576], [310], [265, 266], [687], [122], [801, 973, 983], [676], [840], [567], [], [909], [350], [389], [142], [185], [296], [994], [652], [341], [169], [366], [579], [863], [185], [185], [119], [485], [796], [459, 445], [431], [625, 724, 540], [1], [164], [305, 302], [419], [407], [881], [931], [609], [216], [791], [185, 189], [977, 978], [], [500], [916], [218], [407], [778, 526], [631], [242], [489, 695], [882], [488, 671], [728], [982], [360], [177], [983], [354], [324], [463], [734], [513], [479, 661], [659], [899, 647], [702], [280], [492], [68], [655], [565], [410], [182], [560], [668], [207], [367], [549], [772], [], [674], [586], [132], [868, 966, 923], [472], [550], [882], [674], [687], [911, 824], [480, 707], [534], [525], [410, 599], [596], [145], [10], [548], [521], [223], [648], [814], [480], [643], [618, 813, 910], [872, 652, 413], [532], [401], [194], [518, 465, 597, 413], [849], [513], [10], [659], [34], [512], [96], [56], [513, 776, 875, 541], [520], [770, 788, 630, 502], [624], [84], [30], [330], [732], [466], [89], [866, 958], [116], [968, 504], [568, 765], [], [154], [449], [631], [996], [162], [884, 406], [642], [129], [970, 349], [814], [378], [560], [324], [510], [641], [581, 479], [351], [31], [556], [443], [537], [616], [898], [353], [79], [571], [902, 488], [964], [955], [], [418, 563], [945], [112], [], [730], [220], [384], [158], [610], [210], [966, 907, 572], [878], [125], [362], [119, 39], [722], [466], [286], [815], [150], [93], [898, 455], [368], [542], [363], [425], [703], [721], [583], [311], [232, 249], [866, 595], [], [243], [415], [73, 815], [902], [913], [33], [772, 488], [806], [368], [499], [54], [183], [480, 478], [864], [275], [], [593], [293], [666, 924], [850], [614, 696], [819, 854], [456], [495], [546], [560], [22], [217], [28], [616], [993], [974], [925], [218], [], [28], [69], [605], [832], [612], [512], [], [999], [62], [447], [994], [276], [489, 236], [812], [643], [921], [408], [], [], [292], [278], [286], [913], [957], [992, 528], [], [871], [249], [236], [417], [874], [38], [21], [505], [200, 204, 155], [115], [798], [230, 231], [895], [144], [288, 290], [455], [288], [488, 679], [102], [40], [587], [387], [315], [324], [375], [592], [64], [911, 658], [526], [218], [], [978, 638, 639], [539], [], [680, 697], [16], [317], [772], [675], [873], [86], [592], [47], [], [124], [619], [605], [942], [954, 950], [423, 424], [666], [475], [645], [863], [22], [442, 663, 858], [689, 601], [524], [321], [49], [528], [905], [742], [614, 697], [921], [533], [459], [894, 759], [521], [608], [104], [665], [915], [601], [135], [], [253], [356], [897, 851], [], [63], [791], [689], [24], [429], [136], [532], [373], [383], [80], [373], [874, 829], [638, 639], [748], [948, 950, 951], [177], [407], [379], [740], [349], [176], [353], [301], [626], [716], [236], [472], [310], [567], [661], [667], [650, 828], [467], [974], [51], [], [460, 974], [897], [153], [492], [386, 101], [221], [239], [556], [819, 854], [973], [251], [818, 920], [792], [409], [532, 831], [355], [708, 884], [205], [548, 526, 851, 532], [816], [470], [766], [881], [476], [579], [212], [910, 567], [950], [653], [282], [238, 240, 241], [62], [732], [668], [942], [999, 434, 861], [909], [55], [500], [217], [184], [969, 987], [240, 241], [914], [484], [32], [288], [290], [253], [63], [416], [999, 794], [261, 254], [336], [777], [312], [325], [], [245], [990], [231], [537], [774], [180], [582], [271, 277], [573], [455], [], [657], [50], [385], [], [15], [918], [118], [339], [816], [403], [549], [861], [820], [372], [230], [470], [670], [128], [569], [529], [317], [415], [], [553], [], [456], [], [986], [473], [730], [936], [237, 151], [388], [452], [120], [], [672], [260], [630], [685], [922], [931], [938], [], [103], [661], [94], [402], [577], [384], [613], [799], [768], [889], [748], [], [35], [680, 470], [704], [807], [], [], [499], [786], [28], [14], [468], [678], [396], [596], [83], [405], [574, 575], [551], [453], [957], [875], [666], [551], [305], [178], [926], [965], [235], [], [990], [967, 968], [464, 763, 597], [173], [654], [4], [819, 541, 542], [341], [660], [991], [145], [372], [58], [375], [119], [24], [388], [78], [959], [137], [434], [98], [676], [389], [209, 850], [84], [682], [707], [524, 461], [654, 656, 792], [236], [99], [365], [757], [954, 651], [210, 211], [256], [162], [895], [423], [216], [366], [201], [673, 742, 526, 527, 664, 508], [706], [211], [315], [426], [], [209, 805], [255], [654, 733], [], [866], [504], [645], [449, 976], [459, 655], [], [255], [681], [777], [321], [666], [401], [119], [801, 836, 842, 433, 638, 639], [548], [550], [261], [], [869, 652], [], [913], [596], [], [608, 610, 836, 837], [], [986], [845], [], [594], [608, 610, 903], [865], [54], [534], [297, 369], [391, 801, 983], [601], [746], [784], [996], [486], [673, 681, 810, 620, 508], [558], [620], [614], [82], [834, 650, 906], [609, 860], [903, 836, 837, 465, 501, 763], [534], [762], [300], [227], [483], [193], [441, 572], [343], [814], [8, 7], [650], [449, 975], [133], [819, 854], [863], [256], [65], [518], [683], [938], [449], [425], [921], [740], [186], [720], [681, 620], [393], [697, 589], [169], [886], [153], [712], [968, 504], [95], [205], [59], [673, 526, 527, 662, 664, 508], [137], [658], [5], [918], [719], [], [949, 923], [744, 657], [961], [862], [378], [694], [815], [505], [], [86], [268], [397], [375], [306], [742], [902], [778], [605], [252], [518], [196], [863], [581], [388], [232], [378], [947], [764, 413], [251], [475], [], [57], [50], [933], [321], [690], [329], [500], [854], [679], [393], [882], [595], [942], [144], [549], [976], [424, 423], [317], [], [825, 858, 958], [502], [740, 459], [309, 599], [632], [378], [311], [40, 44], [12], [647], [78], [260], [788], [464, 950, 954], [493], [644], [992], [160], [891], [399], [567], [836, 837], [604], [293], [836, 837], [223], [449], [289], [171], [742], [191, 189], [153], [467], [720], [353], [987], [907, 892], [643], [829], [924], [624, 453], [546], [374], [419], [980], [793], [640], [611], [350], [91], [588, 790], [488, 679], [867], [], [573], [809], [708], [378], [252], [130], [168, 211], [740], [824], [816], [382], [329], [987, 998], [42], [536, 517, 510], [149], [288, 290], [337], [334], [901], [521], [], [667], [518], [64], [100], [823], [], [310], [617], [197], [693], [548, 664, 526, 851], [], [547], [], [41, 44], [707, 528], [306], [262], [922], [], [32, 30], [331], [951], [428], [618, 659, 926], [479], [64, 55], [385], [448], [680], [882], [536], [832], [346], [82], [380], [981, 429], [791], [940], [920], [181], [258], [806, 630], [477], [721], [329], [509], [195], [455], [544], [], [222], [929], [516], [383], [43], [814], [472, 693], [652], [13], [528], [419], [300], [207], [417], [140], [581], [70], [746], [61], [579], [703], [88], [680], [778], [159], [330], [178], [809, 659, 923], [621], [265, 266], [], [710], [], [487, 681, 620, 281], [994], [144], [313], [382], [63], [], [524, 461], [38], [400, 667], [336], [943, 923], [869], [303], [486], [265], [479], [838], [967], [929], [579], [578, 689, 562, 601], [186], [878], [395], [801, 983], [352, 351], [541], [283], [235], [111], [842, 978], [898], [389], [144], [711], [65], [386], [947, 997], [382], [707, 484, 914], [468], [581, 734, 479], [643], [767], [546], [756], [607], [336], [755], [630], [619], [985], [578], [546, 650, 819], [277, 278], [929], [613], [592], [820], [313], [250], [604], [740], [319], [391], [366], [327], [45], [248], [560], [507], [908, 404], [859], [605], [55], [410], [522], [92], [195], [314], [], [909, 469], [902], [812], [259, 526], [726], [513], [962], [976, 150], [986], [349], [273], [], [965], [923], [683], [673, 681, 620, 526, 527, 664, 508], [269], [700], [468], [28], [], [679, 488], [601], [383], [347], [416], [762], [763], [518, 616], [44], [970], [116], [], [835], [808], [614], [6], [], [353], [351], [406], [382], [881], [643], [232, 267], [717], [863], [757], [], [326], [101, 386], [308], [548, 851, 598, 632], [346], [923, 959], [271], [771], [864], [561], [563], [682, 698], [487], [125], [543], [432], [543], [110], [968, 849], [890], [399], [524, 461], [381], [], [973], [165], [393], [648], [758], [271], [530], [804, 631], [], [542], [222], [922], [785], [109], [495, 532, 729], [599], [239], [304], [138], [], [266, 570], [137], [617], [], [949], [299], [579, 881], [327, 328, 112], [99], [503], [954], [780], [806], [683, 819], [310], [813], [962], [107], [488, 695], [990], [621], [770], [21], [66, 68], [361], [132], [83], [888], [912], [834, 755], [304], [400], [864], [296], [649], [83], [568, 869], [418], [532], [296], [393], [527, 664], [187], [564], [926], [394], [154], [453, 454], [730], [278], [879], [919], [], [354], [202], [1], [28], [802], [264], [553, 526], [989], [581], [984], [810, 508], [195], [163], [65, 973], [383], [315], [512], [293], [824], [629], [223], [673, 742, 681, 620, 526, 527, 664, 508], [420], [139], [46], [330], [325], [910], [832], [782, 851], [129], [237], [301], [500], [854], [180], [774], [955], [507], [898, 711], [486], [935], [524, 461], [226], [405], [554], [435, 876], [901], [532, 762, 923, 572], [800], [300], [82], [973], [18], [893], [584], [913], [902], [156], [296], [247], [798], [653], [755], [893], [405], [534, 729], [796], [611], [457], [327], [56], [700], [680], [889], [806], [322], [825], [412], [36], [33], [673, 619, 526, 527, 782, 846, 664, 508], [407], [994], [564], [907], [847], [406], [225], [324], [806], [393], [122], [501, 568], [640], [155], [708], [331], [312], [309, 599], [41], [174], [604], [707], [286], [972, 976], [760], [946], [930], [849], [558], [589], [594], [60], [817, 479], [342], [580], [651], [56], [175], [733], [665], [7], [445], [444], [385], [376], [721, 636, 831], [690, 471], [258], [843], [725], [575, 479], [172], [954], [577], [70], [373], [409], [198], [774, 655, 703], [770, 543], [512], [619, 846], [610], [783], [907, 883, 532], [245], [857], [30], [508], [545], [269], [967, 504], [232], [223], [640], [], [192, 185, 186], [275], [45], [463], [392], [209], [337], [947], [], [100], [221], [685], [458], [771], [914], [14], [878], [325], [737], [281], [308, 79], [380], [585], [601], [281], [896, 435, 794], [761], [23], [666], [642], [155], [375], [681, 810, 620, 508], [525], [82], [995], [973], [415], [155], [912], [], [809], [895], [781], [147], [16], [860], [830], [239], [82], [297], [297], [29], [916], [82], [487], [808], [739], [921], [495, 532, 725], [496, 765], [164], [514], [357], [174], [559], [820], [477], [661], [436], [834, 630], [448], [248], [27], [540], [523, 414], [175, 189], [1], [985], [128], [646], [235], [722], [553], [661], [801], [940], [90], [], [586], [356], [341], [981], [579, 401, 881], [318], [72, 815], [71], [661], [756], [310], [293], [354], [438], [181], [23], [828], [989], [578, 982], [467], [289], [595], [569], [788], [370], [44], [241], [660, 733], [741], [], [800], [669], [850], [973, 983], [673, 681, 620, 526, 664], [348], [448], [592], [890], [81], [845, 720, 692], [341], [507], [954], [367], [364], [158], [697], [35], [520, 516], [727], [243], [489, 270], [546, 650, 819, 542], [566], [75], [615], [140], [706, 765], [236], [472], [83], [987, 998], [533], [], [319], [658], [832], [111], [605], [543], [973, 801, 983], [124], [365], [320], [616], [526, 527, 782, 664, 673, 508], [], [736], [447], [621], [830], [979], [145], [420], [653], [357], [355], [], [109], [], [298], [612], [642], [], [15], [479, 511], [42], [7], [897], [794], [705, 547], [571], [428], [233], [916, 664], [681, 620, 508], [46], [522], [229], [124], [609], [924], [399, 501], [156], [926], [258], [688], [808], [411], [751, 479], [510], [651], [302], [851], [], [864], [787], [103], [652], [], [], [40], [280], [606], [836, 837, 869], [868, 987, 809, 923], [975], [850], [551, 629], [729], [975, 703], [155, 204], [887], [981, 781], [703], [920, 717], [920, 414], [872, 818, 759], [816], [673], [669], [339], [636], [498], [392], [545], [592], [34], [38], [481, 453, 485, 632], [966], [429], [], [388], [], [836, 837, 775], [617], [464], [255], [375], [115], [195, 790], [479, 656], [213], [603], [711], [293], [822, 871], [35], [273], [875], [314], [], [86], [144], [856], [548, 851, 598, 632, 281, 285], [908, 718, 888], [659], [572, 966], [213], [849], [905], [215], [805], [872], [496], [766], [713], [36], [304], [821], [724], [182], [88], [652], [846], [150], [375], [71], [311], [725], [189, 191], [97], [542], [650, 818, 819, 822], [], [689, 501], [909, 926], [400, 667], [214], [103], [132], [191, 189], [950, 951], [259], [489], [577], [769], [617, 731, 823], [113], [927], [456], [103], [528], [], [], [203], [673, 508], [222], [822], [59], [270], [300], [111], [455, 907, 440], [766], [298], [835], [711], [670], [264, 253], [762, 532], [222], [589], [901], [15], [63], [424], [714], [292], [232, 217], [807, 654], [130], [984], [919], [928, 949], [129], [757], [371], [394], [84], [738], [421], [980], [341, 719], [388], [454, 487, 728], [229], [856], [495], [275], [812], [], [636], [64], [], [152], [95], [649], [92], [92], [710], [], [439], [908], [616], [694], [890], [822, 542], [770, 478], [270], [], [], [688], [578, 601, 982], [772], [243], [180], [482], [804], [417], [134], [526], [838], [987, 998], [486], [800], [33], [48], [904], [567, 827], [645], [236], [223], [910], [798], [842], [697], [904], [784], [56], [442], [949], [273], [67, 68], [948], [], [56], [152], [511, 479], [135], [], [548, 851], [228], [701], [451], [322], [209, 805], [520], [218], [173, 180], [437], [56], [14], [775], [871], [75], [], [920], [499, 955], [997], [874], [958], [], [613, 526, 527, 664], [858], [182, 185], [401], [238, 239], [156], [49, 50], [591], [730], [33], [682], [115, 327], [469], [66, 68], [174], [], [404], [134], [636], [284], [995], [], [571], [946], [642], [517], [457], [258], [601], [50], [903], [590], [175], [649], [551], [654], [3], [], [], [642], [769], [690], [753], [981], [440, 441, 572], [581], [877], [43], [593], [372], [197], [727], [347], [275], [887], [211, 210], [834, 906, 762, 638], [344], [773], [719], [], [856], [252], [439, 461, 465], [849, 505], [715, 251], [789], [659, 760], [967], [505], [], [511], [34, 977, 978], [765], [233], [132], [539], [175], [736], [893], [119], [22], [447, 600], [], [829], [459, 445], [555, 652], [620, 681, 470], [772, 679], [463], [208, 282], [785], [121], [195, 151], [466], [939], [828], [643], [664, 527, 761], [123], [674], [715, 764], [77, 319], [85], [834], [677], [801, 842, 433, 983], [951], [537], [722], [104], [713], [954], [267], [154], [678], [443], [247], [945], [814], [495], [], [701], [590], [946], [996], [], [59], [137], [230, 231], [], [659], [923], [591], [713], [795], [328], [], [656, 479], [735], [201], [485], [923], [298], [31], [624, 453, 454], [759], [994], [234], [431], [922], [562, 663, 442], [528], [149], [208], [115], [699], [660], [452, 793], [634], [118], [672], [], [779], [999], [190], [192], [858], [171], [], [], [143], [146], [518], [877], [87], [263], [104], [742, 553], [356], [346], [767], [295, 297], [485, 664, 526, 527, 851, 632], [499], [576], [529], [774, 655, 836, 837, 636], [618], [156], [133], [793], [796], [24], [], [41], [251], [196], [696], [884, 406], [], [309, 599], [579], [892], [124], [460, 718, 468, 733], [515], [618], [997], [260], [280], [12], [714], [904], [879], [41], [149, 150], [], [], [145], [685], [311], [800], [817, 751, 479], [369], [723], [896], [734], [356], [88], [692], [633], [957], [393], [862], [905], [578, 585], [172], [566], [126], [515], [323], [113], [230], [814], [688], [250], [191], [70], [318], [429], [84], [584], [81], [511, 479], [29], [308], [147], [682], [669], [243], [934, 567], [904], [858], [331], [750, 726], [906], [440], [939], [164], [91], [28], [154], [740, 477], [882], [366], [574], [], [699], [661], [662], [521], [472, 693], [155], [896], [774, 655], [931], [461], [327], [915], [89], [734], [246, 159], [488], [358], [546, 921], [49], [788], [], [446], [59], [919], [], [63], [505], [478], [], [832], [178], [64], [528], [363], [674], [427, 756], [752, 852], [456], [356], [], [822], [581, 479], [690], [], [864], [997], [849], [35], [33], [233], [915], [470], [987], [697], [195, 245], [719], [857], [248, 249, 250], [896, 859, 495, 827], [281], [668], [808], [198], [177, 170, 676], [104], [598], [477], [735], [135], [415], [361], [5], [86], [], [156], [600, 769], [777, 623], [], [385], [992], [314], [986], [964], [684], [642, 542], [710], [349], [267], [194], [400], [659], [565], [945, 943], [], [279], [12], [860], [466], [769, 455], [274], [967], [74], [281], [576], [915], [821], [619, 846], [52], [602], [911], [455], [993], [], [386], [218, 215], [841], [971], [267], [378], [166], [977, 978], [], [382], [474], [717], [946, 309], [], [385], [252], [977, 978], [909], [29], [137], [554], [564], [670], [67], [137], [612], [], [183], [25], [751], [46], [146], [], [803], [854], [75], [], [611], [498], [329], [276], [309], [723], [255], [334], [568], [263], [131], [456], [63], [73, 74, 815], [802], [557, 442], [585, 838], [605], [834, 906], [996], [776], [564], [349], [393], [832], [], [395], [673, 681, 526, 527, 664, 508], [20], [711, 563], [256, 676], [404], [235, 658], [196, 198], [907, 572, 966], [322], [982], [349], [175], [109], [896], [761], [514, 788], [400, 667], [879], [434], [], [859, 651, 760, 827], [], [695], [585], [851], [245], [], [430], [479], [663], [723], [648], [257, 249, 222], [276], [178], [774], [690], [937], [923], [802], [191], [220], [306], [508], [54], [237], [785], [452], [349], [914], [], [], [581, 479], [136], [214], [951], [178], [943], [455], [24], [105], [497], [298], [333], [596, 763, 764], [], [918], [227], [576], [557], [331], [905], [458], [489], [709], [490], [547], [611], [306], [905], [967, 968], [79], [944], [323], [199], [819, 566], [734], [633], [680], [942], [410], [342], [950], [827], [503], [339], [624, 453], [236], [134], [150], [879], [670, 628], [367, 379], [4], [801], [246], [], [103], [638, 639], [28], [150], [923], [855], [323], [52], [289], [895], [950], [], [2, 148], [473], [750, 282], [789], [630], [459], [717], [83], [55], [83], [842, 457], [168, 159], [738], [741], [724], [243], [655, 445, 638, 639], [701], [64], [786], [532, 762], [323], [504], [265], [360], [600, 825], [403], [945], [89], [361], [673, 508], [170], [706], [570], [481], [13], [488, 843], [898], [22], [340], [714], [444], [696], [219], [416], [742], [718, 536], [960], [65], [939, 943], [770, 806], [570], [968, 504, 415], [], [91], [821], [39], [503], [616], [834, 522], [844], [507], [430], [577], [93], [168], [640, 868], [649, 825], [45], [207], [641], [473], [12], [325], [192], [712, 790], [467], [526, 786], [], [881], [958], [164, 166, 167], [], [814, 970, 484], [685], [181], [830], [610, 838], [451], [181], [180], [756], [103], [320], [683], [2, 3], [553], [14], [306], [831, 968, 846, 619, 504], [394], [703], [926], [756], [379], [458], [336], [853], [267], [141], [679], [25], [273], [165], [69], [809], [113], [114], [148], [], [340], [137], [291, 340], [149], [9], [574], [781], [834, 400, 667], [196], [], [954], [436], [695], [945], [767], [350], [140], [698], [292], [810, 878], [315], [243], [774], [780], [], [232], [723], [993], [351], [6, 842], [658], [838, 434], [], [40], [77], [492], [48, 49], [23], [], [512], [161], [121], [351], [], [748], [32], [184, 189], [812], [315], [274], [106], [684], [367], [577], [121], [628], [444], [281, 282], [263, 230], [724], [213], [678], [388], [573, 518], [679], [682], [424], [930, 963, 868, 923, 813, 415], [473], [886], [421], [300], [908], [174], [529], [172], [614], [726], [605], [225], [453], [497, 406], [936, 909, 926], [791], [243], [66], [337], [684], [767], [299], [840], [74], [834, 630], [188], [971], [382], [669], [276], [30], [787], [166], [989], [347], [661, 479], [470], [625], [908, 404], [358], [519, 907], [147], [205, 197], [528], [480], [734], [549], [421], [588], [170], [229], [885], [826], [570], [596], [539], [908], [425], [934], [700], [82], [118], [184], [43], [87], [487], [80], [426], [221], [89], [529], [475], [601], [341, 342], [142], [553], [788, 502], [428], [], [162], [490], [419, 720], [339], [40, 46], [215], [44], [965], [299], [721], [888], [730], [530], [459], [96], [481, 482], [63], [855], [342], [562], [489], [857], [589], [590], [316], [763], [335], [412], [72], [550], [425, 912], [943], [575], [96], [332], [536], [285], [], [917], [563], [921], [385], [754], [132], [183], [834], [327], [962], [230], [393], [189], [339], [918], [977], [272], [914, 536], [98], [510], [273], [116], [510], [521], [707], [341], [488, 843], [886], [555], [200, 244], [76], [365], [609], [], [898], [564], [79], [994], [993], [78], [207], [522], [249], [14], [998], [8], [10], [], [2, 3], [], [298], [710], [501], [973, 991], [331], [41, 26], [690], [], [644], [128], [660], [602], [524, 461], [186], [685], [428], [384], [754], [386], [72], [911, 824], [], [488], [397], [155], [980], [208], [], [], [436], [594], [79], [491], [956], [777], [395], [788], [777], [112], [751], [838, 692], [200], [688], [26], [350], [582, 412], [905], [143], [685], [790], [803], [424], [970, 979], [845], [438], [87], [836, 837], [130], [466], [604], [726], [558], [468], [820], [836, 837, 617], [745], [294], [949, 923], [185], [366], [184], [265], [625], [987], [359], [108], [], [443], [323], [610, 899], [911], [245], [952], [180, 435], [732], [], [378], [815], [213], [806], [429, 981], [709], [744, 657], [556], [238], [548, 851], [179], [], [254], [72], [814], [263], [523, 728], [977, 978, 853], [547], [120], [7], [127], [154], [223], [39], [656], [192, 852], [192], [0], [112], [209], [538], [54], [78], [916], [362], [688], [561], [256], [], [468, 718, 839], [58], [14], [693], [842], [530], [209, 210], [860], [869], [737, 455], [537], [228], [118], [907, 818], [689], [792], [704], [688], [385], [736], [897], [823], [895], [986], [375], [200], [336], [280], [609], [596], [119], [93], [228], [119], [836, 837], [692, 943], [9], [427], [614], [558], [205], [610, 796], [941], [787, 524, 461], [250], [781], [145], [664], [884], [251], [770], [], [], [], [482], [950], [477], [437], [], [], [775], [], [481], [206], [966], [298], [86], [750], [696], [967, 968], [743], [376], [531], [818, 862], [562], [951, 503, 572], [559, 799], [842], [644], [301], [774, 414, 842, 464, 978], [349], [252], [34], [348], [361], [57], [154], [], [934, 415], [261], [], [879, 344], [478], [], [207], [698], [970, 979], [565], [900], [632], [453], [358, 359], [481], [], [46], [393], [43], [363], [359], [921], [678], [515, 880], [746], [721], [37], [670], [859], [311], [46], [426], [73], [44, 26], [611], [136], [281], [172], [489, 10], [622], [69], [895], [231], [35], [422], [687, 406], [616], [47], [353], [99], [581], [760], [514, 774, 523, 655], [636], [110], [918], [547], [206], [511], [306], [919], [650], [838], [606], [555], [], [210, 164], [], [834], [157], [480], [259], [852], [769], [559], [216], [], [244], [], [949], [246], [652], [593], [418], [344], [], [495], [730], [275], [151, 508, 158], [244], [437], [932], [40, 46], [836, 837, 841, 970], [279], [890], [522], [32], [49], [241, 238], [641], [646], [548, 851], [], [166], [10], [699], [613], [170], [795], [535], [369, 379], [136], [196], [387], [362], [600], [9], [281], [], [404], [177], [192], [874], [], [281], [640], [231], [128, 135], [540], [707], [636], [], [], [532], [94], [996], [310], [241, 238], [948], [259], [376], [356], [953], [89], [396], [513], [870], [915], [292], [163, 168], [297], [271, 272, 273], [906], [322], [261], [607], [], [104], [67], [985], [810, 878], [992], [215], [298], [315], [283], [463], [647, 828], [770], [728, 735], [213], [301], [314], [674], [779], [400, 667], [720], [564], [700], [733, 919, 920], [640], [964], [90], [574], [248], [772], [939, 942, 948], [], [13], [736], [377], [673, 681, 620, 664, 526, 527, 782, 508], [503], [31], [472], [754], [479, 817], [219], [772, 711], [384], [892], [404], [286], [115], [130], [988], [439], [1], [], [285], [711], [524, 461], [448], [483], [528], [172], [515], [951], [608, 487, 824, 502], [], [851, 892], [934, 923], [258], [515, 643], [], [673, 592], [], [881], [302, 303], [697], [945], [198], [880], [20], [758], [306], [283], [936, 923], [612], [743], [50], [502], [673, 810, 526, 527, 782, 664, 508], [518, 465, 597, 413], [285], [19], [518], [293], [185], [773], [503], [251], [908, 895], [537], [], [715, 524, 461, 883], [], [], [343], [722], [667], [286], [280], [], [225], [518], [236], [487], [989], [463, 758], [25], [353], [607], [801, 983], [758], [809], [539], [161], [313], [], [12], [209], [973], [17], [705, 547], [143], [948], [171], [685], [125], [836, 837, 650, 819], [], [75], [], [746], [953], [113], [843], [279], [928], [749], [761], [600], [738, 580], [165], [604], [161], [333], [66], [524], [745], [84], [674], [55], [353, 350], [142], [722], [747], [582, 617, 728], [801], [617, 845], [236], [68], [507], [4], [905, 750, 846], [419], [998], [141], [118], [476], [736], [357], [947], [929], [731, 762], [489], [106], [282], [928, 960, 966, 923, 572], [610, 731], [728], [888], [649], [869], [], [340], [832], [987, 121], [], [19], [604], [303], [4], [700], [541], [662], [168, 210], [136], [679], [607], [], [787], [18], [489], [623], [744, 657, 517], [394], [89], [462], [934], [345, 690], [910], [604], [449], [645], [645], [801], [84], [804], [834, 655, 975, 630], [168], [18], [690, 346], [161], [425], [775], [608, 584], [292], [260], [24], [158], [248, 249], [964], [77], [642], [95], [20], [805], [961], [884], [96], [551], [732], [424], [771], [965], [551], [500], [829], [181], [887], [866], [999, 218, 700], [831], [538], [676], [981], [102], [43], [312], [469], [702], [188, 189], [901], [611], [738], [510], [164], [819], [912], [142], [832], [48], [646], [811], [81], [965], [460], [866], [32], [511, 479], [577], [671, 444], [92, 95], [371], [404], [76], [971], [294], [694], [988], [530, 619, 846], [344], [417], [52], [364], [], [886], [90], [299], [489, 600], [861, 435, 285], [991], [151], [610, 903], [286], [948, 572], [557, 733], [690], [582, 879, 692, 954, 955], [684], [820], [425], [376], [410], [404], [759, 447], [377], [814], [795], [738], [181], [263], [50], [619], [256], [909], [252], [641], [801], [673, 526, 527, 664, 508], [173], [949], [], [800], [962, 923], [478], [123], [722], [135], [369], [28], [323], [132], [316], [69], [175], [656], [987], [85], [889, 486], [738], [340], [331], [], [196], [214], [144], [], [610], [932], [962, 923], [280], [252], [188, 190], [532], [676], [360], [], [300], [412], [589], [879, 775], [538, 727], [615], [574], [617], [435, 789], [654], [981, 429], [746], [6], [856], [187], [578], [177], [402], [489], [108], [642], [847], [288, 290], [], [173], [241], [249], [288], [395], [33], [247], [958], [923, 806, 936], [632], [258], [43], [881], [803], [455], [585], [], [586, 652], [291], [0], [991], [842, 977, 978], [], [356], [254], [], [779], [688], [668], [677], [217], [327], [976, 978], [254], [316], [497], [44], [373], [177], [647], [11], [363], [162], [54], [114], [75], [32, 28], [660], [61, 62], [929], [352], [561], [9], [491], [137], [696], [267], [98], [464], [210], [], [259], [892], [350], [995], [743], [426], [44], [264], [585], [744, 657], [691], [669], [357], [892], [944], [230, 231], [711], [], [322], [407], [798], [948], [54], [329], [52], [986], [], [745], [236], [873], [682], [772], [682], [705], [498], [846], [], [], [393], [672], [717, 479], [436], [743], [765], [253], [608], [425], [148], [334], [193, 187], [0], [988], [201], [258], [680], [783], [808], [805], [177], [94], [788], [858], [952], [701], [62], [787], [349], [600, 894], [200, 175], [425], [], [564], [351], [433], [169, 172], [611], [19], [110], [923, 925], [271], [695], [607], [972], [44], [674], [673, 526, 527, 664, 508], [222], [547], [809], [], [937], [423], [631], [966, 459, 445], [299], [449], [], [357], [], [343], [770], [105], [515], [809], [88], [515], [128], [630], [999, 191], [379], [750, 281], [746], [245], [102], [839, 975], [877], [884], [917, 453, 454], [391], [81], [330, 331], [], [168, 159], [], [479], [587], [509], [72], [574], [299], [922], [711], [337], [113], [182], [725], [988], [346], [452, 968, 504], [626, 893], [316], [638, 639], [880], [641], [922], [933, 923], [96, 489, 93], [587], [892], [300], [281, 283], [31], [248], [829], [192], [], [403], [767], [40, 489, 46], [521], [382], [84], [966, 907], [656, 627, 468], [810], [492, 750, 831, 414], [929], [486], [967, 968, 504], [281], [376], [504], [], [873], [418], [608], [553, 493], [443], [752, 852], [604], [110], [306], [850], [955], [953], [262], [531], [189], [279], [677], [90], [209], [742, 662], [211], [737], [64], [785], [681, 810, 620], [611, 954], [203], [845, 966], [714], [793], [491], [904], [474], [309], [731], [854], [211], [671, 535], [], [877], [768, 414], [822], [683], [462], [975], [529], [620], [497, 538], [263, 231], [681, 620], [283], [364], [535], [889], [145], [159], [746], [301, 310], [198], [240], [889], [416], [469], [839, 718, 978, 821], [23], [553], [796], [970, 671], [962, 937, 923, 959], [698], [499], [166], [932], [689, 601], [545], [96], [], [323], [925], [278], [95], [309], [500], [222], [896, 999, 861], [488], [324], [593], [], [463], [613], [63], [180], [685], [637], [38], [341], [542], [343], [988], [656], [130], [681, 620], [963], [648], [308], [939], [125], [301], [791], [569], [425], [309, 599], [529], [228], [431], [182], [178], [450], [153], [419], [145], [301], [459, 655, 638, 639], [788], [908, 895], [719], [221], [546, 650, 819], [826], [988], [91], [382], [689], [335], [720], [548], [159], [260], [223], [259], [972], [87], [555], [], [326], [874], [71], [679], [53], [367], [69], [703], [766], [556], [714], [512], [418], [376], [68], [834, 400], [924], [320], [908, 913, 404, 977, 978], [809], [843], [560], [835], [610, 836, 837], [768], [765], [879], [365], [678], [207], [373], [896], [820], [874], [490], [81], [708], [], [762, 868, 659, 532, 470, 923, 924], [787], [138], [192], [203], [731], [836, 837, 839, 460, 718], [928], [562], [791, 254], [20], [938], [287], [330], [628], [471], [174], [513], [684], [630], [360], [590], [200], [344], [934, 959, 923], [656], [330], [562], [963], [515, 652], [881], [186], [352], [37], [809, 925], [926], [], [825, 706], [538, 698], [531], [987, 998], [526, 782, 664], [159], [382], [230], [587], [970, 795], [706], [182], [], [349], [41], [72], [215], [433, 638, 639], [103], [616], [409], [207], [950], [423], [453], [564], [844], [911], [833], [496], [945], [106], [631], [912], [15], [812, 908, 404], [580], [957], [836, 837, 630], [872], [974], [956], [738], [775], [721], [218], [738], [], [772], [6], [632], [543], [], [206], [806], [816], [98], [122], [912], [221], [422], [186], [376], [923, 700], [358, 359], [795, 799], [36], [305], [774], [530], [137], [515], [447], [306], [299], [338], [695], [404], [988], [148], [342], [243], [771], [51], [325], [311], [199, 251], [420], [155, 157], [349], [281], [734], [649], [473], [892], [510], [194], [314], [391], [72], [622, 759], [578, 834, 457, 982], [662], [95], [861], [865], [], [681, 620, 526, 508], [748], [832], [435], [782, 664], [], [637], [251], [], [276], [137], [654], [159], [818], [230], [537], [880], [413], [68], [605], [927], [354, 680], [863], [9], [39], [428], [941], [658], [793], [571], [652, 625, 447], [139, 141], [153], [434, 823], [820], [995], [46], [502], [737], [319], [859], [248], [746], [829], [969], [649], [319], [884], [297], [450], [237], [439], [426], [989], [177], [993], [489], [795], [128], [154], [513, 566], [811], [775], [367], [], [547], [679], [375], [91], [730, 603], [411], [96], [912], [307], [62], [591, 659], [783], [3, 983], [505], [832], [727], [330], [581, 479, 436, 511], [646], [738, 532], [942], [248], [839], [926], [643, 876, 435], [117], [317], [681, 620, 526, 664, 508], [116], [660], [981], [74, 77], [482], [], [968, 923], [872, 622, 759, 414], [870], [673, 527, 782, 664, 508], [64, 59], [161], [738], [994], [13], [119], [365], [157], [198], [193], [793], [977, 978, 472], [526, 495, 786], [962, 923, 935], [125], [3], [235], [497], [786], [810, 333, 508], [578], [845], [291], [], [257], [805], [472], [], [375], [443], [], [507], [924, 965], [774], [93], [514], [10, 11, 14], [934], [947], [443], [437], [367], [837], [514], [126], [549], [373], [623], [851], [670], [636], [468], [149], [416], [863], [203], [448], [908, 404], [30], [548, 782, 851, 598, 632], [2], [776], [487, 590], [607], [751, 479], [927], [43], [761], [407], [602], [168], [258], [920, 919], [931], [958], [955], [75], [7], [141], [], [191], [273], [75], [460], [496], [398], [], [262], [667], [63], [645], [712], [776], [], [723], [988], [673, 527, 664, 508], [713], [355], [487, 531], [454], [978, 222], [559], [800], [687], [737], [222], [384], [940], [], [272], [543], [103], [51], [777, 787], [590], [465], [926], [452], [597], [610], [227], [981], [749], [751], [331], [222], [940, 942], [956], [608, 681, 620], [592], [346], [663], [205], [684], [178], [607], [44], [47], [954], [602], [411], [813], [133], [871], [43], [58], [140], [511], [576], [606], [368], [741], [368], [587], [15], [724], [], [876, 435], [898, 680], [72], [879], [776], [385, 907], [900], [909, 926], [445], [21], [], [725], [437], [6], [896], [990], [498], [], [116], [932], [369], [234], [881], [311], [], [491], [682], [267], [220], [734], [279], [148], [997], [], [385, 386], [844], [801], [129], [709], [822], [495], [987, 998], [675], [852], [], [], [], [218], [470], [], [584], [315], [515, 819], [136], [780, 977, 914, 978], [636], [941], [941], [65], [657, 475], [152], [900], [799], [956], [957], [525], [45], [903, 689, 501, 887], [547], [853], [726], [810, 878], [784], [632], [841, 794], [852], [337], [992], [353], [598], [797], [889], [121], [701], [321], [562], [943], [452], [129], [610], [466], [0], [98], [581, 717], [228], [4], [555], [844], [528], [3], [487], [898], [277], [393], [342], [929], [896], [943], [], [211], [898], [590], [], [11], [726], [866], [990], [873], [610], [893], [952], [407], [885], [327], [359], [], [], [165], [449], [174], [281], [804], [176], [975], [757], [530], [397], [875], [619], [516], [687], [627], [243], [220], [], [131], [205], [], [470], [253], [307], [593], [62], [987, 998], [861], [907, 440, 572], [594], [449], [], [897], [619, 846], [755], [82], [510], [754], [613], [], [635], [183], [277], [363], [928], [321], [728, 936], [307], [292], [20], [835], [488, 616], [956], [301], [255], [538], [355], [866, 853], [546, 650, 818, 819], [300, 302], [306], [393], [804], [925], [794], [868, 931, 968, 532, 504], [427], [410], [], [801, 983, 570], [363], [941], [385], [812], [876, 435, 794], [681, 620, 285], [142], [], [551], [581], [253], [749], [453], [102], [899, 505], [679], [83], [310], [255], [608, 515], [923, 572], [99], [509], [445, 638], [679], [479, 751], [200], [89], [338], [744, 586, 657, 408], [820], [849], [992], [33], [139], [733], [896, 861], [938], [138], [674, 333], [610], [496], [290], [640], [499], [853], [], [944], [53], [576], [270], [636], [79], [], [201], [884, 406, 857], [127], [568], [], [785], [], [987, 998], [806, 975, 445], [835, 733], [258], [789], [658], [182], [739], [], [986], [767], [326], [762, 572], [229], [112], [685], [373], [873], [], [333], [659], [133], [165], [675, 757], [855], [451], [692, 509], [655, 843], [8], [56], [332, 478], [979], [505], [473], [202, 189], [672], [660], [334], [460], [769, 77, 815, 798], [293], [], [995], [65], [934], [690], [568], [317], [340], [850], [399], [10], [29], [544], [], [746], [352], [221], [717], [396], [315], [875], [720], [557], [92], [17], [441, 572], [455], [303], [834, 906], [442], [65], [534], [684], [974], [96], [889], [679], [857], [856], [679], [831], [40], [569], [412], [125], [322], [], [352], [991], [401], [440], [259], [751], [441, 932], [391], [421], [162], [226], [228, 229], [281, 282], [708, 682], [516, 431], [786], [200], [550], [500], [803], [523], [970], [781], [397], [669], [673, 508], [143], [113], [271, 277], [889], [932], [472], [569], [645], [], [783], [673, 526, 527, 782, 664, 508], [131], [884], [204], [195], [570], [225], [904], [14], [184], [566], [7], [987, 998], [575], [693, 472], [28], [635], [155], [29], [842], [987], [34], [217], [407], [773, 455], [557], [994], [77], [271], [94], [650], [], [827], [449], [299], [75], [809, 942, 659], [821], [418, 709, 838, 767], [336], [757], [779], [786], [49, 50], [688], [817, 511, 479], [165], [67], [145], [407], [369], [216], [58], [695], [239], [622], [19], [740], [], [213], [576], [], [906, 834, 630], [812], [], [485], [456], [851], [10], [549], [773], [143], [28], [218], [840], [86], [], [], [195], [], [337], [254], [935], [561], [599], [651], [613], [11], [75], [862], [], [47], [506], [904], [740, 756], [917, 921], [920], [912], [77], [286], [], [126], [274], [24], [20], [904], [16], [0], [144], [248], [502], [687], [357], [336], [518, 671, 444], [11], [242], [274], [523, 721], [161], [711], [521, 618, 651, 813, 827], [388], [84], [62], [687], [374], [], [504], [216, 219], [158], [216], [672], [559, 818, 819], [], [962, 923], [72], [636], [863], [325], [421, 632], [], [162], [691], [975], [652], [113], [36], [899], [288], [328], [896], [579], [555], [486, 889], [719], [223], [19, 13], [781], [608], [314], [43], [943], [566], [994], [125], [388], [479, 817], [727], [318], [518], [574], [867], [540], [506], [882], [300], [613], [66], [865, 850], [973], [157], [727], [750, 591], [], [398], [198], [602], [259], [512], [905, 854], [36, 37], [420], [162], [564, 750], [382], [95], [244], [715], [596], [247], [409], [], [890], [581], [736], [360], [4], [154], [286], [598], [96], [739], [30], [765], [806, 630], [21], [334], [343], [402], [3], [149], [803], [872, 453], [177], [203], [410], [511], [997], [199], [281], [128], [246], [520], [405], [164], [866], [468], [95], [634, 858], [206, 221], [780, 914, 921], [276], [955], [420], [270], [881], [], [40, 46], [249], [772], [478], [857], [637], [675], [419], [426], [259], [353], [185], [178], [554], [602], [354], [241, 238], [639], [3], [761], [288], [755], [], [264], [19], [937, 938], [306], [416], [168], [880], [], [447], [191], [69], [705, 547], [704], [218], [552], [662], [940, 941, 942], [173, 251], [], [121], [178], [914], [971], [206], [610, 890], [719], [31], [159], [619, 846], [225], [610, 465], [113], [281], [], [113], [212], [], [612], [300], [702], [819], [674], [513, 776, 819], [335], [498], [870], [702], [63], [204, 153], [730], [635], [996], [803], [131], [803], [977], [111], [], [792], [357, 358], [681, 819, 620], [], [965], [307], [50], [408], [826], [92], [879], [910, 567, 926], [513], [867], [514, 515, 898, 808], [100], [570, 691, 652], [489], [418], [387], [866], [350], [870], [420], [166], [540], [345], [819, 818, 632], [417], [640], [662], [914], [650, 541, 558, 819], [68], [707, 637], [557, 919], [96], [902], [172], [902], [587], [447], [959], [507], [132], [789], [342], [66], [875, 566, 541], [764], [51], [390], [791], [416], [517], [896], [], [18], [985, 309], [515, 469], [39], [395], [809, 959], [833, 913], [947], [126], [850], [813], [723], [73], [544], [165], [187], [886], [], [37], [147], [912, 824, 447], [864], [842], [], [723], [72], [539], [633], [609], [220], [489], [418], [555], [430], [113], [439], [221], [727], [616], [272, 280], [], [428, 195], [863], [530], [], [251], [], [979], [579], [306], [619, 846], [939], [751], [676], [281], [974], [859], [547], [703], [769], [888], [61], [], [218], [496], [392], [88], [904], [856], [323], [281], [804], [491], [122], [408], [809, 925], [785], [164], [], [968, 721], [259], [284], [11], [], [491], [147], [449], [504], [952], [488, 695], [661], [242, 243, 805], [102], [139], [2], [], [785], [251], [174], [425, 858], [489], [836, 837], [958], [44], [348], [266, 219, 156], [193], [24], [167], [518, 444], [970, 976], [766], [862], [733], [], [951], [934], [450], [], [649], [150], [955], [94], [135], [], [522], [641], [459, 978, 445], [836, 837], [606], [980], [95], [46, 59], [], [], [386], [287], [518], [], [578, 903, 689], [102], [186], [685], [252], [736], [179], [322], [475], [866], [], [427], [278], [602], [582, 950, 790, 953, 954], [120], [372], [641], [910], [626], [448], [803], [983], [319], [3], [202], [658], [528], [956], [], [500], [722], [759], [770, 788], [90], [892], [], [350], [188], [920], [576], [760], [908], [215, 218], [621], [407], [208], [610, 841], [526, 882, 606], [964], [534], [344], [726], [81], [83], [266, 267], [2, 3], [855], [201, 589], [654, 475], [234, 795], [72], [947], [], [426, 635], [16], [681, 620], [379], [765], [736], [888, 821], [27], [152], [53], [540], [903], [85], [64], [95], [834], [786], [908], [243], [253, 273], [479, 436], [74], [652, 847], [417], [711], [583], [639], [], [], [923], [131], [316], [510], [193], [372], [140], [770, 788], [842, 433, 639], [625], [], [34], [22], [259], [744], [878], [472], [470], [216], [690], [179], [30], [288], [518, 491], [694], [522], [1], [320], [809, 659], [850], [95], [529], [204], [890], [93], [865], [868, 495, 572], [546, 650, 664, 527, 819], [946], [629], [815], [661, 479], [488], [311], [130], [781], [90], [93], [250], [239], [684], [137], [94], [707], [570], [572], [268], [673], [449], [198], [787], [618, 926], [965], [930, 934, 923], [333], [344], [128, 131], [464, 787], [], [462], [382], [176], [441, 572], [861], [81], [509, 582], [713], [120], [858], [621], [263, 236], [248, 249], [345], [762], [57], [12], [703], [150], [734], [881], [866], [416, 602], [267], [840], [400, 667], [62], [399], [], [17], [426], [81], [127], [445], [88], [981], [912], [109], [673, 526, 527, 664, 508], [220], [693], [740], [699], [], [182], [213], [201], [243], [], [376], [535], [275], [958], [605, 526, 784, 477], [240, 241, 238], [973], [459], [225], [564], [846], [275], [86], [363, 501], [640], [512], [564], [355], [968, 505], [738], [636], [630], [142], [10], [], [315], [387], [931], [992, 997, 947], [543], [258], [610], [668], [404], [], [50], [922], [923, 122], [574], [741], [456], [967, 968, 504], [543], [156], [770, 788, 916], [646], [35], [488, 600], [673, 904, 905, 526, 527, 664, 508], [796], [646], [393, 108], [226], [777, 524, 461, 787], [827], [920], [989], [30], [165], [361], [524, 461], [], [387], [432], [], [385, 101], [489, 368], [355], [705], [148], [549], [995], [], [123], [384], [916], [95], [652, 764], [396], [807], [], [992], [783], [299], [529], [958], [211], [961], [87], [232], [369], [664], [130], [], [444], [515], [894], [453, 831], [790], [660], [668], [919], [], [14], [327], [297, 295], [898], [102], [905, 794], [39], [217], [194], [869], [40], [475], [8], [927], [], [108], [588], [638, 639], [745], [232], [11], [], [875], [443], [245], [], [820], [], [577], [277], [494], [], [542, 822], [444, 637], [907], [], [423], [45], [105], [530], [352], [754], [675], [141], [476], [], [681, 620], [683], [388], [111], [497, 663], [171], [139], [530], [189], [125], [804], [994], [581, 479], [939, 943], [553, 493], [], [459], [872], [316], [], [289], [125], [131], [422], [617], [946], [336], [], [963], [539], [960], [812], [727], [128], [150], [127], [472], [936, 909, 926], [263, 253], [448], [923, 968, 849, 762, 828], [77], [416], [890], [311], [709, 767], [417], [479, 661], [216], [407], [138], [], [903], [805], [405], [989], [330], [16], [480], [519], [], [610, 589], [216], [], [810, 508], [216], [588], [938], [604], [341], [82], [651], [847], [], [67], [409, 892], [582, 936, 940], [333], [111], [], [432], [993], [178], [234], [750, 721], [341], [645], [449], [608, 744, 841], [975, 447], [349], [515, 665], [0], [511], [34], [638, 639], [911], [841], [741, 539], [299], [508], [62], [819], [981], [518, 665, 671], [955], [484, 914, 821], [782, 664, 281], [430], [905, 799], [], [131], [192], [48], [726], [92], [155], [362], [510], [607], [588], [238, 241], [187], [508], [862], [873], [911], [842], [809], [538], [866], [733], [977, 978], [], [499], [809, 923, 925], [403], [532], [901], [], [209], [35], [844], [232], [507], [299], [497], [111], [563], [680], [995], [403], [633], [340], [804], [], [517], [139], [936], [452], [17], [609], [247], [], [672], [560], [102], [356], [498, 919], [403], [], [143], [820], [324], [739], [479], [85], [330], [558], [], [433, 842, 639], [340], [67], [90], [318], [4], [532], [76], [544], [403], [764], [], [874], [537], [365], [45], [494], [95], [581, 661, 479], [145], [777, 623, 499], [429], [554], [8], [268], [140], [], [343], [787], [522], [398], [276], [864], [313], [974], [781], [217], [], [892], [364], [180], [44], [587, 784], [923], [676], [], [896], [586], [606], [770, 806, 608, 610], [804], [228], [336], [739], [432], [], [16], [73], [707], [916], [291], [279], [267, 265], [53], [825], [962], [807], [399, 501], [812], [995], [640], [139], [320], [245], [891], [540], [696, 477], [955], [738], [636], [528], [545], [316], [619, 846], [838, 551, 711, 629, 631], [], [53], [761], [491], [768], [701], [489], [468], [355], [24], [726], [812], [245], [55], [896], [332], [938], [614], [356], [56], [311], [317], [494], [150], [720], [139], [486], [118], [], [744, 657], [74], [794], [903], [], [23], [772, 679, 488], [104], [437], [602], [753], [456], [389], [908], [687], [22], [748], [682], [451], [894], [919], [308], [792], [161], [383], [681, 620, 526, 916], [915], [], [401], [439, 873], [235], [754], [662], [621], [821], [33], [847], [433], [585], [], [526, 673, 508], [482, 754], [552], [386, 101], [974], [825], [248, 249], [538, 698], [183], [46], [647, 845, 438], [240, 238], [874], [], [932], [763], [608], [17], [842, 459], [955], [758], [990], [38], [354], [853], [], [997], [212], [702], [745, 572], [], [696], [635], [449], [10], [91], [194], [873], [847], [250], [91], [989], [679], [784], [146], [255], [631, 838], [688], [13], [971], [157], [879], [], [165], [836, 837], [], [561], [458], [739], [], [869], [490], [806, 911, 502], [807], [], [], [40, 46], [560], [22], [568, 824, 869], [519, 907], [712], [144], [], [236], [858], [552], [146], [239], [256, 234], [], [957], [704], [791], [567, 926], [827], [377], [], [910], [160], [601, 578], [260], [542], [690], [146], [777], [651], [159], [371], [189], [64], [683], [814], [416], [717], [773, 659], [940], [465, 597, 630, 413], [468], [636], [145], [348], [398], [530], [869, 824], [880], [12], [933], [381], [146], [802], [127], [153], [968, 504], [814], [894], [637], [55], [359], [641], [635], [396], [537], [], [41, 44, 26], [937], [318], [12], [890], [266], [808, 836, 837], [624], [538], [575], [959], [10], [632], [72], [918, 721, 608, 750], [548], [740], [], [321], [661], [38], [991], [444], [573], [205], [619], [667], [807], [602], [757], [205], [], [67], [710], [145], [181], [64, 55], [619, 846, 721, 831], [100], [261], [28], [900], [552, 903], [772], [513, 776, 822, 541, 542], [897], [936], [140], [600], [], [329], [603], [642], [135], [658], [184], [416], [283], [950], [570], [655, 806], [794], [], [954], [921], [563], [], [554], [830], [277], [121], [839], [93], [711], [], [77], [818], [794, 861], [946], [208], [927], [211], [647], [693], [868], [267], [404], [979], [132], [120], [193], [653], [569], [489], [983], [770], [272], [752], [845], [448], [396], [742], [728], [], [321], [], [621], [291], [575], [243, 254], [820], [421, 693], [315], [589], [207], [274], [356], [730], [869], [619, 846, 721, 883, 831], [284], [311], [673, 526, 527, 664, 508], [424, 423], [886], [733], [724], [489, 444], [41], [324], [69], [376], [835], [323], [479], [6], [754], [452, 151], [204, 155], [], [320], [481], [337], [859], [324], [245], [619, 846], [865], [], [717], [459], [86], [118], [355], [], [525], [], [398], [570], [389], [422], [343], [74], [148], [211], [846], [126], [682], [923, 924], [], [293], [263], [699], [491], [], [42], [146], [408], [931], [655, 752, 852], [], [115], [657], [223], [881, 579, 889], [332], [962, 659], [558], [865], [295], [434], [572], [95], [108], [98], [846], [156], [337], [819], [750], [], [648], [], [195], [627], [180], [856], [975, 977, 472], [123], [289, 293], [109], [749], [177], [684], [584], [546, 650, 402, 818, 819], [], [472], [698, 538], [52], [587], [535], [375], [240, 241, 238], [922], [869], [673, 681, 526, 527, 782, 664, 508], [470], [847, 403], [714, 402], [608], [481, 482], [6], [418, 918], [], [90], [496], [903], [174], [], [281], [673, 810, 526, 527, 782, 664, 508], [640, 919, 841, 468, 728, 608], [358], [203], [421], [754, 632], [990], [686], [460], [844], [150], [258], [71], [446], [40, 44], [419], [865], [318], [722], [364], [585], [], [466], [914], [211], [858], [868], [230], [715], [339], [], [696], [482], [84], [909, 910, 926], [581, 479, 436], [], [], [], [226], [861], [882], [341], [792], [], [827], [360], [438], [318], [2], [229], [999, 435, 861], [275], [103], [672], [286], [98], [408], [942], [679], [35], [688], [79], [171], [232, 852], [22], [654], [436], [182], [950], [688], [816], [222], [773], [472], [296], [951], [517, 540], [911, 735], [383], [173], [41], [962], [467], [846], [664], [233], [905, 869], [82], [692], [475], [928, 960], [699], [741, 735], [378], [209], [569], [808], [589], [4], [166], [922], [952], [839], [770], [857], [174], [261], [406], [740, 783], [264], [41], [556], [448], [242], [680], [744, 657], [420], [824, 474, 911], [675], [50], [568, 248], [352, 353], [984, 425, 853], [777], [768], [265], [894], [], [619, 846, 470], [793], [12, 14], [967, 968, 504, 923], [823], [61], [419], [569], [656, 858], [431], [315], [508], [746], [453, 454, 624], [654], [74, 815], [444], [3, 4], [74], [199], [35], [232], [231], [524, 461], [111], [256, 218], [994], [], [810, 590], [964], [806, 870, 843, 850], [211], [519], [452], [637], [198], [946], [821], [508], [217], [873], [258, 279], [790], [672], [578], [614], [281], [594], [654], [465, 597], [51], [504], [106], [22], [821], [45], [516], [524, 461, 787], [694], [], [], [363], [767], [39], [7], [585], [647], [722], [510], [457], [174], [439], [919], [516], [215], [119], [233], [245], [871, 536], [929], [946], [71], [842, 445], [281], [123], [58], [497], [205], [], [438], [279], [710], [897], [912], [512], [689], [], [879, 614], [181], [388], [761], [509], [188], [537], [439], [112, 977, 978], [687], [975, 703], [], [773], [859], [14], [552], [190], [549], [500], [385], [524, 461], [802], [332], [49], [397], [913], [945], [176], [198], [26], [], [107], [], [868, 849, 504], [101], [847], [809, 924], [247], [736], [813], [385, 862], [142], [585], [4], [971], [730], [707], [445], [821], [795], [168], [780], [295], [581, 479, 436], [790], [361], [587, 792], [875], [675], [481], [104], [5], [941, 923], [454, 911, 474], [], [262], [456, 970, 445, 638], [508], [981, 429], [707], [475], [325], [851], [292], [412], [], [907, 440], [755], [495], [486], [941], [601, 578, 982], [206], [371], [896, 861], [686], [923], [672, 899, 469, 827], [420], [440, 441], [], [797], [596], [], [354], [944], [464, 676], [338], [462], [930], [731], [680], [679], [938], [413], [438], [455, 600], [162, 167], [164, 166], [813, 567], [921], [7], [106], [321], [897], [131], [921], [110], [453, 454, 559], [737], [259], [71], [690, 345], [144], [453], [370], [267], [640], [968, 504], [941], [411], [695], [225], [205], [10], [704], [72], [876, 435], [307], [650], [987, 923], [455], [728], [734], [680], [497], [877], [317], [591, 868], [595], [635], [852], [987, 998], [654], [970], [417], [56], [479, 511], [280], [256], [394], [422, 559], [205], [962, 923], [], [123], [991], [891], [416], [761], [983], [871], [981, 429], [291], [603], [5, 6], [595], [723], [544], [2], [873], [668], [], [], [898], [458], [880], [962, 467, 499], [179], [340], [515], [729], [700, 999], [245], [97], [330], [655], [629], [919], [71], [421], [519, 907], [977, 978, 445], [], [391], [230], [645], [], [283], [518, 671], [866], [31], [], [], [678], [521], [458], [150], [486], [], [347], [645], [], [466], [288], [745], [702], [562], [618, 909], [719], [918], [335], [344], [575], [499], [602], [952], [520, 680, 431, 529, 850, 443], [], [933], [874], [387], [234], [51], [61], [165, 187], [87], [], [61], [383], [194], [373], [193], [866], [470], [570], [257, 258, 489], [269], [14], [115], [393], [], [772], [937], [625], [673, 553, 526, 527, 664, 508], [979], [10], [511], [916], [388], [279], [], [523], [2], [902, 488], [768], [], [157], [24], [950], [944], [230, 231], [337], [612], [846], [215], [625], [529], [258], [985], [769, 798], [769, 114], [443], [205], [15], [578, 885], [683, 875, 558], [800], [281], [889], [434], [770], [519], [508], [673, 664, 526, 527, 508], [325], [803], [760, 415], [360], [743], [640], [729], [573], [731], [91], [], [301], [], [145], [931], [816], [723], [], [669], [941], [810], [730], [811, 281], [605], [22], [945], [678], [911, 658], [751], [], [292], [520, 697], [480], [230], [705], [536], [327], [232], [624], [110], [301], [889], [23], [429], [668], [337], [110], [864], [910], [448], [807], [723], [58], [105], [439], [199], [96], [746], [769, 606], [429], [], [650], [312, 311], [824], [866], [995], [554], [898], [577], [980], [768], [570], [850, 911], [10], [444], [977], [177], [443], [911], [352], [], [24], [708], [170], [860], [56], [936], [5], [318], [589], [648], [937], [668, 538, 607], [692], [836, 879, 822], [270], [543], [228], [923, 947], [933], [567], [920], [907], [880, 972], [615, 543], [568], [320], [927], [957], [329], [88], [104, 489], [461], [591], [896], [338], [971], [608, 518, 734, 465, 413], [797], [969], [999], [129], [373], [159], [366], [844], [647], [482], [142], [983], [129], [205], [245], [717], [52], [908, 404, 895], [], [453, 850], [473], [808], [332], [858], [448], [668], [700], [829], [795], [21, 127], [197, 199, 836, 837], [281, 282], [904], [763], [681, 620, 508], [256], [51], [612], [805], [155], [439], [373], [908], [546, 650, 819], [138], [111], [502, 539], [562], [702], [753], [304], [425], [828, 845], [307], [872, 759], [941], [923, 907, 532, 470, 762, 572], [], [169], [588], [33], [498], [557, 733], [107], [546, 889], [490], [597], [139], [806, 655], [778], [673], [], [287], [97], [332], [463], [33, 983], [636], [486], [183], [950, 951], [], [822, 542], [56], [723], [], [39], [240, 241], [696], [864], [921, 917], [977, 978], [868, 588, 692], [160], [824, 775], [790], [49], [761], [7], [235], [803, 637], [276], [584], [71], [756], [645], [629, 508], [774], [858], [53], [750], [836, 837, 906], [38, 45], [640], [856], [602], [225], [953], [484], [466], [769], [491], [489], [326], [71], [331], [66], [302], [434], [], [409, 531], [511], [745], [519], [114], [], [429], [418], [334], [318], [162], [182], [614, 818], [225], [740, 783, 477], [80], [], [14], [499], [591], [497, 884], [568], [100], [894], [486], [354], [], [521, 926], [514, 515, 597, 763, 445], [924], [63], [477], [676, 173], [888, 718, 839], [277, 278], [60], [], [716, 13], [913], [207], [375], [652, 465, 830], [340], [156], [154], [253], [251], [861], [277], [785], [317], [514, 655], [], [617, 823], [483], [382], [613], [48], [777], [812], [502], [198], [263], [306], [37], [35], [184, 191], [801], [262], [485], [], [576], [150], [700, 950], [333], [30], [23], [130], [50], [619, 750, 846, 721], [677], [249], [557], [35], [108], [], [400, 667], [960, 868], [348], [649], [830], [996], [670], [660], [494], [851], [662], [751, 479], [675], [851], [454, 917], [227], [747], [56], [332], [214], [930], [127], [987, 998], [921], [66, 68], [], [283], [784], [386], [996], [744, 657], [652, 465, 413], [239], [296], [359], [945], [876, 435, 282], [651], [], [509], [124], [66], [], [981], [572], [334], [127], [319], [900], [29], [327], [28], [382], [344], [731], [399], [680, 898], [156], [995], [161], [78], [367], [494], [774, 464], [951], [480], [81], [252], [464], [532, 453], [52], [], [735], [], [301], [354], [338], [653, 665], [482, 485], [992], [562], [676], [], [219], [570], [542], [974], [713], [538, 727], [801, 107], [725, 505], [937], [891], [], [290], [513, 875, 819], [850], [755], [866], [687], [344], [441, 572], [924], [237], [903], [93], [92], [350], [923, 951, 762], [162], [267], [335, 845], [], [411], [774], [357], [137], [581, 586], [608, 464], [411], [660], [162, 166], [810, 878], [937], [661], [558], [168], [89], [732, 622, 759], [235], [247], [384], [845], [871], [686], [993], [196], [345], [548], [404], [391], [174], [686], [755], [14], [143], [779], [914], [2], [930], [538], [912, 825], [478, 239], [478], [], [75], [922], [401], [730], [399, 840, 462, 741], [971], [32], [40, 46], [791], [525], [685], [672], [863], [754], [366], [205], [580], [202], [474], [416], [598], [635], [986], [914], [897], [607], [453, 454, 624], [757], [11], [960, 928], [136], [], [747], [311], [784, 587, 740, 477], [249], [326], [], [], [337, 360], [823], [58], [], [189], [936], [886], [762], [402, 593], [], [4], [851], [944], [708], [845], [164], [945], [256], [53], [821], [455], [918], [119], [55], [462], [20], [857], [650, 402, 819], [646], [197], [439], [752], [774, 412, 671, 836, 837, 733], [8], [670], [845], [617, 515, 860], [802], [853], [32], [650, 683], [139], [487], [401], [168], [82], [877], [781, 409], [305], [652, 830, 764, 413], [853], [723], [534, 729], [578, 876, 689, 435, 794], [858, 807], [884], [353], [218], [451], [879], [504, 968], [196, 198], [], [458], [805], [21], [864], [589], [384], [652, 465], [881], [458], [], [659], [847], [813], [923], [506], [198], [103], [912], [854], [674], [673, 664, 526, 527, 632, 508], [0], [84], [183], [5], [37], [840, 462], [478], [270], [541], [81], [927], [810, 878], [677], [471], [649], [416], [929, 509], [251], [], [366], [335], [464], [625], [20], [776, 650], [561], [379], [559], [415], [139], [757], [142], [569], [], [201], [895], [576], [663], [491], [64], [39], [185, 182], [866], [844], [326], [530], [322], [407], [548], [579], [84], [717, 751, 479], [680], [812], [940], [284], [250], [484], [677], [297], [880, 731], [368], [291], [7], [296], [731, 861], [15], [31], [], [783], [431], [244], [16], [377], [639], [628], [908, 404], [185], [730], [660], [362], [647, 969], [519], [323], [978], [509], [721], [], [], [608], [309], [591], [316], [484], [], [496], [836, 837, 853, 762], [976], [922], [956], [619, 818], [422], [103], [624, 453], [871], [326], [270], [986], [478], [907, 440], [843], [685], [311], [426], [792], [764], [908], [280], [280], [], [503], [865, 509], [637], [672], [153], [110], [45], [595], [995], [916], [923], [], [375], [376], [219], [735], [], [406, 857], [], [963], [586], [148], [199], [56], [287], [473], [937], [449], [861], [195], [707], [584], [497], [514, 689], [704], [538], [533], [904], [692], [76], [286], [], [783], [216], [189], [25], [500], [102], [821], [795], [737, 455, 907, 440], [862], [760], [377], [179], [637], [999, 648], [685], [511, 479], [393], [390], [275], [626], [337], [464], [310], [968, 504], [116], [222], [272], [747], [845], [815], [40], [30], [402, 819], [966], [], [580], [873], [580], [448, 494], [957], [893], [557], [139, 140], [], [628, 536], [324], [578], [203], [757], [609], [947], [321], [945], [485], [610], [472, 693], [653, 463], [544, 909, 849, 469], [172], [118], [319], [518], [837, 678], [694], [962, 923], [957], [938], [422], [525], [], [135], [890], [224], [923], [100], [967], [42], [926], [566], [724], [114], [249], [], [913], [407], [804], [528], [254], [480], [441], [207], [607], [357], [85], [396], [694], [543], [875], [519, 956], [257], [873], [5, 6], [553], [105], [268], [], [304], [866], [157], [775], [896], [599], [528], [71], [351], [636], [464], [99], [336], [17], [39], [770], [882], [], [72], [659], [661], [836, 837, 487], [], [6], [352], [861], [307], [328], [341], [735], [733], [], [152], [732, 759], [924], [717], [867], [229], [], [662], [757], [577], [309], [581, 479], [724], [766], [842, 433], [587], [923], [645], [229], [685], [732], [340], [530], [352], [865], [826], [820], [853], [495], [475, 15], [25], [534], [822, 542], [311], [337, 334], [907, 499, 470], [749], [347], [260], [412], [442], [199], [834, 487], [498], [65, 56], [764], [789], [], [766], [811], [660, 757], [650, 402], [562], [968, 504], [353], [244], [570], [438], [795], [198], [298], [838, 551, 629, 631], [21], [90], [248], [17], [532, 762, 923], [669], [413], [716], [85], [467], [861], [893], [317], [803], [225], [426, 685], [410], [925], [], [185], [814], [351], [578, 452, 689, 538, 601], [974], [80], [343], [496, 529, 411], [84], [884], [433, 639], [322], [927], [550], [651], [512], [940], [988], [790], [791], [909, 567], [638, 639], [367], [400, 667], [], [25], [736], [474, 452], [95], [822], [90], [119, 39], [242], [86], [638, 639], [504, 850], [596], [54], [], [320], [773, 532, 923, 572, 762], [737], [916], [287], [168], [375], [129], [959], [546, 650, 818, 819, 542], [816], [597], [558], [551], [], [], [553], [603], [466], [], [], [80], [27], [162], [434], [82], [222], [532], [15], [730], [595], [382], [785, 464], [881], [753], [76], [112], [204], [], [618, 813], [350], [506], [947], [130], [278], [932], [338], [41], [401], [285], [32], [829], [156], [190], [226], [340], [327], [365], [498], [435, 794], [619, 846], [611], [910], [262], [905], [], [524], [503], [659], [558], [795], [807], [761], [984], [947, 125], [112], [299], [84], [122], [847], [847], [472], [219], [864, 586, 652, 413], [650, 568, 608], [44], [952], [149], [89], [583], [565], [145], [], [806], [31], [232], [703], [858], [73, 74, 815], [644], [70], [745, 620], [513, 875], [685], [173], [840, 587, 758], [836, 837, 842], [479], [742, 620, 664, 527, 508], [798], [79], [746], [198], [316], [727], [252, 262], [258], [597], [302], [859], [932], [637], [761], [209], [297], [442], [993], [32, 152], [350], [989], [815], [432], [779], [], [1], [880], [578, 834, 836, 837, 458], [163], [506], [804], [672], [110], [143], [934], [566], [214], [911, 253, 735], [524, 461, 958], [525, 718, 437], [], [518], [119], [177], [400, 667], [295], [], [789], [234], [929], [638, 639], [597], [937], [494], [257], [62], [], [347], [369], [924], [539], [397], [317], [126], [580], [550, 968], [810, 878], [255], [576, 536], [459], [188], [], [33], [723], [594], [435, 58], [39], [85], [199], [888], [883, 739], [409], [147], [478], [462], [592], [716], [342], [143], [], [494], [74], [666], [464], [218], [411], [365], [900], [247], [754], [174], [515, 790, 636], [868], [12], [535], [887], [300], [39], [938], [427], [287], [203], [52], [94], [361], [317], [793], [935], [928], [977], [431], [776], [61], [505], [51], [63], [71], [116], [914, 536], [672], [], [401], [495, 532], [205, 478], [449, 975], [951], [671], [344], [806], [330], [697], [281], [615], [333], [528], [699], [651, 827], [812], [], [603], [], [337], [457], [948, 950, 954], [808, 842, 977, 978], [], [618, 813], [417], [60], [801], [453], [132], [582], [332], [114], [324], [489, 134], [260], [825], [], [968, 762], [775], [474], [], [87], [702], [579], [544, 827, 469], [], [344], [968], [667], [261], [988], [593], [931], [688], [438, 728], [845], [694], [843], [715, 652], [], [170], [979], [378], [], [582, 936, 943], [872, 759], [740], [378], [108], [127], [935], [203], [931, 933], [351], [254], [915], [633], [967], [429], [751], [268], [10], [983], [578, 982, 601], [744, 657], [556], [970], [268], [105], [464, 597], [482, 632], [104], [255], [569], [582], [272], [], [115], [399, 501], [133], [548], [241], [796], [111], [371], [891], [797], [957], [345], [666], [342], [159], [608, 117], [562], [608, 610], [260], [393], [296], [682], [608, 774, 788], [148], [776], [], [537], [203], [207], [765], [517, 821, 536, 510], [459, 434], [478], [683], [495], [875], [683, 566], [233], [985], [511], [710], [959], [973], [988], [673, 526, 527, 782, 664, 508], [235], [424], [206], [224], [539], [396], [945], [281, 285], [884, 406], [702], [542, 541], [605], [484, 814], [774], [601], [9], [652], [950], [993], [374], [603], [616], [206], [586], [930], [647], [343], [269], [328], [156], [153], [], [484, 871], [385, 101], [885], [], [794], [], [291], [664, 782, 662], [981], [410], [47], [364], [290], [708, 517], [852], [115], [916], [528], [115], [754], [459, 608], [360], [], [322], [872, 652, 447], [551], [751, 479], [97], [185, 153, 187], [202], [966], [971], [597], [599], [77], [72], [839], [629], [111], [718], [698], [223], [934], [360], [993], [632, 818, 819], [364], [984], [770, 806], [728], [528], [581], [322], [77], [894], [445], [869], [384], [617, 823], [], [38, 26], [160], [479], [250, 220, 248], [276], [901], [923], [308], [342], [], [838, 711, 648, 585, 631], [], [], [279], [78], [97], [], [746], [532], [688], [568, 831], [599, 955], [109, 973], [357], [919], [466], [908], [339], [573], [643], [715, 524, 461, 787], [301], [677], [], [143], [158], [451], [139], [700], [436], [774, 681, 620, 750, 721, 846], [704], [369], [936], [736], [901], [287], [], [835], [638, 639], [442], [734], [329, 397], [], [310], [970, 518, 671], [110], [580], [709, 710], [735], [265], [565], [560], [43], [282, 478], [800], [388], [177], [275], [340], [766], [290], [196], [148], [865, 850], [63], [157], [825], [240, 241], [920], [], [752], [694], [870], [770, 488, 843], [160], [700, 999], [], [958], [387], [556], [737], [76], [456], [701], [81], [942], [262], [61], [992], [545], [77], [262], [783], [548, 869, 655, 851], [367], [373], [115], [399], [], [], [565], [446], [129], [6], [748], [417], [970], [358], [815], [], [767], [71], [290], [909, 926], [788], [416], [399, 824, 600], [696], [358], [905, 750, 721], [272], [920, 829], [558], [854], [248], [160], [323], [622, 759], [870], [796], [318], [775], [140], [614, 887], [954, 950], [581, 479, 717], [784, 740], [571], [116], [519], [136], [355], [75], [956], [88], [491], [288], [874], [228], [559], [293], [241], [500], [486], [943], [350], [407], [146], [], [47], [99], [303], [402], [879], [], [51], [160], [573], [457], [842, 463], [872, 420], [221], [389], [527, 782, 916, 664, 508], [826], [382], [], [125], [581, 436, 479], [97], [254], [802], [499], [977, 978], [660], [629], [432], [261], [193, 153], [242], [838], [], [698], [257], [923], [222], [157], [570], [112], [359], [451], [292], [513, 776, 683, 875, 822, 541, 542], [378], [496], [775], [879], [836, 837], [752], [723], [724], [202], [295], [447], [801, 842], [185], [810, 878], [93], [380], [984], [964], [122], [384], [151], [], [528], [989], [612], [704], [220], [768], [86], [100], [573], [2, 3], [959], [], [459], [994], [498, 598], [286], [303], [276], [341], [14], [953], [856], [248], [797], [350], [903], [760], [103], [413], [608, 770, 414], [936], [840], [134], [34, 978], [302], [211], [597], [852], [135], [552], [356], [927], [214], [164], [292], [41], [477], [769], [709], [], [488], [500], [640], [918], [483], [117], [95], [897], [884], [853], [99], [472, 693], [213], [202], [592], [14], [767], [875], [228], [], [277], [608, 481, 482], [243], [204], [132], [875], [126], [439], [724, 536], [528], [962], [400, 667], [312], [477], [267], [716], [569], [339], [819, 541], [809, 618, 925], [193], [], [711], [967, 968, 923], [779], [533], [330], [], [], [668], [736], [262], [136], [379], [], [671], [281, 285], [955], [573], [968, 504], [331], [132], [784], [592], [621], [215], [172], [458], [150], [303], [799], [], [654], [515], [49], [490], [15], [223], [262], [682], [301], [592], [635], [291], [718, 628, 540], [625], [360], [716], [752], [20], [623, 795], [421], [618, 659], [122], [183], [232], [221], [], [399], [704], [19], [], [568], [317], [542], [822], [561, 950], [968], [719], [151], [], [679], [386], [581, 479, 511], [485], [], [946], [642], [368], [25], [239], [472], [550, 967, 968], [647], [368], [83], [854], [801], [], [772, 748], [118], [87], [873], [772], [114], [935], [218], [], [464], [966, 907, 572], [2], [622], [449], [961], [777, 499], [691], [69], [622, 759], [221], [257], [28], [328], [826], [821], [15], [73], [24], [357], [957], [29], [438], [521], [134], [866], [147], [187], [], [289], [671], [138], [26], [], [758], [738, 211], [617, 823], [613], [777], [217], [], [458], [772], [953], [835], [], [64, 55], [942], [327], [392], [871], [858], [810, 508], [833], [786], [924], [779], [586], [612], [402], [318], [842], [782], [], [673, 526, 527, 782, 664, 508], [501], [536], [153], [928], [819], [2, 3], [55], [138], [57], [661], [659], [173], [683], [655], [114], [669], [357], [887, 857], [277], [114], [616], [145], [355], [607], [2, 3], [385, 101], [859], [94], [813, 909], [896], [875], [652], [0, 389, 758], [984], [], [388], [74, 815], [11], [785], [540], [904], [], [860], [397], [810, 878], [489], [299], [171], [325], [546], [659], [555], [600], [437], [936], [353], [528], [739], [839], [727], [967], [121], [638, 639], [], [980], [196, 197, 198, 199], [258], [714], [], [729], [927], [67], [322], [579], [342], [8], [904, 905], [767], [40, 911, 27], [935], [296], [738], [882], [15], [439], [164], [580], [77], [331, 332, 338], [432], [150], [292], [188], [563], [391], [522], [492], [353], [804, 844], [660], [668], [619], [262], [661], [165], [683], [454, 624], [403], [201], [341], [90], [669], [474], [199], [942], [], [310], [859], [889], [482], [863], [925], [910], [], [491], [350], [183], [795], [586], [260], [681, 620], [265, 266], [439], [735], [32], [984], [], [668], [494], [278], [290], [292], [884], [785], [488], [833], [362], [], [128], [204], [583], [18], [127], [738, 968, 505], [99], [478], [155], [439], [111], [377], [976], [174], [836, 837, 459, 445], [466], [917], [12], [145], [883], [57], [898], [935], [918, 762, 923], [769], [], [130], [335], [760], [376], [937], [224, 223], [130], [621], [], [30], [497], [593], [], [658], [32, 28], [116, 126], [357], [277], [129], [407], [368], [515], [11], [408], [], [103], [57], [865], [506], [849], [770], [827], [730], [207], [562], [159], [184], [976], [74], [737, 651], [333], [309], [203], [533], [994], [25], [467], [771], [897], [332], [584], [269], [673, 527, 664, 508], [618, 562], [581, 479, 717], [454], [204], [267], [346], [706, 532], [959], [885], [434], [643], [142], [249], [505], [99], [341], [805], [], [373], [394], [789], [988], [870], [], [306], [790, 126], [793, 259], [115], [264], [510], [70], [772], [154, 478], [], [755], [36], [638, 639], [523], [765], [335], [911], [119], [], [169], [617], [276], [143], [310], [550], [228], [809], [357], [812], [565], [273], [457, 834], [64], [502], [336], [899, 521, 532, 412], [235], [696], [499], [175], [16], [165], [537], [783], [284], [606], [], [483], [], [935], [854], [917], [555], [444], [867], [578, 982], [633, 316], [714], [719], [25, 28], [280], [305], [], [793], [42], [587], [160], [261], [736], [281, 285], [687], [776], [918], [], [534, 729], [497], [987, 998], [352, 353], [661], [332], [143], [], [397], [495, 692], [655, 630, 474], [42], [979], [982, 703], [506], [672], [135], [4], [963], [205], [836, 837, 919], [364], [112], [645], [802], [481, 453], [691], [604], [633], [990], [160], [747], [425], [650], [73], [], [870], [954, 955, 953, 923], [628], [968], [41], [143], [420], [917], [142], [790], [988], [329], [568], [491], [954], [677], [7, 8], [609], [252], [527, 782, 673, 475], [800], [377], [439], [549], [213], [0], [76], [162], [905], [253], [971], [362], [913], [900], [290], [842, 693, 472, 445], [336], [774], [301], [621], [453, 454], [238], [43], [96], [], [989], [487, 620], [881], [761], [970, 795], [736], [80], [455], [651], [858], [228], [867, 919], [932], [401], [631], [827], [771], [999, 700], [651, 760], [875], [242, 243], [651, 631], [85], [165], [141, 142], [615], [244], [28], [575], [59], [700, 999], [928, 923, 960], [338], [557], [267], [868], [354], [601], [685], [520], [933, 934], [88], [714], [181], [459], [711], [762], [860, 919], [358], [13], [96], [472], [165], [694], [519], [], [839], [], [618, 813, 910, 532], [67], [80], [20], [113], [515, 695], [341], [608], [41], [18], [252], [738], [406], [980], [384], [838], [474], [161, 162, 167], [49], [], [84], [149], [406], [], [652, 413], [352], [760], [40], [82], [581, 479], [734], [57], [676], [115], [12], [363], [144], [733], [921], [945, 948, 950, 953], [449, 975], [127], [844], [986], [281], [471], [310], [273], [153, 203], [473], [258], [256], [1], [225], [426], [869, 879], [502], [227], [405], [59], [968, 504], [895], [444], [11], [770], [18], [893], [351], [50], [507], [775], [695], [592], [339], [748, 911, 692], [166], [515, 775], [916], [250], [214], [174], [552], [23], [432], [942], [257, 222], [], [130], [999, 700], [236], [395], [947], [637], [313], [141, 142], [818, 819, 854], [673, 487, 810], [480], [416, 638, 639], [699, 541, 542], [659], [966, 572], [806], [934], [518, 671], [220], [490], [276], [81], [682, 458], [805], [], [815], [581], [515], [543], [751], [142], [880], [2], [634], [792], [], [], [684], [665, 670], [763], [153], [296], [568], [203], [992], [741], [157], [], [737], [570, 830], [663], [496], [735], [88], [879], [], [993], [797], [385], [], [430], [3], [465], [67], [410], [795], [605], [823], [35], [873], [251], [866], [535, 479], [990], [992], [255], [984], [659], [866], [670], [69], [], [524, 787, 915], [882], [389], [991], [], [544], [564], [896, 804], [855], [984], [692], [298], [594], [557], [372], [652, 413], [528], [562], [743], [213], [937], [916], [191], [229], [923], [980], [], [630], [411], [695], [411], [895], [602], [68], [132], [51], [198], [710], [799, 831], [844], [1], [580], [798], [972], [64, 59], [375], [434], [10], [951], [220], [898], [195, 805], [60], [847], [551, 629], [964], [379], [986], [842], [205], [594], [191], [225], [229], [894], [794, 435], [611], [891], [99], [646], [941], [385], [358, 173], [774], [837, 836, 733], [], [703], [560], [268], [974], [150], [114], [], [390, 973], [216], [753], [131], [682], [822], [666], [416], [725], [291], [270], [212], [905], [567], [345, 690], [149], [920], [777], [94], [974], [760], [427], [723], [16], [528], [178], [562], [459, 543], [888], [280], [], [215], [], [740], [317], [709], [539], [239], [969, 692], [], [25], [736], [529, 830, 610], [63], [841, 697], [274], [63], [834, 869], [760], [396], [476], [71], [401], [382], [468], [923, 521, 762, 926], [357], [832], [977, 638, 639], [404], [11], [5], [433, 638, 639], [160], [941], [], [165], [247], [434], [576], [892], [259, 462], [938, 939, 943], [75], [619], [373, 463], [538, 668], [497, 884, 406], [892], [190], [392], [615], [30], [37], [364], [616], [414, 518, 535], [821], [487], [538], [], [817], [31], [977, 978], [646], [502], [434], [641], [355], [961], [64, 55], [752, 852], [382], [470], [253], [150], [110], [5], [195], [399, 636], [], [45], [816], [806, 630], [402], [987, 998], [617], [190], [626], [720], [6], [547], [980], [911, 539], [862], [208], [518, 489, 671], [], [640], [107], [917, 921], [90], [138], [508], [193, 186], [113], [118], [88], [520], [179], [98], [132], [196], [265, 267], [106], [76], [33], [180], [120], [848], [724], [404], [170], [], [584], [847], [644], [774], [28], [302], [819, 546], [399], [70], [769, 798], [578, 982], [831], [367], [919], [135], [161, 168], [455], [94], [944], [174], [616], [776], [], [115], [867], [733], [644], [849, 285], [955], [866], [78], [255], [911, 533], [532], [97], [153], [638, 639], [911], [439, 570, 764], [433], [847], [893], [855], [], [335], [302], [477], [543], [446], [333], [354], [940], [314], [560], [126], [358], [6], [553], [], [692, 886], [311], [], [251], [841, 759], [32, 30], [], [833], [518], [233], [21], [348], [296], [873], [748], [355, 489], [762], [853], [137], [495], [610], [279], [563], [707], [], [821, 703, 839, 975], [970, 979], [102], [601], [291], [637], [121], [317], [963], [95], [364], [838, 487, 459, 445, 638], [307], [512, 473], [224, 214], [475], [870], [23], [866], [], [156], [151], [336], [66], [901], [], [229], [757], [117], [221], [521, 809, 909, 987, 926], [844], [287], [404], [524], [756], [629], [], [], [309], [799], [173], [216], [448], [619, 846], [953], [13], [140], [], [768, 836, 842], [561], [573, 518], [629], [594, 982], [398], [68], [588, 790], [204, 185], [836, 837], [685], [279], [350], [688], [271], [55], [70], [645], [673, 526, 527, 664, 508], [607], [162], [97], [985], [515, 643], [139], [942], [278], [553], [291], [749], [], [], [205], [632], [827], [982], [701], [809, 925], [194], [881], [683, 432, 566], [182], [819], [142], [178], [989], [37], [421, 525, 975], [260], [305], [566], [846], [628], [556], [706, 789, 539, 799], [379], [254], [418], [369], [254], [770], [709], [353], [834, 457, 630], [], [360], [270], [950], [928, 868, 923], [760], [761], [23], [262], [194], [795], [31], [68], [252], [735], [358, 359], [682, 781], [725], [96], [763], [533], [809], [206], [873], [392], [798], [208, 179], [307], [606], [834, 515, 836, 837, 906], [628], [231, 156], [742, 872], [405], [651, 760, 827], [541], [802], [178], [70], [455], [292], [819, 854], [995], [393, 983], [775], [280], [13], [3], [946], [204], [], [136], [213], [470], [344], [596], [382], [748, 636], [909], [793], [186], [141], [616], [548, 851, 598, 632], [150], [], [693], [275], [740, 519], [224, 223], [], [], [622], [75], [720], [928, 960, 923], [772], [340], [819], [378], [804, 469], [789, 421], [273], [852, 186], [644], [], [301], [236], [9], [621], [15], [96], [48], [296], [222], [281], [715, 652, 764, 413], [249, 250], [549], [207], [860], [373], [80], [863], [312, 311], [552], [763], [494], [55], [562], [38], [131], [517], [276], [472, 693], [], [372], [566], [], [445], [495], [741], [823], [89], [549], [33], [288], [350], [753], [0], [979], [540], [921], [985], [], [194], [216], [866], [737], [779, 506], [802], [928], [416], [614], [713, 742], [218], [164], [951], [810, 878], [874], [435], [850, 732, 759], [729], [796], [734], [521], [698], [526, 453, 454, 608, 740], [], [182], [166], [102], [225, 465], [960], [896], [188], [989], [313], [322], [610, 862], [396], [327], [744, 657], [928, 960, 868, 415], [], [240, 238], [286], [557], [654], [929], [358], [302], [905], [989], [307], [578, 689], [943], [213], [933], [], [827], [694], [705], [360], [710], [341, 342], [836, 837], [538], [780], [205], [762, 923, 122], [70], [], [48], [273], [835], [809, 925], [448], [747], [220], [850], [187], [825], [472, 693], [79], [593], [719], [], [316], [723], [203], [398], [736], [581, 733, 479], [790, 588], [946], [86], [500], [474], [386, 101], [602], [836, 837], [131], [353], [18], [928], [329, 842], [705, 547], [681, 620, 526, 916, 906], [801], [274, 271], [], [713], [], [], [619, 883], [972], [614, 887], [313], [287], [829], [432], [355], [541], [40], [543], [648], [261], [459, 978, 445], [843], [572, 966], [487], [43], [], [964], [229], [254], [138], [870], [827, 849], [332], [247], [148], [434, 912], [599], [731], [100], [], [538], [498], [82], [671], [153], [875], [422], [72], [840], [548], [586, 864], [891], [930, 868, 967, 968, 504], [308], [301, 918], [128], [62], [750], [], [133], [720], [194], [13], [481], [575], [954, 943], [180], [78], [814], [42], [], [221], [301], [], [67], [258], [730], [3], [355], [240], [284], [879], [513], [690], [695], [280], [716], [300], [912], [574], [205], [131], [847], [843], [348], [800], [674], [658], [608, 869], [734], [459], [714], [295], [723], [699], [489], [607], [148], [809], [69], [347], [866], [203, 156], [412], [225], [35], [488], [16], [951], [], [293], [253], [189], [809, 926], [934, 923], [906], [509], [485], [517], [410], [772, 679, 488], [448], [526], [358], [721, 761, 831], [324], [536], [991], [892, 409], [613], [577], [47], [349], [406], [810, 655, 508], [461], [843], [594], [670], [], [483], [471], [248, 249], [668], [46], [392], [948], [], [694], [794], [562], [858, 467], [723], [902, 488], [650], [218, 215], [], [], [292], [40], [948, 572], [71], [918], [517], [146], [642], [181], [868, 923, 968, 725], [809, 659, 729], [819], [520], [440], [258], [792], [308], [795], [701], [304], [70], [518], [619, 846], [165], [276], [364], [392], [698], [930, 931], [105], [], [188], [221], [315], [169], [428], [77], [], [596], [288], [710, 767], [957], [553, 526], [58], [861], [305], [], [612], [619, 846], [907], [611], [152], [44], [456], [3], [814], [362], [896], [866], [523], [489, 274], [844], [905, 789], [], [917], [442], [], [199], [], [11], [686], [485, 685, 754], [334], [293], [505], [], [232, 852], [0, 758], [388], [189], [307], [], [], [375], [424, 423], [518], [38], [617], [111], [421], [752], [725], [908, 895], [107, 108], [291], [386], [707], [44], [578, 689], [114], [73, 74], [166], [668], [421], [259], [601], [908], [428], [881], [836, 837], [198], [302], [], [545], [226], [], [240, 241, 238], [834, 906], [235], [532], [517, 554, 536], [1], [445, 977, 236], [301], [675], [453, 454], [395, 758], [24], [263], [965], [301], [684], [558], [755], [684], [769, 633], [739], [151], [996, 309], [263], [154, 155], [928, 960], [], [613], [979], [813, 501], [779], [458], [728], [681, 620, 526], [319], [], [4], [131], [12], [182], [568], [608, 836, 837, 655, 636], [992], [802], [936, 943], [279], [34], [283], [540], [810], [364], [928], [283], [571], [521], [968, 504], [525], [370], [200, 155, 204], [481], [851], [396], [382], [652, 413], [], [232], [278], [625], [924], [342], [242], [829], [577], [264], [435], [440], [771, 507], [616], [674], [56], [472], [], [457], [466], [714, 542], [], [254], [162], [703], [395], [], [267], [140], [147], [303], [916], [616, 695], [971], [559, 764, 413], [835], [803], [469], [29], [341], [310], [], [9], [704], [270], [459], [3], [517, 839, 718], [756], [517, 975, 977, 536], [], [858], [], [947], [703], [228], [294], [963], [394], [864], [915], [979], [253], [380], [896], [117], [], [583], [836, 837], [794], [310], [701], [101], [552], [705, 888], [687], [15], [627], [], [552], [48], [364], [428], [471], [221], [549], [813, 910], [732], [279], [456], [711], [770], [28], [132], [826], [920], [344], [374], [237], [496], [], [96], [622], [503], [910, 659], [171], [585], [135], [393], [266], [669], [], [], [859], [66], [989], [569], [242], [962, 813, 827], [716], [746], [761], [346], [439], [113], [463, 412], [497, 442], [452, 689], [205], [673, 742, 681, 526, 527, 662, 664, 508], [741], [440], [874], [727], [660], [127], [0], [180, 195], [311], [], [525], [442, 494], [353], [453, 818], [786], [100], [240, 241, 238], [916], [160], [757], [164], [293], [654], [476], [919], [926], [713], [783], [262], [388], [829], [902], [438], [], [282], [521], [364], [177], [833], [658], [596], [215, 218], [576], [358], [752], [424, 423], [223], [78], [859], [605], [193], [156], [841], [82], [643], [77], [403], [173], [514, 836, 837], [], [420], [111], [33], [56], [249], [88], [830], [673, 478], [199], [136], [814], [591], [128], [], [984], [158], [372], [205, 750, 721], [555], [824, 633], [833], [125], [693], [589], [216], [571], [681, 491], [269, 249], [206], [602], [363], [538, 668], [155], [], [352], [389, 567], [], [4], [207], [616, 625, 724], [953], [], [300], [551], [586], [81], [190], [97], [104], [362], [714], [550], [160], [949], [883], [759], [989], [628], [741], [884], [803], [142], [95], [945], [208], [567, 827, 926], [204], [819], [654, 671], [55], [449], [235], [450], [793], [240, 241], [], [417], [436], [22], [2], [37], [886], [618], [277], [642, 462], [317], [733], [349], [754], [796], [425], [908, 404], [314], [416, 638], [8], [518], [893], [], [671], [574], [908, 895], [300], [224], [], [341], [292], [972, 976], [971], [918], [578, 689, 501, 885], [846], [444], [938], [], [471], [317], [657], [898], [836, 869], [302], [48], [175], [949], [728], [681, 620, 478], [14], [645], [141], [399], [614], [359], [], [920], [865, 411], [28], [758], [976], [318], [971], [409], [677], [500], [556], [279], [], [178], [616, 494], [206], [], [988], [673, 681, 810, 620, 527, 782, 664, 508], [928, 923], [439], [71], [37], [853], [881], [172], [170], [595], [156], [889, 541], [491], [45], [906], [866], [649], [901], [232, 249], [753], [149], [511], [895], [465], [590], [376], [545], [39], [364], [476], [782, 664], [3], [63], [599], [684], [69], [311], [681, 620, 508], [127], [611], [27], [667], [726], [865], [630], [135], [545], [838], [343], [49], [], [624], [123], [418], [205], [883], [993], [243], [766], [53], [151], [31], [215], [427], [213], [880], [208], [], [475], [621], [526, 673, 681, 620], [], [104], [578, 654], [490], [235], [702], [720], [832], [732], [528], [999, 861], [503], [639], [745], [362], [238], [731], [738], [204], [966, 907], [598], [719], [11], [444], [667], [215], [151], [407], [985], [50], [314], [], [216], [954], [], [962], [108, 973], [294], [], [945], [317], [16, 19], [388], [788], [806], [23], [883], [210], [836, 837], [929], [525], [], [301], [534], [959], [], [836, 775, 655], [461], [127], [822], [800], [996], [524, 461], [597], [169], [857], [36], [101], [21], [570], [255], [185], [635], [836, 457], [926], [470], [71], [851, 548], [290], [250], [489, 251], [], [333], [373], [820], [175], [], [453, 493], [318], [], [547], [165], [653], [911, 824, 474], [695], [395], [123], [430], [334], [657], [908, 814], [258, 259], [596], [523, 664], [834, 906], [188], [829], [563], [14], [724, 536], [235], [687], [147], [154], [936], [69], [760], [593], [606], [682], [114], [646], [857], [538, 185, 975], [897, 651, 760], [880], [112, 125], [922], [881], [662], [831, 721, 608], [661], [684], [391], [859], [518], [574], [902, 488], [967, 968, 504], [268], [529, 667], [61], [373], [873], [183], [785], [], [], [882], [929], [577], [434], [143], [804], [381], [], [], [436], [306], [953], [749], [367], [479], [522], [16], [815], [36], [58], [85], [610, 770, 862], [927], [58], [651], [954], [864], [830], [298], [212], [], [262], [987], [665], [558, 593, 819], [873], [276], [35, 36], [467], [956], [713], [753], [948], [231], [564], [899, 532, 725], [69], [947], [869], [423], [86], [838, 631], [2], [416, 702], [816], [825], [696], [925], [177], [35], [40, 46], [176], [242], [473], [], [], [115], [162, 167], [92], [682], [], [177], [], [34], [990], [968, 809, 849, 659, 923], [842], [430], [162, 166], [505, 899], [602], [907], [582], [488, 679, 455], [], [941], [780], [681, 620, 760, 508], [750], [190], [761, 831], [98], [], [513, 439], [543], [], [578, 982, 601], [949], [382], [149], [199], [235], [704], [], [794], [892], [784], [143], [268], [274], [138], [455, 440, 444], [171], [694], [779], [899], [883], [813, 942], [821], [230, 231], [], [296], [581], [98], [365], [387], [213], [756], [286], [56], [514], [300], [446], [650], [24], [357], [826], [673, 553, 526, 527, 782, 664, 508], [896, 897, 827], [475], [388], [574], [929], [992], [941], [648], [810, 508], [301], [761], [182], [294], [102], [356], [], [352], [430], [957], [9], [191], [419], [369], [207], [825], [28], [307], [996], [141], [119], [997, 947], [300], [617, 823], [59], [303], [368], [333], [130], [83, 883], [901], [336], [396, 973], [780], [784, 792, 477], [], [], [908, 404, 895], [625], [369], [125], [743], [300], [131], [678], [865], [168, 159], [666], [56], [70], [436, 581, 479], [574], [530], [728, 412], [374], [], [300], [716], [304], [791, 582], [265, 266], [482], [152], [811], [42], [971], [697, 823], [956], [451], [685], [20], [290], [709, 710, 526, 692], [73], [71], [455], [707], [62], [185], [545], [844], [933], [796], [405], [14], [783], [377], [979], [], [], [451], [230], [], [680], [811], [988], [400, 667], [179], [879], [587], [690], [428], [384], [366], [727], [923], [518, 671], [586], [905], [54], [223], [836, 837, 445], [565], [328, 973, 991], [212], [179], [782, 851, 664], [562], [703], [340], [302], [501], [13], [758], [229], [855], [122], [525], [430], [978], [233], [824], [360], [67], [461], [292], [464], [949, 927], [939, 943], [552], [222], [106], [346], [487], [256], [623], [202], [992], [502], [837, 433, 445], [329], [754], [520], [117], [642], [39, 47, 978], [619, 846], [889], [30], [92], [426], [231], [442], [], [373], [323], [894], [], [], [84], [738], [790], [604], [869], [846], [581], [518, 880], [], [791], [99, 8, 730], [626], [587], [673, 742, 664, 526, 527, 782, 508], [53], [489, 981], [387], [985], [286], [76], [110], [619, 846], [595], [388], [434, 533], [826], [745], [], [363], [879], [19], [768], [31], [475], [419, 648, 720], [938], [646], [320], [730], [240, 248], [910], [83], [900], [903], [942], [254], [910], [943, 923], [152], [775, 459], [96], [426], [216], [787], [339], [], [87], [44, 26], [3], [920], [181], [368], [858], [36], [825, 409, 892], [521], [921], [115], [911, 796], [616, 87], [832], [935, 923], [], [539], [105], [581, 479, 511], [228], [615], [290], [89, 951], [515], [500], [11], [527], [321], [603], [96], [94], [246], [57], [8, 7], [342], [14], [967, 968, 923], [150], [624], [575], [8], [854], [], [], [483], [124], [921, 667], [976], [], [510], [424, 423], [127], [197, 205], [403], [874], [658], [770, 788], [213], [773], [256], [744, 657], [675], [711], [595], [], [203], [849], [], [], [673, 742, 620, 526, 527, 664, 508], [513, 903], [551], [564], [358], [176], [839, 821], [925], [388], [354], [350], [253], [399, 501], [561], [598], [794], [804], [820], [929], [123], [529], [407], [64], [515, 593], [823], [366], [896], [907, 572], [], [], [596], [453, 454, 905, 750], [992], [169], [190], [557, 692, 509], [], [1], [968, 659], [], [805], [463], [187], [911, 636], [774], [], [976], [497], [999, 861], [636], [693, 472], [], [571], [971], [523], [548], [38], [808], [915], [652, 683], [1], [614], [261], [75], [929, 245], [], [206], [311], [923, 71, 868], [47], [], [538], [984], [608, 514], [230], [372], [549, 742], [981], [949], [196], [641, 642], [521], [267], [919, 858], [823], [297], [405], [289], [674], [410], [2], [758], [233], [199], [915], [407], [610], [99], [332], [723], [970, 979], [326], [897], [226], [898, 681, 620], [306], [248, 249], [], [900], [644], [774], [763], [8], [813, 910], [868], [108], [190], [516, 431], [839], [358], [], [612], [22], [914, 484], [258], [26], [384], [190], [810, 878], [954], [], [699], [902], [721], [959], [214], [177], [129], [232], [909, 827], [436], [781], [911, 658], [], [560], [], [82], [979], [897], [412], [800], [683], [98], [109], [311], [635], [686], [929], [320], [778], [53], [10], [683], [980], [97], [987, 998], [883], [], [970], [575], [177], [394], [948], [914], [125], [971], [374], [396], [703], [449, 979], [122], [550, 967, 968, 505], [], [198], [654], [596], [933, 934], [352], [608, 806], [579], [681, 810, 620, 508], [23], [317], [585], [877], [836, 837, 971], [6], [747], [375], [], [172], [481, 482], [173], [66, 68], [770], [777, 623], [386, 101], [36], [242, 243], [686], [472], [86], [219], [868, 949, 953], [905, 919], [336], [952], [40], [337], [2, 3], [224], [237], [588, 610, 492, 636], [114], [441, 572], [169], [637], [768, 560], [963, 966, 762, 923], [809, 659], [503], [], [792], [777], [], [472], [422], [416], [431], [731], [877], [395], [76], [6], [767], [314], [635], [196], [934], [360], [783], [344], [710], [164], [267], [294], [807], [571], [343], [359], [922, 918], [840], [640], [642], [484], [116], [170], [456], [267], [350], [], [132], [129], [71], [407], [88], [992], [488, 600], [626], [919], [513, 650, 819], [538], [732], [342], [701], [391, 758], [836, 837, 842, 445], [935], [927], [507], [923], [892], [205], [520], [96], [171], [675, 671], [800], [], [492], [518, 691, 570], [106], [62], [988], [463, 696], [], [], [116], [509], [834], [], [713], [425], [568], [492], [39], [631], [785], [908, 895], [124], [963, 335], [390], [969, 474], [221], [241], [876, 435], [199], [763, 597], [681, 620, 508], [162], [278], [], [940], [672], [193], [311], [814, 977, 978], [275], [952], [416], [136], [], [150], [387], [940], [448], [350], [967], [974], [120], [], [514], [723], [310], [287], [536], [234], [894], [52], [213], [842], [898], [], [810, 878], [608, 836, 837, 841], [193], [256], [923], [391], [617], [269], [663], [545], [], [], [608], [762, 532], [146], [297], [578, 630, 982, 601], [26], [390], [128], [189], [], [880, 414, 671], [914], [383], [17], [50], [512], [], [694], [248, 250], [252], [458], [162], [511], [600], [459], [961], [553, 446], [472], [673, 664, 526, 527, 632, 508], [522], [351], [421], [679], [360], [918], [914], [], [500], [418, 709, 710], [597], [974], [122], [976], [938], [92], [751], [509], [386], [109], [539], [277], [309], [802], [132], [143], [], [865, 968], [773], [962], [447], [], [805], [778], [737], [285], [992], [636], [44], [477], [607], [127], [71], [470], [250], [], [], [518, 665, 671], [771], [841, 610], [273], [694], [997], [366], [230], [72], [245], [238, 241], [653], [313], [879], [62], [911, 533], [390], [843], [18], [160], [988], [965], [812], [350], [813], [930, 934], [1], [689], [786], [351], [934], [403], [676, 597], [45], [456, 777, 623, 787], [367], [274], [568], [299], [679], [563], [327], [650], [829], [679], [633], [996], [644], [422], [131, 134], [719], [], [923], [17], [929], [416], [311], [708], [679], [467, 766], [898, 596], [11], [345, 690], [289], [673, 526, 527, 782, 664, 508], [], [705, 460, 975], [159], [712], [288, 290], [594], [274, 277], [732], [808, 515], [426], [75], [174], [864], [175], [902], [612], [332], [780], [372], [802], [], [809, 923], [507], [299], [228], [718, 821], [763], [858], [867], [789], [38, 44, 463, 26], [654], [], [578, 601, 415], [747], [269], [995], [466], [935], [579], [56], [546, 650, 819], [62], [483], [272], [608], [450], [776], [295], [677], [110], [708, 862], [70], [192], [428], [529], [791], [505], [447], [321], [156], [658], [724], [299], [898, 664, 527, 782, 508], [149], [144], [664, 508], [736], [78], [218], [234], [715], [789], [19], [462, 734], [514], [611], [926], [175, 185], [], [791], [], [116], [703], [440], [569], [626], [728], [581, 874], [404], [37], [405], [], [969], [713], [526, 664, 508], [933], [], [546, 486, 650, 402, 819, 541], [921], [471], [150], [666], [], [258, 270], [458], [419], [233], [746, 455], [403], [489], [227], [952], [533], [854], [132], [17], [23], [814], [114], [], [825], [205], [993], [581], [390], [445], [832], [89], [110], [711], [496], [226, 698], [911], [9], [252], [836, 837, 123], [112], [759], [263], [11], [724], [647], [693], [159], [814, 975], [711, 631], [322], [881], [27], [575], [823], [342], [137], [288], [422, 543], [304], [640], [613], [371], [24], [739], [440, 831, 455, 721, 737], [472, 693], [99], [586], [568], [], [735], [584], [299], [480], [225], [487], [23], [144], [673, 681, 620, 508], [69], [794], [717], [], [544], [447], [476], [735], [887], [484], [78], [835, 858], [251], [634], [363], [397], [685], [809, 925], [283], [543], [935], [643], [513], [521], [30], [31], [412], [957], [], [440], [394, 758], [223], [872, 745, 761], [292], [647], [326], [792], [758], [8, 7], [561], [604], [557], [47], [725], [942], [327], [267], [987, 924], [488], [956], [347], [135], [817], [819], [530, 531, 409, 892], [489], [155], [859], [420], [359], [298], [553], [608], [456], [349], [178], [578, 601], [217], [950], [673, 526, 527, 664, 508], [496], [920], [368], [772], [32], [518, 671], [742], [476], [], [993], [677, 587, 783], [659], [750, 189], [216], [626], [823], [105], [394], [505], [899], [100], [], [663], [94], [301], [551], [482, 754, 761], [], [755], [249], [71], [11], [673, 526, 527, 664, 508], [235], [847], [], [962, 923], [816], [150], [], [257], [161], [819, 601], [551], [424, 423], [892], [512], [723], [298], [884], [908, 404], [812], [119], [171], [968, 849], [265], [346], [853], [745], [313], [911, 824], [921], [763], [913], [600], [260], [973, 991], [884], [49], [625], [651], [203], [136], [307], [40, 46], [634], [488], [248, 250], [853], [820], [288, 293], [802], [601], [100], [679], [], [875], [168], [111], [961], [452, 911], [], [727], [588], [376], [239], [39], [84], [658], [497, 442], [], [24], [320], [755], [890], [236], [875], [559], [5], [438], [205], [796], [929], [547], [99], [431], [88], [847], [369], [25], [565], [674], [31], [], [383], [349], [101], [944], [476], [494], [10], [328], [159], [505], [563], [680], [83], [679], [140], [258], [768], [335], [122], [125], [876], [371], [65], [661], [829], [945], [618, 910], [578, 457, 689, 982, 601], [778], [785], [741], [923], [546, 650, 818, 819], [589], [40], [798], [148], [876], [], [66], [], [551], [], [713, 742], [990], [], [549, 616], [604], [50], [918], [673, 742, 620, 664, 526, 527, 632, 508], [784], [562], [411, 849, 762], [], [], [299], [874], [744, 657], [968], [904], [84], [533], [200, 232], [18], [469], [], [484], [622], [232], [398], [65], [680, 529], [69], [857], [908, 404], [85], [990], [239], [905, 750, 894, 799], [869], [82], [861], [872, 759], [572], [796], [572], [901, 907], [431], [722], [621], [], [252], [987], [487], [9], [480], [117], [310, 314], [681, 620], [10, 15], [977], [124], [91], [818], [827], [232], [964], [], [283], [14], [435], [220], [155], [152], [666], [805], [642, 542], [822], [579, 875], [205], [534], [515], [401], [744, 657], [691], [801], [548, 851], [22], [181], [141], [736], [42], [28], [431], [339], [152], [916], [581, 479], [74], [182], [], [280, 985], [578], [687], [], [262], [162], [299], [386, 101], [451], [869], [673, 664, 526, 527, 632, 508], [844, 539], [431], [297], [65], [369], [45], [486, 889], [568], [940], [], [], [260], [405], [353], [450], [104], [825], [144], [99], [], [93], [956], [27], [452, 911], [157], [62], [344], [493], [225], [195], [181], [538], [], [728], [975, 616], [631], [421], [805], [380], [204], [616, 600], [258], [95], [398], [364], [518], [], [847], [570], [890, 445], [548, 851, 831, 598], [798], [623], [731], [581, 479, 817, 511], [5, 390, 973], [195], [824, 834], [673, 526, 527, 664, 508], [366], [864], [214], [882], [991], [239, 222], [766], [54], [744], [17], [155], [757], [153], [183], [223], [980], [451], [805], [472], [326], [5], [179], [107], [518, 880], [350], [339], [626], [489, 919, 412], [666], [932], [295], [890], [417], [101], [631], [141], [234], [382], [959], [6], [293], [871], [90], [224], [874], [97], [354], [633], [454], [331], [948, 950, 951], [486], [383], [604], [996], [998, 987], [936], [570], [392], [255], [694], [308], [311], [337], [359], [901], [571], [495], [881], [423], [334], [289], [741], [309, 599], [19], [109], [653], [929], [581, 734], [455], [167], [125], [102], [722], [40, 46], [443], [937], [371], [341], [867], [463], [882], [773], [111, 52], [54], [82], [244], [601], [39], [481, 482], [638, 639], [449, 718, 733], [716], [738, 559], [652], [331], [236], [147], [765], [], [932], [110], [654], [669], [29], [420], [69], [69], [524, 461], [918], [251], [], [281, 283], [139], [627, 479], [79], [736], [872, 949], [488, 535], [126], [716], [409], [909, 910], [81], [210], [21], [92], [22], [], [26], [650], [102], [], [404], [33], [], [845], [33, 973], [471], [898, 605], [425], [255], [117], [], [962], [], [198], [246], [421, 506], [683], [877], [66, 68], [756], [196], [821], [603], [626], [136], [181], [752, 852], [533], [95], [488], [549], [531], [282], [358], [703], [152], [749], [696], [89], [564, 750], [392], [914], [18], [457], [867], [602], [825], [705, 547, 733], [190], [281], [661], [27], [488], [941, 923], [445], [864], [945], [], [724], [905], [229], [530], [82], [948], [342], [887], [397], [354], [497], [143], [669], [119], [368], [360], [17], [], [52], [968, 849], [235], [401], [581, 479, 436], [899], [890], [314], [662], [421], [975], [376], [851], [], [234], [543], [], [937], [395, 758], [537], [367], [8, 7], [553], [850], [395], [292], [736], [772], [771], [75], [54, 60], [330], [754], [399], [967, 968, 504], [], [241], [322], [115], [681, 810, 620, 662], [820], [466], [444], [], [278], [477], [673, 526, 527, 664, 508], [582, 998, 987], [148], [], [785], [497], [537], [461], [900], [927], [], [621], [25], [106], [153], [128], [233], [10], [268], [284], [], [397], [], [107], [610, 976], [191], [717], [931], [718], [528], [326], [728, 458], [241], [42, 44], [676], [966, 907], [149], [703], [561], [852], [238], [100], [851, 532, 825], [324], [450], [552], [852], [549, 633], [162, 166, 167], [417, 975], [535], [483], [], [72], [55], [849, 505], [], [581], [619], [809], [912], [], [279], [259], [530], [386, 101], [432], [907], [514, 841, 608, 610, 630, 636], [310], [162, 167], [251, 246], [753], [935], [863], [945], [946, 322], [38], [277], [231], [652], [227], [494], [688], [832], [743], [592], [365], [169], [679], [742], [487], [405], [975], [481, 482], [71], [291], [340], [937, 923], [557], [849], [961], [729], [], [568], [341], [849], [760], [], [260], [254], [759], [934], [847], [418], [931], [846, 526], [952], [943], [], [179], [58], [960], [320], [293], [462], [297], [240, 241], [16], [520], [295], [488], [323], [512, 473], [438], [39], [732], [602], [721], [31], [247], [621], [308], [45], [], [86], [470, 736], [581, 479, 817, 511], [899], [118], [641], [459], [101], [98], [368], [139], [690], [186], [205, 246], [582, 951], [35, 37], [518, 665], [754], [195], [402], [576], [418, 709], [733], [159], [892], [100], [44, 26], [903], [801], [263], [734], [672], [997], [918], [992], [273], [913], [322], [426], [298], [869, 879], [66], [872, 759], [212], [452], [578], [456], [886], [93], [671], [248, 250], [572], [131], [62], [52], [153, 155], [718, 821], [546], [189], [28], [418], [32], [20], [21], [829], [157], [854], [201], [515], [795], [382], [955], [369, 381], [300], [297], [97], [566], [225], [115], [774, 731, 861], [314], [501], [207], [985], [675], [832], [833], [], [479], [287], [700], [536], [336], [392], [], [405], [867], [741], [603], [951, 949, 950, 954, 923], [119], [942], [460], [522], [800], [708], [456], [91], [104], [70], [886], [73, 815], [208], [75], [440], [100], [582], [821], [314], [363], [661], [94], [651], [], [386], [140, 142], [188], [597], [324], [281], [31], [992], [672], [365], [569], [174], [348, 349], [991], [604], [], [158], [629], [], [603], [789], [568, 715, 716], [385], [983], [197], [725], [38], [884], [784, 477], [386], [], [30], [567], [779], [27], [481, 482], [958], [852, 752], [591], [680], [760], [523, 655, 765], [721, 750], [673, 681, 620, 526, 527, 664, 508], [125], [716, 912], [411], [712], [429], [850], [747], [472], [269], [149], [739], [489], [], [886], [278], [706], [614], [134], [678], [833], [680, 910, 659, 828], [267], [900], [662], [403], [147], [847], [65], [169], [], [662], [293], [392], [205, 589], [435], [271], [75], [979], [501], [238], [262], [662], [794], [949], [404], [923], [564], [734], [903], [323], [358, 359], [380], [382], [504], [76], [117], [48], [619, 846], [289], [973], [463], [877], [719], [931], [907, 572], [667], [6, 983], [500], [218], [], [937, 567], [361], [], [432], [636], [374], [], [565], [544, 672, 596], [520], [670, 518], [652], [197], [500], [30], [39], [92], [189, 175], [733], [], [458], [], [737], [295], [810], [506], [652, 413], [424, 423], [117], [428], [455], [95], [458], [659, 666], [305], [521], [239], [775, 842, 616], [355], [908], [755], [72], [293], [330], [907, 966], [764], [707], [627], [952], [614, 887], [789], [183], [347], [441], [], [867], [694], [637], [454], [174], [896, 804], [247], [851], [185], [985], [239], [608, 836, 837, 582], [127], [], [114], [], [552], [10], [683], [70], [], [512], [820], [162, 168], [247], [845], [457], [852], [924], [685], [842, 879, 977, 978, 445, 638, 639], [], [580], [383], [385], [250], [444], [3], [801, 433, 793], [152], [162], [393], [280], [943, 923], [562], [339], [496], [636], [203], [921, 446], [392], [], [682], [57], [179], [262], [586, 652, 413], [578, 903, 689, 885], [], [388], [347], [806, 630], [228], [494], [279], [600], [739], [131], [232], [205], [], [610, 487, 655], [674], [661], [253], [115], [842, 638, 639], [569], [649], [64, 55], [400, 667], [201], [801], [277], [49], [40, 46], [928], [287], [500], [267], [850], [674], [834], [411], [19], [758], [438], [954], [769], [964], [918], [880], [734], [555], [539], [816], [919], [339], [967, 968, 504], [213], [425, 858], [789, 799], [759], [614], [4], [401], [959], [196], [678, 487], [389], [175], [42], [275], [26], [48], [790], [764], [762], [89], [294], [667], [755], [999], [726], [847], [127], [834, 906], [774], [816], [395], [647], [853], [345], [17], [712], [628], [923, 934], [868], [800], [480], [930, 934, 936], [934], [295], [520, 516], [181], [572], [522], [894], [703], [159], [], [924], [883], [435, 794], [757], [196], [867], [22], [729], [430], [165, 852], [137], [724], [63], [324], [127], [], [4], [], [869], [256, 244], [552], [529], [171], [317], [140], [548], [750], [928, 923, 960, 927], [304], [896], [319], [974], [96], [595], [640], [320], [134], [460], [297], [65], [441], [307], [570], [343], [953], [350], [560], [195], [678], [557], [518], [], [645], [367], [576], [852], [147], [207, 219], [497], [238, 241], [748, 414], [145, 146], [235], [393], [32, 30], [559], [966, 441], [], [375], [814, 977], [107], [], [921], [710], [909, 926], [207], [834, 630, 703], [190], [466], [846], [19], [247], [283], [315], [334], [986], [], [832], [11], [57], [673, 526, 527, 782, 664, 282, 508], [513], [284], [69], [292], [], [769, 773], [185], [161, 785], [603], [516, 520, 697], [248], [514], [679, 327], [241, 238], [254], [495], [468], [312], [368], [700], [72, 815], [318], [539], [772], [263], [524], [151], [889], [656, 479], [135], [903, 689], [619, 846], [653], [951], [623], [901], [79], [32, 31], [249], [], [], [336], [377], [162], [], [234], [679], [673, 553, 526, 527, 782, 664, 508], [102], [411], [892], [437], [225], [629], [101], [582, 519, 939, 943], [868], [700], [923, 934, 933], [389], [742], [843], [98], [9], [981], [112, 327], [710], [567], [292], [], [803, 866], [205], [572], [471], [678], [473], [560], [85], [588], [105], [17], [176], [], [524, 461], [887], [488, 718, 536], [644], [555], [], [404], [813, 910, 659], [824], [582], [48], [88], [980], [85], [955], [825], [183], [756], [395], [530], [163], [845], [132], [715], [139], [679], [549], [484], [489], [445, 638], [944], [581], [325], [40, 46], [310], [812], [850], [76], [483], [950], [854], [447], [874], [276], [710], [857], [161], [32, 30, 31], [360], [816], [1], [883], [298], [836, 837, 655, 879], [], [921], [884], [778], [754], [482], [715], [525], [927], [898, 784], [], [725], [122], [699], [403], [24], [76], [191], [761], [534], [238, 240], [526, 527, 782, 664], [], [3], [794], [174], [716], [569], [342], [261], [245], [284], [771], [662], [974], [147], [531], [768], [646], [234], [], [641], [912], [493], [975, 822, 541, 542], [752], [212, 217], [18], [353], [653], [512], [141], [388], [57], [398], [486], [836, 837], [540], [119], [901], [], [974], [357], [913], [158], [554], [105], [634], [703], [625], [299], [66, 68], [915], [115], [591], [251], [459, 445], [378], [814], [908], [188], [674], [980], [430], [940], [672], [102], [628], [323], [464], [718], [721], [15], [], [973, 108], [214], [401], [758], [390], [960], [439], [948, 957], [92], [869, 818], [621], [240, 239], [], [301], [], [25], [679], [664, 851], [319], [463], [972, 500], [332], [411], [909], [510], [524, 461], [91], [734], [], [384], [202], [81], [268], [470, 406], [509], [894], [922], [851, 548], [604], [424, 922], [658], [515, 808, 639], [995], [386, 101], [389], [825], [589], [991], [636], [866], [884], [794], [86], [449], [849], [553], [], [384], [31], [935, 937, 923], [701], [397], [972], [531], [130], [729], [891], [275], [17], [164], [], [563], [205], [575], [774], [828], [532, 762], [], [386], [482], [903], [602], [312], [257], [886], [531], [344], [161], [338], [779], [264], [340], [203], [893], [131], [958], [940], [258], [689], [29], [207], [484], [], [57], [427], [291], [], [717, 656, 436, 479], [781], [24], [513], [947, 997, 114], [720], [480], [364], [499], [47], [], [265], [802], [598], [119], [69], [916], [240, 241], [785], [593], [132], [60], [640], [997], [492], [159], [184], [505], [264], [645], [435, 876], [680], [396], [594], [919], [324, 325], [], [245, 254], [579], [551], [92], [624, 453], [], [438], [853], [826], [297], [669], [], [874], [384], [558], [682], [117], [572], [990], [42], [606], [437], [681, 810, 620], [], [87], [496], [0], [41], [632], [776], [526, 786], [627], [], [728], [562], [539], [385], [73, 815], [47], [259], [59], [48], [120], [669], [528], [322], [37], [458], [281], [950], [912], [781], [673, 613, 761, 605], [256], [214], [896], [], [518], [292], [], [318], [958], [162], [195], [800], [43], [439], [], [628], [428], [952], [41], [453, 454], [184], [123], [956, 957], [498], [935], [324], [651], [525], [704], [457], [844], [754], [], [810], [754], [107], [849], [532, 398], [791], [], [582, 790], [315], [520], [874], [138], [448], [491], [], [223], [748], [112], [213], [242], [137], [260], [523], [182], [305, 306], [868], [365], [700, 999], [636], [128], [268], [252], [694], [241, 238], [246, 251], [98], [426], [522], [948], [514], [371, 373], [487], [56], [329], [502], [456, 652], [139], [520], [833], [724], [318], [499], [148], [108], [384, 383], [305], [36], [234], [437], [73], [125], [315], [132], [373], [67], [615], [361], [919], [667, 151], [870], [90], [901], [331], [955], [766], [], [], [962, 923, 935], [136], [43], [37], [33], [178], [244], [886], [], [714], [727], [704], [435, 876], [221], [543], [126], [384], [325], [91], [498], [291], [335], [603], [], [201], [510], [666], [649], [483], [948], [232], [551], [198], [751, 479], [763], [513], [], [259], [560], [473], [877], [421, 904, 905], [], [783], [969], [468], [5], [811], [316], [35], [617], [907, 440], [408], [533], [687], [641], [963], [], [209], [782, 664], [213, 248], [899], [672], [645], [987, 998], [270], [679], [779], [849], [330], [618, 909, 828], [973], [614], [], [828], [495], [69], [624, 453, 454], [753], [623], [547], [362], [848, 632], [428], [708, 458], [893], [936], [699], [702], [228], [626], [868, 967, 968, 504], [], [], [871], [897], [481, 482], [379], [633], [904], [455], [287], [262], [432], [308], [985], [489, 733, 919], [49], [989], [316], [117], [471], [519, 907, 440], [544, 909, 828], [917, 921], [172], [697], [267], [], [140], [587, 596], [443, 836, 837], [921], [], [57], [350], [838, 631], [568], [59], [205], [310], [242], [309], [535], [518], [323], [325], [263], [20], [582, 938], [240], [279], [219], [191], [126], [466], [683, 558], [603], [988], [551], [458, 708], [131], [210], [650], [322], [783], [477], [219], [981], [2], [196], [427], [628], [304], [], [], [113], [419], [746], [34], [836, 837, 656, 785], [48], [767], [630], [892], [882], [457], [], [386, 101], [60], [5], [72], [900], [614], [717, 479], [749], [813], [563], [440], [169], [385, 101], [514, 948, 836, 837, 852, 489, 636], [388], [100], [379], [456], [400, 667], [999, 893], [809, 925], [351], [834, 906], [224, 223], [206, 221], [660], [688], [], [209], [396], [549], [], [721], [942, 923], [352], [578], [769, 71], [805], [599], [585], [639], [767], [849, 505], [581, 479], [841, 610], [313], [746], [834, 585], [839], [468], [53], [418, 709, 710, 767], [929, 968], [], [62], [783], [528], [132], [194], [137], [337], [941], [544], [594], [400, 667], [712], [], [250], [824], [], [72], [327], [714], [380], [], [577], [371], [900], [93], [111], [223], [486], [749], [637], [428], [98], [211], [], [695], [815], [890], [15], [868, 505], [891], [73], [617, 823], [], [941], [301], [681, 620], [44, 48], [668], [923], [607], [684], [322], [980], [928], [892], [572], [888], [76], [873], [518], [], [400], [136], [114], [310], [911], [599], [806], [257], [516, 431], [27], [965], [], [87], [660], [24], [540], [462], [340], [48], [], [7], [774], [624, 453], [322], [177, 170], [46], [605], [931], [], [323], [478], [148], [738], [217], [603], [908, 895], [262], [796], [517, 733], [9], [458], [997], [781], [92], [531], [], [543], [15], [548], [759], [343], [530], [335], [385], [395], [136], [105], [8], [892], [839, 718], [860], [507, 695], [664], [467], [706], [938], [735], [8, 7], [651], [72], [279], [], [921, 917], [295], [191], [996], [848, 632], [263], [609, 500], [728, 281], [280], [277], [378], [52], [588, 692, 415], [], [949], [73], [283], [182], [601], [306], [395], [40, 46], [122], [425], [862], [777], [195], [99], [539], [604], [329], [976, 979], [852], [], [659], [126], [289], [654], [551], [611], [579, 881], [563], [728], [315], [792], [931], [114], [902], [372], [424, 423], [205, 653], [149], [792], [121], [540], [], [274], [514], [128], [399], [903], [], [646], [883], [942], [734], [669], [2], [588], [615], [966], [935], [837, 518, 671], [559], [778], [], [677], [237], [645], [59], [802, 518], [251, 575], [911], [352], [619], [510], [571], [357], [330], [347], [389, 395], [300], [414], [449, 975], [85], [597], [207], [938], [11], [911], [933], [173], [823], [440], [194], [315], [308], [470], [53], [52], [491], [909, 926], [136], [118], [658], [608, 869, 824], [945], [918], [96], [242], [392], [426], [168], [989], [348, 825], [39], [638, 639], [], [350], [746], [50], [84], [650], [274], [991], [599], [295], [], [417], [317], [253], [645], [193, 187], [452, 911], [825], [406], [651, 187], [19], [932], [27], [494], [834, 906, 982], [316], [479, 511], [123], [72], [106], [514], [361], [160], [539], [54], [682, 538], [219], [162], [218], [811], [109], [312, 311], [], [452], [793], [], [488, 843], [299], [350], [289], [203], [834, 906], [572], [707], [351], [855], [866], [996], [159], [840], [948], [690], [592], [801, 570], [165], [18], [696], [655], [480], [580], [694], [707], [715], [340], [736, 681, 620], [727], [422], [208], [], [632], [142], [733], [62], [], [4], [288], [791], [838, 711], [650], [348], [790], [625], [272], [669], [208], [], [58], [525], [193], [378], [798], [565], [781], [344], [61], [293], [13], [782, 851], [239], [409], [794], [938], [944], [865], [388], [202, 189], [322], [194], [430], [371], [172], [546, 650, 819], [2, 3], [912], [394], [622, 759], [312], [945], [801, 983], [903, 789], [985], [162], [397], [769, 622], [], [331], [574], [141], [649], [567], [], [718, 637], [61], [85], [487], [578, 854], [57], [328], [583], [863], [566], [375], [], [], [240], [990], [277, 278], [625], [961], [], [969], [503, 572], [949], [508], [866], [381], [560], [27], [72], [338], [466], [375], [227], [352], [603], [137], [265], [754], [892], [589], [573], [286], [325], [754], [539], [613], [987, 923], [787], [959], [63], [192], [83], [507], [489, 86], [869], [738, 580], [385], [196], [924], [], [610], [], [414], [536, 484, 871], [538], [168, 211, 159], [143], [132], [961, 659], [858], [371], [292], [2, 3], [902], [301], [544], [100], [675], [335], [92], [309], [737], [418], [522], [907], [842], [769], [232], [482], [70], [866], [576], [732], [480], [146], [324], [574], [699], [82], [540], [591], [479], [409, 892], [], [940], [127], [29], [255], [], [282], [655, 570], [313], [30], [963], [], [213, 205], [661], [13], [741], [93], [366], [974], [329], [387], [790], [435], [328], [911, 533, 539], [564], [263], [183], [944], [98], [578, 982], [952], [656, 784, 477], [353], [786], [372], [], [142], [369], [14], [284], [540], [], [243], [976], [224, 805, 223], [], [78], [420], [687], [55], [285], [736], [203, 246], [521], [911], [320], [769], [132], [258], [891], [650], [809, 618], [514, 763, 445], [905, 493], [295], [230, 231], [74], [], [877], [770], [267], [40, 46], [521], [889], [80], [140, 142], [817], [268], [129], [69], [459, 445], [24], [176], [487], [714], [], [576], [135], [517], [929], [599], [347], [117], [802], [732], [868], [49, 50], [788], [85], [741], [642], [239], [471], [443], [481], [232], [153], [790], [826], [], [], [83], [937], [], [750], [533], [581], [], [560], [619, 844, 846, 761], [98], [514, 515], [752], [825], [75], [493], [371], [29], [328], [234], [738], [32], [644, 470], [630], [786], [354], [407], [33], [239], [], [79], [368], [166], [836, 837], [626], [916], [322], [733, 862], [205], [622, 179, 245], [781], [659, 923, 925, 809, 950], [], [190], [595], [369], [858], [861], [432], [517], [51], [836, 837, 975], [159], [682], [985], [578, 601], [645], [301], [444, 671], [326], [344], [943, 923], [], [147], [856], [597], [508], [261], [187], [722], [344], [451], [], [311, 312], [121], [535], [315], [891], [432], [448], [], [362], [209], [494], [488], [135], [217], [442], [176], [94], [276], [107], [], [793], [826], [], [880], [914], [14], [62], [567, 827], [], [828], [907], [275], [937], [851], [933], [73], [930, 415], [531], [920], [167], [422], [482], [721], [406], [], [774], [426], [438], [967, 968, 911, 504], [578, 834, 982], [382], [858], [112], [340], [169], [891], [146], [162, 167], [273], [716], [227], [662, 632, 761], [], [642], [2], [532], [638, 639], [561], [347], [400, 667], [731], [175], [582, 728], [908, 404], [67, 54], [9, 489], [805], [], [627, 654], [749], [138], [652, 465, 792, 413], [577], [180], [205], [185], [437], [302], [886], [368], [], [439], [771], [], [93], [187], [], [15], [554], [324], [], [274], [721], [883], [28], [233], [544, 909, 827], [766], [44], [320], [247], [500, 286], [], [355], [779], [681, 620, 526, 508], [453], [897], [148], [478], [658], [825], [984], [11], [399], [823], [140], [127], [309], [763, 597], [898], [675], [61], [210], [194], [997], [339], [], [962], [374], [801, 836, 445], [986], [871], [109], [619], [115], [116], [452], [751], [205], [896, 804], [382], [998], [506, 117], [656], [464], [779], [784], [289], [905, 619, 846, 831], [309, 599], [394], [10], [824, 735], [900], [683], [780, 976, 914, 405], [], [711], [371], [643], [205], [534], [290], [582], [115], [379], [221], [951], [820], [], [224], [879, 977], [159], [608, 999, 861], [523], [636], [717], [324], [759], [944], [365], [955], [996], [613], [34], [866], [579, 421], [270], [953], [538], [437], [163], [571], [], [822, 542], [86], [], [574], [681, 526, 664, 761], [608, 515, 788], [338, 333], [93], [522], [946], [560], [652, 872], [542], [944], [936], [422], [], [319], [183], [996], [157], [28], [515], [85], [187], [181], [257], [696], [106], [203], [871], [554], [19], [902], [782, 664], [901], [741], [179], [22, 23], [508], [597], [767], [389], [616], [559], [860], [510], [345], [904], [107], [481], [410], [], [588], [], [], [987, 998], [], [213], [84], [647, 968, 809, 659], [63], [368], [], [227], [700], [72], [145], [876, 435], [130], [779], [], [702], [], [489, 85], [364], [719], [658], [933], [76], [943, 692, 963, 868], [951], [172], [837, 454], [276], [622], [453, 454, 553, 917], [164], [839, 660], [271], [301], [509], [591], [13], [444], [144, 540], [801], [157], [576], [788], [18], [397], [863], [842], [196], [731], [854], [800], [153], [487], [561], [394], [460], [], [], [393, 108], [825], [442], [830, 691], [980], [140], [405], [564], [695], [191], [], [332], [13], [], [93], [234, 236], [555, 734], [169], [573], [854], [805], [405], [], [602], [256], [261], [999], [778], [879], [880, 879], [985], [262], [252], [516], [630], [31], [31], [66], [66], [202], [333], [650], [254], [428], [129], [257], [749], [79], [816], [376], [367], [344], [55], [440], [618, 813, 909, 827], [991], [74, 815], [772], [159], [712], [870], [581, 479, 511], [491], [987], [363], [336], [537], [231], [604], [862], [300], [529], [30], [948], [651], [9], [845], [673, 526, 527, 664, 508], [352, 351], [74], [234], [905, 831], [707], [441], [565], [764], [58], [291], [6], [671, 518, 535], [477], [385], [683], [44], [833], [21], [87], [], [55], [194], [713], [194], [83], [452], [830], [590], [643], [845], [613], [288, 290], [221], [362], [939], [], [882], [682], [582, 950, 951], [225], [326], [414], [158], [65], [181], [], [375], [], [710], [6], [313], [256], [673, 613, 681, 620, 526, 527, 662, 632, 508], [419], [98], [780], [805], [898], [52], [836, 837, 552, 459], [961], [97], [995], [574], [576], [304], [664, 782, 527], [559], [185], [687], [352], [81], [581], [173], [836, 837, 850], [584], [473], [896, 567], [306], [574], [], [900], [], [168], [114], [424], [], [34], [], [795], [661, 479], [], [113], [783], [], [911, 533, 539], [468], [834, 650, 851], [739], [104], [480], [781], [988], [518], [], [981], [952], [450], [446], [], [703], [370], [188], [505, 827], [844], [984], [362], [], [532], [82], [748], [497], [532], [], [677], [569], [257], [246], [349], [862], [372], [645, 733], [247], [], [907, 720], [379], [287], [65, 395], [524, 461, 728], [], [250], [847], [], [301], [851], [], [187], [844], [535], [335], [398], [323], [453], [528], [520], [948], [222], [305], [230], [157], [281, 282], [], [351], [35], [112], [673, 527, 761, 664, 633], [682], [943], [715, 524, 461], [896], [], [861], [422], [628], [217], [922], [12], [321], [777], [87], [768], [126], [284], [65], [139], [31], [497, 557], [307], [619, 818], [745], [706], [688], [915], [279], [130], [822], [609], [], [552], [567, 926], [959], [716], [300], [916], [920], [622], [145], [977, 978], [], [272], [892], [506], [125], [615], [872], [702], [272], [466], [758], [738, 580, 428], [658, 911], [923], [387], [863], [556], [202], [991], [485, 632], [886], [87], [565], [801], [162], [390], [360], [161], [144], [], [875], [771], [457], [836, 837, 785], [153], [433], [481, 482], [834], [96], [462], [21], [471], [773], [440, 455], [231], [88], [684], [572], [576], [379], [984], [484], [7], [407], [787], [231], [941], [592], [919], [581, 654], [657], [957], [881], [258], [337], [111], [999, 861], [930], [104], [542], [497], [673, 664, 526, 527, 508], [23], [251], [917], [862], [64], [526, 664, 508], [404], [160], [123], [381], [], [843, 702], [971], [289], [799], [753], [711], [303], [480], [72, 815], [215], [581], [887], [748], [453], [786], [273, 274], [], [985], [], [807], [970, 980], [2], [426], [720], [99], [628], [], [635], [781], [612, 879], [547], [51], [14], [570], [152], [308], [908, 404], [386], [763], [955], [], [157], [68], [], [194], [495], [232], [927], [495], [577], [829], [269], [956], [680], [236], [], [49], [682], [138], [], [884], [722], [361], [255], [530, 844], [273], [958], [357], [206], [741], [785], [535], [372], [391], [355], [289], [912], [493], [851], [195], [4], [622], [808], [855], [564], [394], [485], [301], [713], [763, 597], [379], [265], [183], [166, 958], [787, 501], [113], [29], [477], [240, 241, 238], [203], [907, 910, 532, 923, 924, 936, 966, 762], [130], [548, 782, 851, 598, 664, 889], [434], [939, 943], [717], [955], [76], [735], [561], [222], [636], [146], [48], [714, 539], [994], [415], [860], [856], [659], [421, 882], [114], [628, 536], [475], [], [], [683], [284], [288], [372], [515], [599], [384], [990], [288], [19], [58], [514, 836, 837, 703], [], [884], [930], [98], [486], [370], [231], [977], [840], [973], [277], [380], [676], [41], [934], [], [646], [569], [310], [971], [390], [710], [791], [597, 763], [], [378], [186], [654], [496], [431], [376], [834, 457], [588, 285], [], [691, 638, 639], [704], [82], [576], [850], [779], [353], [319], [542], [954, 950], [123], [636], [699, 541], [617], [678], [443], [58], [666], [77], [106], [460, 557, 718, 814], [734], [955], [561], [426], [947], [294], [414, 703, 841, 608], [944], [471], [111], [155], [286], [724], [893], [538], [641], [423, 424], [430], [], [680], [373], [304], [450], [58], [602], [637], [174], [800], [23], [722], [289], [756], [448, 853], [287], [683], [644], [463], [977, 978], [881], [300], [524, 461], [855], [], [111], [514, 792], [651, 700], [], [320], [485, 848], [621], [577], [405], [988], [938], [481], [880], [], [603], [33], [673, 742, 664, 526, 527, 782, 632, 508], [90], [], [714], [221], [708], [], [70], [512], [814], [281], [], [993], [218], [490], [347], [164], [957], [968, 918], [565], [595, 958], [815], [884, 406], [608, 610, 836, 837], [86], [945], [903], [671], [535], [398], [781], [239], [756], [768], [854], [455], [106], [387], [983], [383], [274], [682], [], [908, 404], [581, 479, 817], [913], [507], [771], [675, 478], [172], [672], [91], [154], [98], [948], [565], [728], [298], [268], [335], [434, 435], [223], [85, 86], [850, 791], [682], [256], [416], [292], [968], [376], [581], [], [781], [], [], [776], [810, 878], [162], [], [812], [913], [957], [970], [297], [615], [425], [2], [321], [190], [770], [602], [440], [394], [88], [144, 127], [604], [], [462], [9], [654], [155], [], [481, 482], [591], [574], [274], [329], [968, 618], [123], [905], [319], [546], [296], [623], [173, 176], [868, 923, 659, 532], [375], [452], [394], [525], [358, 359], [], [73, 77], [497], [998], [], [418], [105], [647], [437], [218], [636], [559], [300], [762], [620, 594], [], [459, 445, 638], [868, 438], [985], [906], [948, 572, 849], [189], [749], [720], [241], [692, 948, 950, 951], [269], [478], [462], [437], [45], [896, 435, 861], [635], [733], [670], [], [206], [455], [329], [678], [19], [547], [419], [], [724], [212], [852], [20], [661], [989], [919], [369], [626], [650, 818, 819, 632], [101], [422], [116], [691], [496], [907, 760], [314], [122, 123], [983], [89], [892], [494], [371], [769, 911], [414, 455, 631], [542], [], [], [254], [85], [796], [973, 991], [777], [45], [31], [380], [458], [337], [950], [770], [498], [762], [979, 972], [242, 243], [952], [], [658], [328], [901], [200], [634], [414], [292], [776], [868], [357], [742, 728], [134], [85], [553], [198], [729], [28], [88], [314], [160], [], [799], [510], [913], [707], [205], [204, 155], [553, 621, 882], [152], [349], [619, 846], [56], [667, 263], [801, 983], [653], [269], [429, 981], [42], [448], [], [853], [444], [776], [847], [870], [159], [], [494], [66], [148], [], [162], [507], [31], [670], [811], [257], [198], [863], [958], [776], [], [321], [986], [322], [940], [712], [825], [518], [501, 568], [195], [287], [340], [796], [836, 837, 775, 759, 445], [551], [162, 167], [819], [424], [489], [32], [793], [37], [236], [710, 767], [777], [591], [], [433], [459], [886], [380], [834, 982], [532], [434, 631], [878], [308, 309], [194], [586, 847], [284], [418], [33], [234], [647], [834, 570], [105], [99], [146], [122], [], [97], [429], [], [539], [996], [216], [811], [894], [77], [749], [66], [524, 461], [], [650], [668], [790], [93], [179], [313], [889], [524], [713], [489, 381], [843], [343], [272], [412], [16], [679], [777], [94], [680], [290], [279], [], [719], [], [117], [270], [693, 472, 445], [], [8], [841], [822], [523], [102], [712], [467], [343], [838], [602], [478], [86], [461, 465], [426], [626], [742], [752, 852], [786], [92], [288], [189], [908], [425], [192], [27], [257], [986], [836, 837, 793], [460], [311], [870], [115], [579], [29], [], [810, 878], [911], [2], [877], [189], [321], [347], [], [321], [854], [459], [205], [670], [911], [681, 810, 620, 508], [599], [943], [931], [985], [425], [], [191], [7], [23], [800], [876], [813], [231, 232], [831], [967, 504], [716, 573], [961], [277], [241], [900], [225], [378], [922, 441, 762], [587], [240, 241], [820], [236], [312, 311], [149], [518, 671], [896], [594], [962, 923], [949], [256, 220], [868], [], [874], [159], [936], [226], [782, 851], [849], [110], [373], [989], [543], [533, 824, 735], [181], [436], [], [488, 679], [381], [10], [245], [269], [81], [995], [968], [359], [904, 905, 831], [789], [647], [303], [23], [609], [650, 906, 834, 632], [340], [458], [861], [296], [193], [2], [581, 479, 717], [170], [768], [361], [917], [612], [901, 427], [979], [125], [90], [390], [346], [881], [98], [547], [974], [234], [188], [35], [298], [369], [683, 432], [771], [757], [436], [778, 943], [910, 659], [697], [236, 237], [500], [49], [979], [524, 461], [489, 429, 981], [653], [381], [400, 667], [434], [590], [], [904, 309], [107], [457, 869], [805], [661], [324], [217], [441, 572], [914, 780], [174], [759], [64, 55], [88], [], [605], [188, 189], [], [727], [198], [190], [497], [236], [310], [675], [42], [723], [187, 201], [944], [895], [809], [722], [143], [400, 667], [810, 620, 526, 508], [44], [221], [365], [930, 588], [346], [836, 837], [276], [925], [811, 753], [381], [40], [121], [908, 895], [732], [470], [763, 597], [816], [997, 947], [365], [122], [152], [611], [517, 733], [136], [673, 526, 527, 664, 508], [123], [819], [879], [], [13], [], [711], [845], [208], [96, 489], [110], [533], [950], [518, 671], [564], [219], [729], [156], [296], [913], [435], [195], [487], [704], [23], [109, 973], [47], [48], [748, 893], [48], [276], [487], [830], [49, 50], [307], [888], [449, 853], [], [40], [984], [272], [370], [196], [790], [489, 59], [76], [911, 658], [73], [727], [672], [851], [981], [883, 942], [], [336], [], [861], [444], [540], [927], [352], [375], [78], [902], [], [688], [546, 650, 402, 818, 819], [504, 850], [343], [480, 608, 539, 799], [166], [857], [495], [993], [], [40, 41, 44, 46], [70], [738], [], [632], [752, 852], [], [192], [179], [466], [670, 518], [732], [], [262], [45], [255], [513, 650], [676, 488], [19], [389], [223], [167], [659], [179], [346], [883], [459, 445], [98], [425], [], [354], [483], [], [279], [843], [735, 223], [783], [191], [], [820], [548, 664, 851, 632], [225, 235], [437], [162], [275], [617, 501], [312], [766], [105], [109], [697, 470], [334], [585], [], [513], [518, 429], [547], [54], [612], [574], [765], [391], [496], [831], [872], [0, 391, 758], [841], [], [922], [134], [355], [325], [523], [0], [893], [605], [], [759], [244], [933], [465], [514, 788], [49], [189], [894], [358, 359], [233], [], [501], [851], [702], [808], [507], [515, 451], [703, 578, 601], [816], [640], [390], [82], [774], [230], [599], [293], [120], [787], [830, 836, 837, 610], [948], [45], [323], [842], [19], [978], [904], [481, 482], [945], [866], [], [899], [232, 231], [756], [467], [], [757], [444], [502], [603], [18], [265], [671], [767, 692], [183], [729], [246], [959], [442], [997, 947], [473], [357], [439], [695], [197], [272, 62, 67], [450], [302, 314], [14], [207], [257], [627], [673, 526, 527, 782, 664, 508], [181], [573], [520], [257], [], [62], [698, 538], [565], [371], [], [52], [351], [94], [774, 608, 610], [995], [149], [340], [963], [975, 979], [489, 429], [], [336], [256], [790], [305], [900], [], [130], [617], [2], [299], [191], [985, 309], [656], [], [972], [489, 429, 981], [928], [980], [560], [580], [98], [789], [473], [987, 998], [651, 655], [305], [739], [614], [430], [402], [42], [659], [631], [588, 850], [722], [828], [3], [], [107], [786], [616], [993], [949], [851], [84], [922], [616], [988], [682], [], [769], [595], [914], [433], [], [370], [535], [757], [240, 241, 238], [938], [], [983], [842, 433, 445], [640], [], [834, 906, 630], [331], [920], [859], [825], [529], [875], [132], [62], [714], [571], [536, 403], [334], [], [], [37], [983], [845], [807, 561], [376], [382], [606], [560], [], [7], [], [315], [98], [673, 526, 527, 664, 508], [913], [711], [76], [], [550], [117], [224], [3], [], [197], [405], [771], [584], [623, 563], [], [317], [557], [987, 998], [566], [237], [421], [248], [0], [514], [916], [], [384], [793], [554], [593], [480], [433, 639], [24], [977], [422], [165], [316], [11], [608, 836, 837, 869, 464], [777, 490, 461, 464], [489], [385], [616], [271], [552, 619, 493, 846], [744, 657], [742], [812], [480], [286], [325], [549], [38], [299], [677], [491], [269], [528], [112], [286], [265], [440], [314], [513], [384], [608], [], [], [149], [342], [726], [403, 895], [457], [331, 478], [230, 222], [944], [362], [619], [581], [33], [783], [42], [352], [424], [444], [385, 386], [109], [802], [409, 892], [509], [923, 926], [955], [59], [], [108], [], [491], [752], [90], [835], [], [498], [174], [52], [127], [695], [449], [779], [601], [887, 884, 406], [121], [678], [44], [916], [38], [702], [937], [868], [391], [], [811], [470], [677], [619, 846], [9], [743], [809, 926], [0, 515, 853], [636], [], [337], [4], [630], [472], [910], [741], [98], [138], [545], [302], [132], [680], [870], [76], [384], [8], [], [651], [], [192], [644], [521, 659, 950], [627], [73, 74, 815], [897], [49], [632], [299], [653], [283], [670], [770, 478], [929], [715], [382], [37], [6], [542], [713], [335], [441], [577], [146], [968], [10], [856], [], [593, 541], [140], [3, 6], [940, 943, 948], [], [850, 765], [726], [70], [575], [681, 841, 620], [763, 413], [], [789], [57], [125], [128], [268], [307], [710], [701], [210], [354], [313, 315], [962, 923], [658], [724], [718], [], [288, 290], [3], [], [942], [451], [528], [398], [109, 973], [70], [421], [624], [367], [], [981, 429], [607], [64], [22], [471], [164], [6], [225], [], [908, 404], [605], [423], [], [], [18], [313], [640], [55], [642], [243], [37], [483], [800], [736], [351], [492], [843], [771], [169], [111], [895], [653], [18], [145], [], [483], [578, 885], [656], [575, 479], [130], [319], [342], [0], [], [297, 295], [608], [193], [810], [486], [], [], [840], [653], [467, 499], [1], [524], [971], [835], [288], [894], [85], [155], [763], [168], [608, 728], [174], [241], [623], [448], [0], [484], [966], [550], [], [720], [650, 818, 819], [540], [480], [946], [], [985], [707], [], [835], [325], [603], [21], [719], [122], [443], [117], [654], [876, 435], [259], [340], [847], [659], [305], [976], [185], [84], [311], [37], [771], [265, 266], [518], [149], [], [418], [363], [123], [642], [618], [559], [280], [228], [882], [558], [464, 608, 610], [666], [586], [147], [907, 671], [3], [242], [552], [640], [744, 657], [18], [629], [890], [921, 917], [768], [], [988], [613], [438], [560], [305], [236], [920], [78], [936], [769, 77], [579], [711], [768], [17], [383], [9], [628], [215], [528], [869, 742, 526, 655, 630], [346], [], [389], [275], [584], [383], [479, 817], [517], [604], [780], [677, 587], [485, 685], [319], [980], [152, 155], [567], [726], [761], [673, 681, 526, 527, 664, 508], [326], [489, 747], [744, 657], [544, 964, 926], [940], [247], [740, 477], [], [121], [699], [387], [548, 613, 664, 526, 527, 851], [606], [320, 985], [778], [911], [546, 650, 819], [460], [363], [492], [225, 419], [878], [305], [], [175], [130], [428], [776, 439], [314], [], [650, 822, 542], [471], [281], [2], [193], [475], [112], [141], [873], [350], [574], [745], [286], [655], [137], [310], [766], [974], [680], [539], [913], [342], [941], [256], [881], [396], [645], [180], [86], [], [597], [805, 205], [870], [336], [238], [789], [618], [750], [395], [422], [308], [518], [], [517], [607], [941], [749], [546, 402], [964, 987], [210], [903], [], [380], [224], [717], [693], [342, 343], [381], [910], [273], [581, 479, 436, 511], [241], [1], [632], [113], [581, 479, 817, 511], [836, 837], [], [617], [325], [334], [514, 876, 435], [636], [67], [138], [514, 515, 655, 958], [985], [960], [857], [730], [263], [643], [672], [], [964], [954], [838], [681, 620], [773], [807], [37], [417], [380], [866], [666, 924], [316], [12], [102], [], [652], [968, 504], [592], [915, 853], [834, 630], [759], [578, 894], [822], [68], [428], [681, 620], [629], [8, 792, 958], [158], [827], [789], [149], [21], [257, 258], [618], [432, 683], [900], [183], [452, 850, 610], [144], [126], [852], [795], [168], [67, 68], [292], [410], [100], [825], [660], [309], [195], [467, 125], [330], [479, 436], [387], [481], [495], [566], [480], [704], [], [777], [314], [178], [750, 564], [748], [655], [758], [492], [412], [356], [762], [548], [147], [122], [153], [9], [151], [832], [10], [12], [739], [47], [355], [914], [398], [725], [182], [284], [87], [309], [610, 758], [839], [344], [199], [573], [672], [854], [101], [783, 535], [69], [306], [], [75], [948], [610, 836, 837], [291], [948], [49, 679], [820], [400], [462], [19], [232], [267], [922], [171], [133], [744, 657], [361], [815], [259], [28], [372], [937], [234], [847], [474], [649], [779], [731], [694], [950, 951], [920], [115], [6], [777, 764], [150], [224, 235], [419], [834, 906], [587], [713], [384], [807], [615, 890], [979], [159], [593, 650], [735], [37], [244], [119], [551], [896], [861], [326], [190], [376], [], [905, 846, 721, 831], [146], [360], [485, 848, 851, 632], [594], [127], [694], [152], [48], [495], [21], [960, 470], [758], [203], [], [], [988], [152], [559], [829], [704], [646], [], [294], [809, 923], [50], [68], [937], [569], [521], [58], [768], [73, 74], [515], [694], [814, 977, 978], [], [974], [775], [727], [242], [644], [603], [75], [835], [], [345], [49], [92], [459], [137], [294], [647, 967, 606], [968], [204], [879], [831], [471], [643, 881], [112], [967], [328], [781], [338], [61], [856], [578, 982], [976, 972], [41, 44, 26], [118, 119], [939, 943], [149, 150], [898], [503], [231], [940], [615], [431], [696], [880], [596], [833], [916], [768], [], [951], [162, 230], [996], [185], [911, 658], [103], [668], [821], [495], [509], [158], [560], [876, 912, 435], [937], [705, 825], [999], [930], [67], [938], [479], [585], [756], [621], [923, 499], [690, 345], [681, 620], [773], [869], [869], [311], [56], [], [673, 742, 664, 526, 527, 782, 632, 508], [739], [969], [867], [208], [862], [804], [840], [54], [495, 725], [685], [474], [585, 655], [761], [150], [12], [451], [477], [47], [494], [857], [719], [972], [742, 713, 664, 526], [535], [766], [847], [956], [825], [794], [934, 478], [665], [840], [751], [822], [581, 479], [], [858], [401], [272], [169], [977, 978], [870], [107], [853], [785], [204], [942], [17], [177], [492], [608, 903, 841], [444], [502], [723, 549], [927], [336], [], [444], [133], [191], [95], [596], [924], [947], [816, 911], [166], [981, 429], [777], [90], [537], [680], [820], [209], [139], [953], [696], [205], [674], [926], [], [171, 237], [770, 788], [70], [452], [560], [94], [715], [597, 413, 671], [32], [574], [252], [27], [814, 693], [871], [299], [907, 440], [122], [243], [526], [186], [578, 834, 523, 906, 630], [28], [882], [846], [980], [301], [111], [495], [320], [70, 985], [778], [], [561], [780, 975], [501], [837, 670], [723], [391], [93], [243], [47], [233], [63], [650], [991], [805], [866, 730], [427], [192], [360], [15], [273], [575], [407], [916], [74], [], [354], [828], [451], [236], [758], [170], [], [825], [419], [79], [97], [779], [626], [820], [108], [932], [655], [703], [920], [503], [557], [988], [804], [937], [314], [431], [773], [138], [945], [507], [599], [], [], [896], [970, 979], [919], [840], [474], [637], [120], [489, 791], [721], [313], [380], [577], [809], [980], [698], [780], [796, 837, 836], [992], [610, 697], [938, 942, 943], [967, 504], [34], [258], [917], [187], [87], [175], [68], [60], [19], [403], [], [713], [867], [647], [140, 94], [865, 692], [], [376], [261], [787], [217], [440], [761], [102], [901], [], [763], [131], [952], [386], [128], [200], [955], [522], [268], [690, 345], [741], [43], [704, 581, 919], [770], [250], [235, 465], [], [851], [794], [926], [628], [744, 657], [546], [], [977], [130], [373], [940], [872], [258], [997, 623, 696], [119], [840], [458], [835, 855], [520], [], [812], [680], [142], [128, 144], [101], [], [329], [398], [50], [636], [335], [509], [693], [199], [242], [807], [], [610, 770, 862, 733], [592], [337], [386, 101], [110], [], [123], [22], [254], [91], [], [999], [679], [695], [769, 418, 709, 600], [44], [469], [896, 999, 861], [49], [331], [169], [887], [737], [349], [348], [220], [581, 479], [846], [608, 806], [27], [568], [], [281], [312], [101], [90], [39], [494], [44], [189], [746], [662], [63], [282], [292], [], [286], [166, 167], [621], [890], [581, 479], [196, 198], [89], [513], [281], [355], [96], [719], [417], [952], [670], [255], [928, 960], [651, 760], [551, 629, 696], [119], [688], [], [301], [992], [738], [450], [726], [501], [723], [255], [177], [703], [293], [114], [842, 529, 562], [364], [810, 508], [867, 675], [996], [344], [649], [312, 311], [525], [321], [321], [385], [325], [455], [621], [933], [146], [150], [566], [], [826], [], [584], [564], [936], [970], [342], [85], [], [], [871], [279], [108], [28], [808], [932], [232, 231], [457], [344], [740], [926], [421, 765], [249], [670, 415], [526, 400], [431], [], [967, 968, 923], [383], [363], [176], [363], [164], [235], [651], [392, 393, 108, 973], [318], [95], [615], [574], [367], [73], [512], [863], [301], [308], [766], [531], [891], [879], [166], [333], [207], [400, 667], [589], [363], [], [204], [872], [959], [231], [574], [344], [398], [132], [13], [517], [986], [836, 837], [139], [], [462], [127], [513, 875], [549, 968, 504], [22], [894], [813, 659], [549], [682], [526, 787], [770, 605], [436, 733], [288], [459, 445, 638], [228, 265], [51], [367], [561], [308], [868, 954], [928, 927], [296], [401, 881], [67], [749], [297, 295], [7], [722], [216], [681, 810, 620, 508], [645], [548], [306], [79], [662], [722], [430], [756], [638], [378], [760], [507], [844], [923], [840], [666], [900], [618, 926], [221], [], [510], [], [928], [974], [643, 692, 478], [807], [50], [950], [923], [60, 68], [861], [398], [646], [144], [146], [728], [690, 345], [204], [206], [370], [960], [983], [945], [371], [329], [67], [817, 511, 479], [968, 504], [33], [], [676], [513], [155], [373], [198], [820], [134], [770], [588], [362], [64], [847], [474], [866], [581, 817, 479], [10], [255], [512], [933], [430], [631], [108], [367], [317], [603], [999], [621], [484, 871], [915], [983], [375], [186], [195], [287], [340], [56], [975, 497], [560], [295], [987], [206], [861], [770], [969, 659], [292], [506], [188], [784], [397], [300], [815], [349, 350], [419], [174], [573], [6], [926], [897], [], [895], [678], [520, 516], [701], [899, 849], [167], [866, 730], [652, 764, 413], [644], [137], [874], [494], [557, 602, 733], [721], [636], [923, 960], [128], [817, 511], [611], [869], [31], [409], [297, 295], [997], [562], [521], [897], [499], [452], [492], [388], [], [], [449], [394], [173], [920], [285], [584], [813, 910], [891], [711], [144], [222], [813, 909, 910], [351], [633], [273], [362], [638, 639], [261], [489], [225], [196], [335], [], [148], [966], [713], [40, 44], [203], [555], [806], [630], [474], [18], [764], [651], [], [198], [512], [164], [644, 720], [112], [181], [709], [582, 953], [813], [609, 586, 413], [601], [], [], [82], [168], [453, 454], [37], [73, 74, 815], [67], [159], [], [], [324], [718], [311], [534], [976], [230, 231], [607], [476], [400, 667], [274], [765], [814], [143], [109], [806], [701], [433], [], [171], [41], [582, 950, 954], [581, 661, 479], [560], [497], [815], [341, 342], [753], [248], [102], [680], [262], [738, 633], [157], [329], [516, 850], [821], [715], [8], [], [569], [426], [946], [770], [333], [754], [839, 978], [275], [486, 819, 889], [321], [461], [123], [453, 885, 887], [827], [139], [281], [276], [241], [], [836, 837], [611, 207], [948], [696], [317], [77], [614, 879], [684], [707], [479], [618], [851], [680], [553], [138], [362], [927], [381], [47], [989], [920], [359], [793], [881], [890], [81], [608, 615, 792], [244], [652], [347], [984], [681, 620, 508], [581, 717, 479], [377], [720], [258], [194], [784], [478], [451], [660], [416], [308], [914], [532], [412], [662], [361], [688], [985], [121], [754], [863], [577], [231], [443], [200], [104], [203], [189, 191], [547], [673], [209], [621], [105], [450], [752], [810, 878], [], [300], [149], [73], [840], [946], [447], [464, 608], [234], [], [783], [739], [979, 525], [292], [971], [145], [608], [256], [926], [408], [691], [273], [360], [434, 533], [835], [326], [299], [679], [852], [59], [594], [616], [151], [308], [557], [529], [212], [729], [907, 440], [868], [], [971], [661], [], [244], [307], [974], [46], [226], [748], [563], [128], [535], [882], [830], [623], [200], [404], [995], [326], [489, 395], [288], [652, 847, 465, 413], [18], [140], [886], [278], [], [643, 759], [373], [933], [294], [830], [737], [723], [75], [140], [172], [], [234], [838, 631], [625], [690], [677], [261], [76], [161], [943], [61], [487], [851], [162, 166], [555, 475], [354], [], [987, 998], [309], [372], [686], [], [187], [95], [936], [339], [716, 765], [95], [527, 592, 782, 664, 508], [980], [627], [], [86], [311], [214], [692], [548], [690, 345], [406], [908, 404, 812], [892], [850], [431], [950, 951], [465, 652, 413], [570], [], [95], [46], [299], [984], [835], [625], [623], [589], [946], [584], [254], [753], [679], [864], [379], [755], [909], [70], [84], [904], [520, 850], [382], [122], [2], [484, 814], [639], [222, 207], [6], [733, 920], [745], [422], [797], [861], [107], [587], [], [714], [921], [811], [624, 453], [726], [], [928, 960, 954, 572], [712], [173, 253], [650, 402, 818, 819, 632], [995], [686], [962], [681, 620, 664, 508, 477], [803], [306], [620, 526, 664, 508], [982], [324], [971, 724], [920], [440], [405], [440], [954], [417], [581, 734], [974], [791], [369], [581], [423], [637], [990], [858], [791], [400, 667, 733], [818, 872, 622, 759], [9], [748], [328], [670, 518], [250], [326], [531], [673, 526, 527, 664, 508], [139], [469], [317], [352], [704], [694], [148], [74], [935], [130], [987, 998], [136], [813], [259, 265, 153], [465, 763], [38], [928, 960], [628], [248, 250], [471], [476], [41], [432], [188], [715], [801, 983], [135], [283], [607], [935, 567, 923], [], [], [840], [963], [245], [128], [297], [344], [749], [249], [], [126], [419], [], [861], [195], [357], [770, 811], [617, 823], [86], [839], [679], [992], [], [558, 402, 699], [628], [628], [673], [358], [485, 754], [346, 351], [905, 825], [487], [100], [820], [673, 664, 526, 527, 632, 508], [769], [582], [815], [999], [651], [113], [823, 836], [851], [467], [727], [151], [410], [], [], [54], [907, 532, 440, 966, 762], [2], [514], [447], [], [555], [458], [], [], [634], [277, 278], [59], [946], [336], [], [378], [237], [938], [810, 878], [95], [961], [81], [19], [629], [], [539], [538], [245], [693], [290], [339], [49, 50], [999, 281, 861], [], [32], [47], [859, 521, 651, 760], [693, 472], [639], [713], [616], [], [510], [210], [79], [], [783], [655], [236], [550], [953], [744, 657], [929], [894], [253], [851], [732], [659, 809], [], [415], [205], [188], [], [210], [717, 581, 479], [529], [185, 186], [897], [999, 700], [275], [130], [206], [293], [531], [776], [538], [360], [863], [57], [407], [148], [6], [760], [855], [167], [778], [68], [91], [801], [45], [2], [792], [35], [863], [683], [146], [163], [519], [899, 951], [205], [489], [367], [], [151, 188], [874], [546, 650, 402, 818, 819], [619, 314], [831], [377], [282], [279], [272], [161], [73], [775], [554], [602], [149], [782, 851], [373], [297], [384], [535], [785], [347], [175], [853], [14], [472], [275], [608, 806, 841, 831], [658], [609], [888], [48], [322], [284], [457], [969, 470], [383], [955], [259], [603], [705], [2], [307], [531], [229], [487], [760], [247], [286], [862], [483], [685], [721], [624], [66], [674], [137], [517], [22], [739], [519], [274], [744, 657], [], [980, 975], [307], [600], [868, 968, 504], [882], [996], [953], [41], [770], [270, 279], [281, 282], [], [611], [373], [506], [882], [428], [516], [448], [138], [733], [743], [406], [679], [708, 975], [384], [859], [213], [888], [502], [150], [646], [], [981], [], [651, 813, 567, 827], [939, 943], [20], [902], [846], [64], [573], [651, 567, 760], [605], [638, 639], [779], [711], [458, 708], [162], [778], [75], [259], [872], [552, 515], [659, 937], [199], [659], [184], [61, 62], [518, 671], [123], [110], [308], [635], [993], [912], [613], [385, 386], [583], [889], [849, 505], [125], [662], [147], [334], [841], [244], [950], [842, 977, 978], [], [358], [193], [741, 687, 884, 406], [312], [153], [224], [349], [947], [203], [264], [185], [454], [693], [79], [421], [144], [968], [45], [258], [625], [200], [985, 301], [610], [579], [913], [867], [579, 582], [931], [259], [564], [900], [987, 998], [864], [81], [813], [528], [626], [770], [883], [257], [732], [54], [338], [836, 837, 841], [792, 428], [272], [413], [804], [922], [546, 650, 402, 819, 541], [], [441], [852], [316], [277], [457], [605], [658], [40], [515], [], [755], [25], [992], [809, 925], [887], [898, 680], [877], [16], [506], [230, 231], [796], [615], [324], [918], [333], [374], [392], [545], [760], [640], [359], [], [763, 597], [112], [190], [294], [791], [496], [64], [], [225], [646], [129], [908, 895], [], [486], [167], [651], [905], [736], [594], [], [206], [294], [], [145], [593], [72], [], [13], [138], [834], [326], [770], [222], [155], [375], [560, 768], [433], [907], [248, 250], [794], [15], [824], [810, 878], [549], [], [295], [710, 809], [481, 482], [176], [206], [981], [83], [499, 923], [434], [775], [977, 842, 978], [866], [43], [], [291], [951], [531], [848], [840], [370], [393], [], [767], [818], [314], [768], [806], [347], [263, 151], [493], [496], [268], [173], [195], [786], [], [873], [816], [447], [910, 567], [22], [758], [696], [190], [207], [573], [2], [377], [546], [986], [], [200], [868, 813], [857], [766], [972, 976], [], [], [281], [622, 759], [966], [684], [546, 819], [433], [85], [807], [269], [38], [426], [49, 50], [819], [942], [585], [743], [207], [206], [642], [816], [519], [316], [762, 532], [205], [], [28], [845], [539, 741], [0], [847], [99], [510], [836, 837], [866], [], [], [15], [500], [115], [719], [903], [123], [703, 463, 738], [659], [], [29], [872], [767], [372], [195], [191], [252], [546, 819], [659], [284], [487], [28], [165], [726], [355], [272], [879], [271], [600], [], [241], [8, 912], [958], [472], [105], [810, 878], [313], [562], [870], [259], [744, 657, 812], [392], [814], [608, 770], [888], [98], [333], [725], [233], [447], [139], [717], [82], [540], [77], [329], [333], [244], [517, 625], [325], [639], [789], [115], [536], [965], [840, 462], [389], [284], [882], [896], [588, 948], [933], [711], [360], [756], [508], [95], [636], [276], [420], [962, 987, 923], [207], [796], [660, 557], [32, 31], [923], [646], [980], [82], [958], [85], [244], [182], [294], [804], [697], [366], [479, 535], [999], [824], [], [724], [92], [232], [877], [946], [], [496], [812], [985], [813, 567], [662], [883], [100], [359], [444], [771], [426], [917], [811], [775, 977, 978], [983], [871], [322], [187], [940], [8], [343], [207], [859], [630], [155], [306], [322], [681, 620, 526], [69], [59], [336], [393, 327], [93], [174], [170], [68], [206], [85], [477], [185], [112], [934], [463], [730], [650, 819, 822, 632, 542], [562], [870], [977], [928, 923], [451], [168], [898, 655], [14], [89, 414], [715], [775], [734], [765], [38], [681, 620, 526, 846, 632, 508], [396], [951], [815], [], [149], [597], [594], [546, 650, 819], [354], [266, 267], [488, 695, 508], [363], [455], [247], [813, 910], [55], [471], [249], [536], [2, 3, 973], [503], [288], [795, 970], [673, 681, 620, 526, 527, 664, 508], [85], [47], [160], [722], [452], [655], [201], [788, 502], [648], [643], [477], [203], [42], [210], [23], [], [342], [741], [77], [], [113], [704], [501], [721, 697], [534, 729], [382], [89], [], [161], [303], [579, 881], [38], [137], [908, 404], [661], [496], [333], [217], [924], [899, 868, 968, 809, 463, 659], [609, 465, 413], [728], [50], [937], [369], [630], [778], [153], [968, 504], [], [459], [581, 479], [578], [304, 301], [899], [22], [863], [384], [308], [619, 846], [471], [], [334], [679], [382], [87], [460, 718, 975, 977, 978], [499], [842, 977, 978], [842], [649], [26], [761], [738, 825], [460], [157], [719], [864], [585], [634], [88], [618, 809, 926], [161], [933, 923], [195], [950], [243], [395], [879], [285], [991], [333], [934], [648, 760], [992], [907, 440], [578], [250], [176], [570], [80], [879], [23], [51], [700, 999], [290], [91], [208], [596], [814], [764], [77], [480, 785, 731, 414], [366], [411], [330], [836, 837, 774, 655], [557], [56], [240], [119], [695], [586], [669], [331], [361], [526, 527, 664, 508], [671], [37], [382], [105], [458], [768], [658], [518, 671], [281], [], [], [72], [22], [921], [132], [369], [547], [41, 48], [], [248, 250], [783], [282], [350], [608, 792], [584], [], [933], [], [385, 386, 101], [], [], [83], [921, 917], [], [369], [972], [92], [], [623, 923], [382], [738, 580], [593], [], [347], [647, 968, 532], [902], [514, 515, 476, 765], [476], [308], [390], [954], [790, 952, 954], [957], [616, 972], [935], [138], [224], [37], [320], [381], [425], [216], [923, 572], [441, 572], [891], [627], [715, 652, 764], [791], [187], [980], [834, 869, 906], [36], [581], [145], [989], [818], [427], [728], [216], [888], [131], [903], [427, 756], [261], [36], [544, 909], [], [], [447], [418], [537], [], [337], [293], [917], [437], [247], [489, 275, 276], [923], [805], [512], [346], [847], [871], [82], [190], [465, 597, 728], [892, 721], [347], [682], [641], [858], [5, 6], [652, 465, 413], [944], [864], [562], [295], [300], [439], [888], [135], [40], [218], [548], [763], [168, 159], [82], [0], [88], [900], [417], [673], [984], [437], [400], [479], [931], [257], [558], [400], [511, 479], [287], [935, 469, 923], [695], [385, 386], [294], [633], [882], [539], [854], [151], [52], [641, 808], [716], [329], [226], [823], [3], [953], [141], [], [648], [], [332], [907], [160], [308], [758], [522], [219], [806], [842], [602], [22], [258], [734], [520], [148], [252], [248], [348], [17], [916], [793], [659], [617, 823], [], [650, 401, 819], [178], [32], [944], [], [757], [634], [23], [15], [239], [471], [697], [419], [151, 158], [941], [746], [24], [553], [481, 482], [773], [700], [18], [391], [757], [480], [680], [42], [66, 68], [873], [], [117], [], [232], [], [331], [], [274], [129], [804], [454], [538], [654], [], [411], [797], [259], [236], [443], [657], [211], [55], [936], [25], [140], [643], [836, 837], [22], [936], [809, 925], [453], [997, 947], [410], [697], [], [], [617, 438], [118], [995], [387], [801], [777], [989], [562], [472], [348], [513], [720], [755], [215], [939], [865], [], [893], [761], [582], [277], [113], [110], [772], [794], [709], [521, 947], [739], [347], [656], [83], [898], [164], [490], [], [684], [304], [72], [839], [552], [472, 693], [86], [422], [977, 978], [947], [490], [910], [], [346], [39, 47], [312, 311], [703], [270], [600], [720], [15], [890], [986], [563], [447], [976], [839], [440], [217], [404], [461], [153], [863], [349], [902], [898], [836, 837, 617, 789], [132], [268], [490], [], [866, 596], [486, 559], [541], [716], [28], [], [198], [498], [5], [768], [546], [188], [920, 475], [260], [634], [772], [776, 819], [559], [928, 927], [747], [728], [579, 881], [567], [241], [695], [78], [161], [115], [157], [561], [183], [164], [413], [27], [116], [489, 815], [381], [], [610, 430], [251], [462], [], [738], [605], [573], [605], [702], [13], [291], [22], [343], [991], [], [123], [198], [23], [276], [40], [679], [230], [104], [840], [70], [209], [332], [926], [947], [628], [291], [222, 257], [652, 413], [292], [975], [289], [798], [641], [673, 418, 526, 527, 664, 508], [570], [998, 939, 943], [380], [209], [173], [189], [299], [630], [181], [245], [], [291], [829], [956], [176], [575], [324], [4, 391], [915], [464, 597, 763], [83], [740], [52], [713], [205], [504, 441, 572], [], [], [138], [681, 810, 620], [761], [777], [109, 973], [], [541], [223], [613], [180], [512, 623], [540], [154], [], [], [217], [594], [903], [], [465, 796], [964], [923], [], [], [896, 804, 631], [398], [523], [550], [917], [840], [574], [608, 796, 806, 478], [704], [572, 966], [357, 958], [597], [399], [84], [], [364], [40], [429], [436], [920], [163], [680], [2], [557], [190], [156], [722], [571], [894], [439], [756], [871], [198], [564], [438], [373], [149], [232], [391], [269], [933], [721], [], [151], [151], [248, 539], [814], [866, 595], [655], [421], [698], [454], [568], [865], [267], [183], [553, 493], [281], [], [237], [721], [477, 587, 784], [333], [663], [382], [296], [652, 764, 413], [951], [987, 998], [270], [996], [952], [620, 662], [929], [369], [79], [118], [], [359], [697], [761], [29], [617, 438], [37], [222], [233], [106], [76], [36], [53], [310], [608, 515, 610, 841], [283], [], [795], [19], [290], [326], [], [36], [290], [118], [293], [506], [989], [996], [366], [], [], [746], [391], [834, 655], [73, 77], [782, 664], [255], [85], [953], [316], [123], [919, 860], [347], [178], [274], [84], [], [418, 709, 748, 563], [323], [632], [724, 536], [841], [402], [638, 639], [25], [345, 730], [588], [874], [], [363], [130], [532], [673, 526, 527, 782, 664, 508], [707], [401], [576], [413], [0], [603], [674, 630], [197], [828], [978, 437], [], [498], [298], [325], [], [413, 439], [863], [97], [680], [89, 284, 799], [97], [709], [573], [368], [805], [284], [683], [222], [411, 828], [659], [86], [633], [642], [792], [459], [155, 204], [], [756], [798], [673], [], [338], [296], [178], [462], [843], [187, 201], [218], [117], [169], [711], [], [389, 391], [634], [713], [330], [553], [772, 869, 488, 464], [122], [523], [625], [54], [402], [889], [692], [29], [356, 359], [578, 515, 689, 982, 601], [97], [213], [98], [729], [], [459, 445], [20], [358], [], [950], [485], [169], [240], [548], [891], [650, 819], [125], [], [184], [588], [476], [666], [140], [754], [559], [937], [385], [913], [643, 906], [], [718], [516, 669], [673, 504, 508], [50], [596], [866, 803], [135], [496], [667], [486], [211], [18], [387], [563], [931], [142], [767], [310], [910, 411], [448, 489], [245], [64, 55], [439], [64], [157], [240], [578], [922], [288], [842, 523, 433, 795], [808], [108], [934], [861], [209], [517, 540], [112], [769], [423], [652], [], [187, 201], [839], [22], [130], [289], [746], [780], [447], [995], [780, 914], [888], [179, 180], [], [27], [], [373], [879], [536], [582, 936, 939, 943], [146], [518], [659, 949, 950], [218], [], [475], [684], [820], [75], [], [960, 968], [49], [650], [173], [], [565], [405], [690], [345], [652, 822, 541, 542], [889], [343], [], [944], [300], [784], [780, 724], [768], [514], [], [35], [741], [], [983], [83], [906], [518], [229], [487, 590], [218], [864], [91], [147], [617, 823], [237], [920], [866], [469], [746], [581], [892], [], [911], [962], [89], [], [154], [487], [714], [378], [627], [515, 348], [247], [343], [18], [529], [], [142], [739], [332], [491], [517], [926], [220], [930], [926], [977, 978], [581, 479, 817, 511], [974, 468], [912, 977, 978], [606], [577], [40], [464], [488, 600], [], [784], [492], [996], [589], [439], [650, 632], [438], [588, 790], [251], [780], [130], [319], [521], [543], [357], [137], [622, 759], [399], [689], [], [240, 238], [608, 681, 620], [752], [39], [116], [263], [842], [522], [684], [], [], [665], [713], [], [], [791, 582], [850], [336], [823], [971], [588, 813, 910], [9], [227], [945], [307], [194], [578, 689, 601, 831], [379], [726], [695], [800], [831], [802], [131], [71], [686], [485, 848], [352], [501], [810, 878, 658], [185], [765], [18], [496], [209], [437], [698, 483], [964], [103], [276], [388], [243], [841, 911], [578, 982], [228], [799], [773], [741], [575], [15], [424, 919], [581, 479], [], [296], [203], [586], [62], [888], [227], [695], [771], [40, 46], [335, 412], [486], [637, 879], [8], [42], [854], [136], [39, 47], [325], [299], [609], [231], [577], [233], [814], [382], [978], [472, 693], [524, 461], [], [355], [], [979, 821], [537], [249, 250], [], [965], [398], [553], [850], [811], [804], [83], [613], [680], [94], [803, 586], [56], [608, 610, 841], [407], [151], [18], [615], [489, 818], [889, 831], [], [390], [741], [712, 126], [821], [471], [63], [578, 982], [983], [762], [275], [396], [459, 445], [], [172, 173], [148], [834, 522], [472], [], [578, 689, 601], [901], [539], [378], [239], [], [], [894], [897, 534, 729], [532], [896], [522], [459], [439], [344], [691], [372], [875], [513, 776, 875], [5, 6], [], [314], [198], [14], [42], [429], [], [555], [832], [986], [591], [359], [311], [446], [349], [222], [518, 671], [602], [290], [399], [682], [413], [750], [351], [568], [792], [], [581, 479], [389], [842, 977, 978], [31], [577], [996], [592], [821], [117], [140, 142], [], [431], [81], [759], [618, 813, 910], [523], [914, 780], [66, 54], [379], [672], [293], [567], [29], [673, 664, 526, 527, 782, 508], [], [189], [731], [745], [899], [156], [240, 241], [], [17], [77, 815], [806], [76], [596], [580, 807], [581, 717], [905], [117], [865], [897], [893], [431], [928, 923], [588], [454], [490, 524, 787], [497, 406], [563], [783], [646], [82], [44], [416], [339], [669], [8], [220], [722], [255], [494], [230], [826, 488], [273], [181], [349], [391], [995], [265, 266], [283], [57], [810, 651, 508], [518], [170], [913], [436], [], [464], [], [619, 846], [203], [138], [421], [], [], [564], [213], [736], [125], [789], [], [744], [899], [575], [483], [516, 520, 721], [470], [82], [489], [258], [330], [53], [291], [303], [730], [52], [229], [75], [854], [330], [702], [781], [325], [612], [515, 841], [211], [727], [668], [818], [775], [831], [649], [4], [107], [420], [900], [], [751, 479], [257], [750], [894], [949], [628], [410], [533], [874], [745], [293], [224], [896, 804, 711, 585, 631], [28], [422], [810], [619, 846], [254], [967], [583], [541], [618], [281], [697], [638, 639], [479], [316], [582, 936], [452], [470], [738], [172], [868], [206], [], [417], [364], [131], [464], [514], [104], [667], [325], [864], [664], [110], [539], [69], [747], [941], [503], [], [565], [338], [720], [215], [409, 892], [989], [606], [871, 913], [], [860], [421], [683], [144], [957], [596], [211, 243], [218], [], [904, 281, 282], [289], [376], [569], [417, 557, 562], [258], [505], [673, 526, 527, 782, 664, 508], [530], [449], [], [860], [865, 692], [946], [694], [656], [353], [984], [258, 222], [465], [], [636], [867], [94], [], [403], [379], [455], [206], [722], [230], [303], [922], [705, 547], [600, 517], [334], [392, 109], [627], [270], [159], [711], [101], [884], [404], [492], [335], [288], [], [699], [245], [650, 819], [617], [233], [316], [153], [778], [624], [905], [728], [], [143], [727], [640], [331], [541], [27], [46, 47], [987, 998], [322], [633], [879], [847], [892], [108], [78], [669], [191], [414], [], [400, 667], [845], [88], [], [533], [522], [683], [395], [398, 529], [343], [131], [347], [321], [503], [199, 197], [182], [281], [711], [509], [172, 173], [41], [349], [685], [86], [270], [281], [156], [616], [979], [69], [967], [732], [578], [614, 879], [867], [923], [753], [168, 159], [565], [114], [870], [14], [313], [298], [903, 584], [], [515, 910], [269], [927], [459], [793], [198], [213], [366], [544], [546], [930], [649], [77], [409], [469], [614], [66], [965], [537], [454], [179], [83], [350], [179], [771], [517], [581, 479, 817, 511], [390], [766], [], [467], [519], [896, 281], [357], [114], [2, 3], [403], [843], [978], [474], [40], [33], [151], [269], [543], [293], [770, 774, 655], [362], [772, 679], [250], [425], [723], [530], [193], [], [955], [561], [581, 489, 479], [], [40], [777, 623, 542], [961], [917], [819], [664], [], [672], [758], [566], [53], [910], [764], [574], [207], [], [946], [756], [242], [809, 910, 925], [419], [918], [727], [903], [634], [], [615, 652, 465, 413], [], [628], [168, 159], [877], [109], [811], [579], [638, 639], [594], [888], [753], [93], [], [884], [234], [509], [224], [450], [373], [152], [701], [632], [344], [849], [42], [843], [602], [33, 973], [909, 659, 951], [409, 892], [987], [400], [], [455], [492, 857], [618, 567], [644], [494, 442], [430], [646], [514], [962], [306], [868, 968, 923], [44], [872], [765], [707], [836, 837, 842], [175], [554], [], [995], [], [293], [], [448], [57], [920], [], [213, 852], [999], [247, 151], [372], [471], [386, 101], [441], [924], [144], [58], [368], [767], [480], [258], [861], [], [189], [431], [376], [816], [315], [346], [397], [40], [577], [], [424, 423], [], [402, 889], [692], [183], [273], [805], [877], [746], [427], [581], [793], [961, 910, 659], [896], [372], [599], [840, 462], [515, 824], [744, 657], [986], [333], [453, 831], [674], [236, 852], [764], [131], [], [386], [814], [976], [547], [322], [323], [968], [466], [713], [481, 482], [944], [237], [708], [535], [295], [559], [481], [532], [], [358, 359], [151], [258], [748, 600], [659], [624, 453, 454], [999], [788], [666], [103], [855], [57], [472], [275], [83], [841], [], [344], [10], [947], [97], [685], [250], [995], [410], [429, 527, 916, 664], [], [84], [581, 436, 479], [], [156], [459], [98], [300], [8], [751], [614], [644], [53], [402], [616], [759], [], [], [427], [749], [91], [229], [], [91], [710], [], [220], [318], [6], [468], [836, 837], [771], [587], [282], [], [669], [258], [127], [631], [857], [679, 616], [737, 898, 886], [], [48], [772], [108], [789], [433, 842], [123], [481, 482], [161, 195], [609], [562], [979], [644], [908], [968], [733, 557], [897], [572], [893], [355], [578, 654, 982], [813], [227], [82], [403], [462], [], [], [271], [655], [765], [750], [], [32], [468, 479], [109], [51], [579, 881], [], [701], [928, 923, 960], [403], [219], [232], [402], [64, 55], [551], [], [], [554], [626], [863], [849], [0], [870], [754, 507], [407], [747], [15], [939], [330], [233], [905, 619], [982], [315, 311], [802], [987, 926, 998], [252], [489], [154], [498], [346], [566], [917, 794], [85], [287], [456], [], [719], [870], [638, 639], [738], [775, 819, 842, 602], [901], [730], [868, 532, 923, 572], [835], [826], [766], [117], [274], [9], [831], [479, 436], [49], [770, 518, 414, 842, 978], [], [84], [611], [], [683], [215], [474], [340], [], [224], [839], [544, 521, 926], [68], [836, 837, 869], [924], [672], [219], [957], [129], [720], [626], [222], [107], [190], [853], [654], [715], [389, 390, 395], [311], [928], [822], [909, 544, 336, 469], [954], [309], [763, 764], [573], [886], [325], [], [86], [192], [36], [366], [926], [131], [49], [545], [42], [210], [710], [11], [338], [657, 812], [491], [484], [872], [82], [596], [603], [599], [102], [333], [364], [365], [366], [71], [16], [37], [890], [832], [872], [175], [647], [], [482], [756], [393], [375], [496], [943], [616], [562], [690], [616], [], [562], [373], [743], [40, 46], [88], [819, 822], [33], [880], [157], [], [848], [831], [], [941], [866], [53], [140], [134], [830], [367], [922], [691, 692], [673, 681, 620, 526, 782, 664], [247], [827], [736], [546], [743], [260], [770, 774, 655], [160], [270], [171], [52], [540], [], [372], [468], [], [647], [875], [896, 553, 493, 894], [63], [330], [63], [338], [136], [802], [450], [514], [582], [192], [533], [189], [952], [855], [755], [48], [28], [396], [302], [100], [345, 475], [], [], [], [193], [57], [8, 7], [773], [481], [391], [723], [357], [952], [40], [975, 976], [], [], [495, 894], [440], [260], [712], [880], [518], [689], [147], [433], [110], [136], [479], [177], [121], [383], [970, 976, 979], [64], [554], [554], [845], [829], [874, 654], [963], [551], [], [418], [270], [328], [495], [150], [449], [191], [801], [500], [302], [193], [], [991], [211], [540], [315], [335], [327], [449], [388], [114], [672], [133], [538, 668], [776, 513], [625], [949], [412], [992], [793], [], [41], [814], [881], [186], [246], [497], [346], [662], [264], [384], [703], [259], [874], [467], [33], [601], [644], [528], [228], [944], [430], [311], [], [68], [87], [937], [364], [512], [748], [354], [283], [268], [512], [339], [918], [582], [573], [781], [171], [419, 823, 845], [392], [592], [901], [6], [19], [778], [18], [262, 243], [562], [], [159], [786], [], [835], [], [750], [162], [602], [585], [830], [701], [960], [497], [698], [736], [275], [909], [686], [999, 700], [824], [849], [296], [294], [], [116], [320], [], [755], [379], [71], [576], [962, 532, 923], [59], [718], [254], [882], [983], [], [463], [951], [993], [972, 500], [263], [738, 939], [92], [901], [671], [303], [842], [65], [29], [256], [49], [632], [883], [393], [652, 691, 895], [852], [93], [805], [53], [430], [626], [123], [892], [184, 170], [167], [209], [296], [987], [646], [320], [], [273], [832], [414], [34], [729], [340], [38], [136], [501], [335], [480], [103], [321], [849], [241], [726], [847], [], [836, 837], [240], [3], [76], [848], [651], [615, 465], [574], [656], [57], [594], [915], [362], [608, 515], [272], [457], [822], [888], [297], [640], [449], [425, 716], [517], [734], [440], [862], [], [12], [792], [738, 723], [773], [621, 412], [571], [], [479, 817], [1, 728], [], [22], [699], [857], [64], [66, 68], [6], [739], [], [619], [559], [240, 241, 238], [244], [921], [671], [166], [930], [568], [860], [821], [994], [255], [352], [646], [783], [786], [319], [358], [591], [923], [250], [23], [207, 692], [934], [269], [172], [834, 451, 457], [436], [692], [224], [451], [738, 580], [223], [815], [], [678], [443], [671], [946], [361], [432], [278], [], [349], [309], [269], [435, 876], [321], [738, 716], [315], [868, 968, 849, 505, 828], [], [386], [159], [361], [983], [874], [980], [849], [103], [236], [669], [201], [583], [941], [681, 810, 620], [505], [393, 108, 973], [671], [988], [538, 727], [277, 278], [471], [265, 266], [634], [745], [696], [578, 971, 982], [607], [582, 950], [437], [644], [543], [974], [], [894], [274], [126], [261], [947], [401], [100], [876, 445], [754, 632], [356], [289], [106], [676, 570], [569], [486], [385, 865], [650, 819], [973, 123], [809], [768], [227], [537], [444], [551], [760], [], [61], [908, 404, 812], [576], [734], [104], [], [19], [110], [], [83], [827, 534, 411], [350], [424, 423], [348], [973], [744, 657, 812], [382], [953], [911, 474, 894, 735], [324], [567], [], [420], [155], [], [841], [834, 906, 907], [505], [804], [286], [967, 441], [162], [128], [239], [6], [894], [123], [903], [478], [729], [], [99], [517], [677, 587], [2], [559], [437], [6], [651], [660, 757], [105], [872], [532], [119], [862], [583], [155], [316], [162, 882], [549], [809, 925], [235, 434], [440, 441, 455], [597], [86], [259], [450], [120], [8], [456, 872], [290], [723], [959], [756], [713], [631], [243], [962], [77], [], [714], [399], [940], [634], [986], [530], [98], [196], [360], [935], [148], [917], [606], [139], [973], [370], [446], [734], [529], [704, 444], [], [250], [41], [897], [193], [401], [821], [195, 811], [946], [819], [302], [251], [681, 620, 761, 508], [636], [273], [388], [911], [292], [546, 650, 818, 819], [993], [897], [487, 635], [384], [218, 215], [309], [258], [859], [221], [202], [462], [70], [997], [514, 655, 824], [463], [467], [732], [492], [368], [], [329, 126], [685], [408], [186], [700, 999], [647], [728, 703], [672], [492], [482], [928, 659, 949], [626], [715], [339], [581, 717, 479], [328], [431], [824], [247], [488], [279], [903, 650, 819, 851], [101], [692], [553], [35], [89], [888], [328], [580], [481], [278], [748], [40], [], [], [814], [722], [123], [410], [90], [874], [538, 727], [866, 595], [901], [276], [999, 700, 861], [508], [296], [654], [640], [226, 170], [651], [], [102], [433], [659], [], [614], [40], [278], [311], [852], [740], [135], [934], [139], [], [513, 875, 566], [382], [117], [734], [984], [592], [], [896, 804, 648, 861], [792], [353], [579], [947], [369], [879], [941, 942], [543], [521], [], [831], [890], [976], [840, 882], [763], [481], [273], [864], [221], [322], [705, 850], [521, 809, 827, 926], [284, 453], [993], [912], [728], [343], [], [575], [], [178], [400, 667], [670], [431], [], [955], [148], [329, 126], [836, 837, 869], [757], [68, 58], [775], [625], [129], [331], [138], [661], [969], [], [320], [621], [893], [603], [223], [505], [773], [], [913], [573, 479], [314, 126], [459, 445], [], [693], [805], [360], [913], [171], [615], [40], [785], [868, 968, 504], [72], [388], [23], [417], [793], [581, 479], [992], [853], [], [882], [317], [834, 630], [428], [503], [], [833], [569], [292], [309, 599], [938], [940], [695], [786], [104], [218], [314], [777], [398], [773], [498], [269], [], [175], [438, 728], [963], [475], [857], [912, 348], [], [138], [644], [851], [929, 452], [135], [116], [76], [31], [192], [271], [412], [790], [711], [205], [366], [995], [311], [627], [987], [581, 479], [], [321], [], [715], [], [357], [214], [840], [247], [], [191], [714, 679], [928], [140], [80], [232, 247], [606], [610], [486], [800], [568], [747], [527, 916, 664, 508], [288], [255], [915], [782, 664], [453, 454, 624], [463], [983], [583], [324], [652], [145], [385], [728, 636], [695], [295], [692], [209], [526, 799], [], [260], [204], [114], [228], [675], [782, 664], [347], [34], [780], [397], [314], [], [78], [114], [637], [865], [727], [], [417], [], [491], [891], [], [810, 878], [530], [262], [607], [531, 692], [391], [883], [104], [614], [453], [907, 440], [916], [254], [966], [], [502], [9], [659], [107], [953], [105], [749], [79], [376], [601], [861], [690], [942], [140], [638, 639], [326], [433], [515, 402], [342], [484], [915], [366], [883], [716], [983], [660], [673, 742, 526, 527, 782, 664, 508], [797], [343], [894], [558], [970], [548, 851, 598, 632], [830], [481], [945], [546, 402, 819], [991], [927], [521], [309], [133], [414], [327], [413], [70], [352], [160], [462], [918], [673, 526, 527, 664, 508], [711], [264], [274], [424, 423], [653], [335], [754], [465, 413], [], [544], [747], [893], [463], [869, 885, 568, 894], [433, 691, 983, 570], [594], [706], [647, 332], [441], [], [836, 837], [530], [84], [943], [829], [], [760], [], [396], [984], [629], [180], [229], [172], [520], [], [39], [207], [281], [356, 358], [297], [841], [106], [776], [], [225], [312, 311], [343], [405], [362], [993], [770, 842, 610], [870], [698], [220], [983], [424], [400, 667], [434], [748], [], [376], [203], [929], [887], [86], [], [47], [856], [575], [948], [751], [700, 999], [], [205], [284], [121], [442], [235], [622, 759], [541, 542], [537], [681, 620, 526, 527, 782, 664, 508], [991], [525], [926], [87], [72], [], [588, 790], [788], [375], [547], [739], [604], [777], [208], [], [236], [65], [132], [159], [], [276], [997], [129], [647], [637], [], [21], [489, 429, 981], [849], [868, 923], [208], [284], [960, 827], [773], [652, 413], [484, 871], [138], [613], [149], [358, 359], [454], [364], [429], [653], [677, 783], [308], [654], [850], [896], [146], [914], [45], [27], [690], [918], [734], [855], [423], [288], [33], [66, 68], [958], [481, 605], [581, 479], [990], [789], [948], [376], [279], [649], [588], [136], [244], [191], [472], [304], [], [320, 319], [629], [83], [265], [933], [259], [854], [736], [799], [284], [505], [172], [299], [543], [], [456, 489], [256], [], [205], [72], [903], [188], [85], [71], [545], [612], [982], [616], [475, 815], [505], [894], [815, 126], [776], [], [117], [180], [76], [], [858], [518], [622], [745, 851, 598], [946, 309], [445], [92], [127], [294], [472, 693], [570], [808, 879], [], [547], [595], [886], [551], [31], [966, 907, 572], [385], [214], [97], [491], [71], [652], [182], [505, 827], [152], [548], [661], [896, 943], [170], [165, 237], [671], [993], [258], [619], [619, 846, 504], [796], [798], [40], [649], [244], [912], [326], [280], [434], [36], [234], [958], [541, 542], [960], [659], [791], [908], [6, 976], [714], [150], [242], [187], [238], [987, 998, 463], [608, 652, 465, 597, 413], [650, 541, 819, 822], [395], [677], [329], [82], [404], [997, 947], [471], [58], [44], [33], [931], [333], [304], [974], [], [559], [280], [905, 721, 831], [555], [384], [82], [860], [592], [], [642], [995], [570], [923], [315], [762, 923], [513, 875], [868, 923, 521, 809, 926], [257], [801], [211], [580], [798], [227], [359], [746], [566], [660, 977, 978], [489], [247], [555, 247], [453], [565], [531], [762], [652], [43], [253], [443], [544, 469], [323], [702], [319], [485, 632], [820], [751, 573, 479], [972], [339], [15], [116], [111], [31], [989], [159], [973], [128], [525], [692, 790], [66], [565], [30], [212], [638], [], [456], [485, 851, 632], [225], [253], [807], [607], [467], [6], [641], [881, 579], [297], [492], [554], [418], [860], [379], [537], [67], [169], [673, 681, 620, 905, 526, 508], [803], [853], [751, 479], [757], [642], [], [194, 175], [984, 985], [562], [915], [675], [937], [71], [61], [288], [592], [711], [106], [59], [], [477], [873], [860], [79], [496], [435], [], [], [185], [331], [493], [232], [637], [31], [172], [469, 567, 505], [659], [670], [657], [193], [944], [559], [937, 941], [460, 437], [924], [326], [589], [228], [682], [907, 440], [764, 413], [7], [168], [570], [673, 681, 810, 620], [167], [12], [229], [907], [89], [896, 285], [514], [621], [], [391, 758], [], [670, 518], [671], [185], [599], [343], [326], [624, 884], [594], [306], [965], [581], [210], [410], [90], [485], [261], [584], [798], [141], [280], [51], [774], [320], [358, 360], [260], [456], [49], [810, 508], [551, 629, 631], [621], [295], [946], [], [821, 444], [606], [331], [711], [591], [], [333], [227], [329], [37], [948], [906, 834, 501, 630], [388], [789], [638, 639], [636], [203, 186], [246], [638, 639], [494], [110], [136], [154], [626], [866, 661], [484], [620, 681], [85], [425], [151], [94], [24], [380], [594], [590], [144], [488, 778, 600], [463], [49, 50], [193], [132], [201], [137, 975], [431, 281], [867, 517, 536, 510], [34], [], [218], [84], [97], [933], [320, 319], [599], [709], [698], [818], [255], [814], [905], [211], [711], [782, 851], [409, 892], [], [809, 923], [585], [588, 790], [554], [970], [34], [117], [673, 526, 527, 664, 508], [470], [101], [96], [371, 382], [228], [335], [414], [327], [126], [265], [], [618], [720], [803], [357], [582], [], [], [713], [851], [479], [431], [548], [721, 831], [330], [842, 764], [591], [236], [589], [505], [106], [228], [503], [713], [331], [651], [222], [149], [], [284], [810, 878], [707], [150], [467], [547], [850], [964], [586], [630], [180], [708], [342], [66], [], [874], [558], [], [195], [950], [20], [524, 461], [645], [514, 655], [169], [504], [612], [733], [965], [157], [382], [212, 251], [40, 46], [476], [166], [578, 982], [394], [187], [449, 536], [19], [752, 852], [809], [825], [447], [745], [208], [545], [407], [670, 518], [802], [941], [140], [200], [267], [73, 74], [985], [593], [], [387], [238], [826], [741, 697], [721], [642], [167], [930], [915], [585], [573], [546], [310], [927], [538, 668], [71], [887], [810, 878], [551, 629], [351], [44], [623, 784], [738], [160], [561], [164], [461], [136], [284], [86], [], [93], [], [835], [755], [57], [537], [243, 254], [923, 959], [248, 250], [], [], [], [974], [295], [90], [975, 698], [979], [719], [900], [710], [302], [449, 536, 557, 733], [383], [434, 435], [579], [276], [773], [280], [649], [338], [824, 735], [865], [431, 850], [144], [834, 435], [305], [978], [211], [538], [406], [676], [677], [389], [765], [819], [564], [992], [718], [792], [347], [870], [874], [371], [267], [795], [421, 981], [758], [33], [856], [60], [382], [985], [500], [605], [979], [770], [375], [6], [666], [884], [300], [702], [274], [789], [877], [479], [], [673, 681, 620, 526], [643], [340], [732], [842, 433, 638, 639], [145], [809], [], [849], [687], [397], [237], [839], [763], [487, 590], [222], [801], [616], [294], [482], [327], [532, 762], [], [881, 579], [807], [926], [781], [836, 837], [534], [869], [356], [514, 836, 837, 869, 501, 636], [924], [574], [494], [401], [588], [825], [266, 267], [7], [349], [635], [484], [893], [652, 847], [], [345, 690, 462, 463], [743], [619, 846], [842], [], [211], [9], [], [], [910], [309], [139], [906], [73], [930, 907, 470], [934, 923], [40, 46], [759], [152], [397], [936], [22], [], [963], [773], [630], [352], [954], [684], [918], [10], [191], [653], [242, 243], [], [561], [88], [145], [198], [147], [43], [773], [913], [23], [43], [264, 263], [915], [60], [930, 931, 415], [740], [84], [68], [479], [652, 465, 413], [745], [253], [828], [], [699], [254], [702], [41], [922], [457], [379], [83], [479, 511], [160], [796], [203], [447], [494], [770], [944], [839], [834, 836, 837, 457], [871], [236], [62], [184], [260], [153], [715], [144], [176], [], [966, 532, 470, 762, 923, 572], [262], [578, 903], [760], [605], [797], [64], [154], [570], [872, 759], [301], [659], [294], [575], [990], [552], [31], [336], [884], [954], [758], [901], [], [315], [873], [549], [406], [517], [151], [223], [920, 405], [898], [616, 913], [416, 602], [227], [918], [215], [565], [841], [991], [962], [648], [769], [214], [168], [483], [504], [284], [593], [337], [966, 907], [415], [665], [968, 532, 762, 923], [283], [98], [457], [], [690, 345], [468], [114], [608, 836, 837], [382], [344], [877], [569], [608, 617, 438], [459, 445], [211], [383], [977], [], [60], [171, 173], [280], [735], [949], [414], [792], [327], [343], [45], [830, 678], [127], [784], [327], [438], [949], [641], [252], [993], [340], [773], [111], [956], [656], [257], [803], [104], [673, 742, 526, 527, 782, 664, 508], [995], [379], [814], [673, 742, 508, 526, 664, 782, 412], [798], [635], [582, 631], [30], [822], [512], [689], [888], [349], [533], [537], [940], [537], [399], [429], [334], [694], [14], [52], [677], [99], [62], [412], [469], [834, 650, 402], [471], [199], [89], [321], [976, 977, 978], [46], [638, 639], [712], [401], [362], [450], [349], [805], [45], [433], [566], [987, 998], [85], [945], [928, 923, 960], [160], [217], [17], [659], [798], [769, 533, 824], [890], [], [359], [663], [179], [485], [353], [962], [51], [294], [], [309], [94], [483], [984], [964], [716], [500], [726], [718], [], [582, 519, 945, 948, 950], [272], [714], [894], [418], [723], [933, 923], [282], [974], [530], [962, 813, 567, 505, 827], [176], [627], [836, 875], [], [363], [], [852], [19], [177, 172], [926], [739], [348], [507], [362], [400], [991], [605], [66], [409], [383], [622, 759], [2, 3], [963], [478, 592], [580], [581, 479, 627], [845], [49], [216], [984], [148], [], [729], [385, 716], [425], [990], [622], [809, 618, 926, 959], [790], [182, 607], [534], [], [560], [350], [], [376], [92], [699], [801], [671], [417], [90], [484], [916], [572], [57], [877], [625], [479], [810, 508], [262], [118], [426], [159], [1], [905], [283], [508], [553], [879, 638, 639], [724], [389], [332], [250], [739], [475], [192], [91], [715, 652], [438], [396], [61], [401], [362], [920], [533], [327], [517], [145], [364], [572], [771], [689, 578], [518], [821], [296], [107], [582, 939, 940, 943], [297], [148], [829], [608], [916], [793, 830], [226], [941], [178], [845], [665], [159], [497], [535], [641], [839], [366], [514], [758], [673, 664, 526, 527, 782, 508], [756], [800], [855], [518], [], [], [], [905], [722], [319], [744, 652, 847, 657], [805], [771], [753, 282], [713], [872, 622, 759], [347], [808], [606], [530], [867], [76], [977, 978], [958], [850], [], [], [174], [707], [604], [424, 423, 636], [702], [559], [343], [662], [404, 895], [502], [931], [268, 179], [849], [418], [909, 923, 926], [501, 665], [18], [424], [423], [291], [568], [581], [708], [481], [187], [803, 555], [192], [595], [491], [737, 455], [], [641], [782, 664, 281, 285], [93], [783], [], [603], [731], [713], [], [84], [268], [567], [556], [618], [568, 655], [901], [321], [155], [981, 429], [692], [159], [318], [866, 575], [669], [201], [181], [968], [668], [641], [942], [263], [98], [958], [143], [681, 620, 526], [869], [927], [437], [662], [537], [358], [270], [248], [833, 913], [685], [419, 719], [404], [923, 934, 933], [928, 572], [], [695], [], [486], [962], [563], [26], [758], [752], [336], [752], [610], [95], [104], [], [633], [400, 667], [495], [533], [192], [8], [224], [594], [533], [884], [909, 532, 883], [518], [482], [194], [168, 159], [841, 894], [105], [31], [984], [678], [652], [669], [737, 901, 440], [682], [857], [310], [], [738, 834, 906], [], [326], [93], [913], [427], [583], [372], [878], [375], [242], [418], [643, 454, 917], [15], [962], [397], [316], [548], [520], [93], [468], [548, 851, 789, 632], [101], [38], [446], [481, 482], [], [568, 748], [440], [473], [150], [488], [95], [431], [578], [64], [510], [895], [], [597], [724], [399], [334], [35], [951], [288], [182], [177], [417], [820], [205], [], [654], [415], [123], [201], [207, 208], [], [241], [204], [615], [511, 581, 479], [512], [], [494], [89], [276], [], [713], [850], [999, 700], [699], [571], [453, 606], [732], [754], [285], [452], [], [139], [15], [755], [240], [851], [812], [723], [65], [139], [753], [221, 206], [581, 717], [972], [306], [339], [653], [80], [422], [390], [521], [514], [], [182], [611], [542], [342], [646], [311], [], [230, 478], [316], [402], [858], [612], [645], [330], [544, 521], [497], [584], [521], [607], [531], [], [593], [813], [100], [157], [364], [616], [521], [724], [440, 441], [199], [241, 238], [890], [523], [532], [585], [263], [662], [687], [349], [751], [90], [237], [781], [909, 828, 926], [168], [880], [956], [619], [486, 401], [523], [603], [846, 883, 532], [632], [187], [433, 693], [579, 881], [770, 979], [469], [718], [64], [589], [202], [238], [293], [229], [863], [], [416], [922], [892], [418], [573, 479], [515], [262], [958], [55], [303], [787], [529, 793, 831], [926], [], [141], [902], [9], [], [948], [845, 531], [746], [423, 424, 892], [462], [318], [314], [920], [759], [995], [484], [275, 276], [658], [541], [831], [690, 346], [205], [583], [424], [29], [87], [824], [18], [521], [513, 683, 558, 432], [451], [361], [745], [162], [316], [100], [438], [861], [666], [770], [605], [543], [129], [528], [782, 664], [583], [722], [724], [959], [177, 170], [888], [495], [189], [109], [54], [243], [531], [473], [659], [809, 969], [14], [594], [67], [193, 191], [58], [770], [208], [547], [232], [834, 836, 837, 906], [762], [423], [877], [274], [163], [928], [347], [54], [589], [407], [671], [740], [553, 493], [391], [962, 987, 923], [133], [836, 837, 629], [353], [], [554], [192], [633], [855], [232, 250], [307], [531], [713], [64], [311], [490], [62], [329], [694], [587], [102], [259], [378], [627], [], [49], [421], [82], [547], [203], [], [626], [993], [888, 839], [566], [62], [272], [891], [450], [319], [], [163], [985], [668, 562], [961], [681, 620, 526], [296], [], [428], [802], [930, 966, 907], [192], [522], [736], [240, 241, 238, 239], [314], [933], [], [552, 283], [535], [9], [387], [638, 639], [875], [519], [], [443], [], [], [704], [453], [811], [383], [116], [474], [205], [425], [632], [152], [], [160], [935], [334], [932], [705, 537, 248], [79], [981, 429], [723], [681, 620, 526, 664, 508], [349], [841], [860], [806], [], [406], [39], [389], [155], [238], [984], [654], [413], [676, 246], [872], [532], [885], [723], [111], [264], [546, 818, 819, 541], [426, 635], [454], [75], [98], [205, 246], [270], [992], [537], [510], [843], [271], [892], [321], [750], [565], [750, 735], [847], [924], [51], [575], [142], [574], [203], [683], [44], [626], [490], [208], [589], [878], [377], [799], [11], [275], [115], [265], [692], [321], [938], [496], [200, 244], [285], [], [179], [217], [616], [], [660], [698, 538], [708], [672], [664, 851], [131], [204], [362], [582, 943], [219], [868, 935, 809, 923], [273], [402], [571], [603], [626], [546, 402, 819], [957], [], [827], [496], [946], [187], [881], [281], [995], [475], [280], [146], [551], [425], [892], [618], [], [34], [390, 149], [758], [595, 730], [960], [307], [87], [819], [765], [727], [269], [175], [673, 681, 268, 620, 508], [683], [355], [841], [353], [262], [195], [383], [810, 878], [203], [328], [899], [733], [827], [367], [718], [986], [605], [757, 535], [618, 909, 827], [907, 440], [4], [521], [923], [], [674], [491], [758], [243], [806], [23], [679], [573], [816], [780, 914], [466], [644], [188], [], [873], [619], [419], [884], [567], [259, 261], [735], [801], [118], [424, 589], [521, 618, 809], [910], [181], [204], [], [741, 765], [44], [612], [313], [531], [529], [377], [902], [973], [921, 917], [934], [339], [803], [609], [820], [119], [676], [505], [110], [540], [682], [271], [488], [843], [629], [174], [651, 504], [], [874], [701], [667], [27], [577], [201], [31], [979], [927], [836, 837, 970], [435, 281], [918], [526], [38], [857], [476], [605], [628], [539, 316], [572], [233], [771], [666], [867], [596], [], [164], [388], [992], [412], [802], [988], [877], [268], [523], [87], [517, 600], [513, 650, 819], [], [569], [970], [219], [86], [320, 319], [44], [436], [962, 923], [2], [178], [424, 423], [853], [525], [], [589], [93], [190], [931, 868], [901], [722], [], [794], [809, 923, 925], [905], [821, 693], [224], [374], [775], [98], [886], [752], [139], [578, 585, 982], [22], [57], [460, 437], [810, 878], [287], [988], [451], [587], [361], [459, 445], [479], [822, 542], [100], [], [647], [574], [546], [], [604], [629], [557], [683, 558], [654, 734], [170], [629], [397], [297], [333], [252], [597], [823], [324], [421], [277], [834, 432], [858], [280], [430], [392], [941], [548, 851], [494], [158], [], [515], [89], [583], [266], [719], [467, 499], [264], [628], [788], [], [9], [569], [182], [162], [764, 413], [43], [760], [], [364], [920], [871], [351], [45], [], [591], [], [115], [141], [32], [516, 431, 797], [987, 998], [40], [686], [613], [352, 138], [576], [451], [539], [557], [908], [235], [142], [90], [659, 700], [300], [343], [], [409, 826], [718], [557], [826], [725], [522], [602], [63], [827], [406], [], [481], [777], [345, 730], [270, 279], [923], [327], [387], [779], [113], [867], [467], [989], [203], [108], [372], [474], [508], [760, 737, 886], [776], [61], [83], [220], [54], [721], [195], [765], [355], [644, 470], [93], [597], [763], [135], [608], [230, 232], [889], [376], [184], [], [111], [17], [364], [826], [53], [496], [797], [263], [505], [105], [717, 733], [639], [681, 620, 508], [762], [], [755], [49, 50], [897], [450], [240], [850], [693, 472], [880], [672], [217], [337], [948], [142], [989], [740, 440], [156], [591], [950], [204], [697], [234], [], [297], [926], [978], [25, 28], [324], [385], [454], [762], [673, 664, 526, 527, 508], [7, 8], [342], [159], [592], [806], [818], [613], [950], [900], [142], [878], [462], [501], [25], [915], [942], [373], [109], [87], [953], [364], [487, 619, 526, 846, 504], [62], [849], [605], [390], [4], [153], [340], [836, 837], [899], [606], [288], [102], [174], [587, 784, 477], [791], [557, 858, 738], [237], [405, 538, 603], [913], [436], [951], [10, 15], [208], [671], [670], [823], [154], [366], [40], [880], [672], [244], [392], [740], [830], [], [932], [992], [650], [811], [478], [624, 453, 454], [280], [15], [631], [351], [279], [963, 966, 532, 762, 923, 572], [902], [912, 716], [974], [387], [608, 741], [670], [111], [738], [286], [738], [944], [54], [294], [652, 764], [723], [112], [361, 759, 794], [], [246], [777], [777, 499], [19], [462], [92], [564], [109], [449], [254], [727], [428], [168], [415], [590], [479], [615], [66], [524, 461], [602], [990], [990], [], [733, 127], [983], [573], [474], [147], [], [539], [468, 407], [981], [309], [996], [160], [373], [663], [620, 508], [466], [85], [660], [792], [865, 850], [242, 180], [281], [502], [789], [524], [803], [682], [104], [729], [], [228], [259], [252], [339], [417, 866, 595], [208], [], [776, 650, 819], [57], [], [608], [643], [], [232, 248], [738], [90], [808], [820], [999], [640], [610], [611], [294], [617], [], [834, 906, 893], [784], [334], [403], [931], [389], [289], [188], [], [], [707], [987, 998], [503], [576], [524, 461], [619, 846], [338], [524], [], [267], [449], [15, 91], [277], [111], [73, 815], [613], [383], [143], [496], [968, 504], [849, 827], [365], [239], [666], [109], [550, 521, 651], [888], [77], [], [661], [968, 114, 504], [512], [672, 970], [490], [748], [272], [658], [962, 942], [373], [463], [140], [809, 567], [568, 825, 608], [620, 508], [], [560], [932, 415], [853], [745], [713], [981, 429], [679, 488, 695], [106], [536, 540, 510], [578, 689, 982], [50], [524, 461, 715], [263], [560], [525], [117], [732], [826], [137], [284], [608, 423], [64], [795], [], [185], [571], [89], [210], [339], [244], [151], [670], [711], [101], [50], [213], [715, 524], [708], [676, 269], [534], [479, 751], [], [520], [440], [977], [], [948], [], [70], [890], [489], [358], [868], [823], [171], [921, 764], [779], [887, 497, 406], [967], [370], [780], [10], [714], [890], [81], [92], [785], [587, 477], [737, 582, 440], [416], [138], [452], [444], [532], [9], [986], [667], [395], [897], [423], [89], [339], [764], [709, 836, 837, 767], [672], [370], [618, 469], [10], [991], [971], [67], [616], [281, 282], [659], [909], [832], [834, 906, 400], [837, 582, 954], [927], [699], [458], [110], [867], [], [690, 345], [335], [], [150], [221], [580], [308], [544], [271], [176], [316], [102], [], [346], [234], [714], [552], [828], [813], [26], [], [269], [232], [522], [437], [249], [708], [], [836, 542, 822], [600], [446], [125], [857], [278], [], [418], [655], [162], [477], [623], [970], [508], [697, 478], [756], [985], [], [593], [338], [13], [57], [230, 231], [649], [987, 943], [], [860], [193], [290], [318], [675], [360], [436, 479], [], [589], [238], [772, 488], [481], [947], [441], [770, 674], [491], [5], [86], [424], [100], [537], [332], [596], [783], [43], [563], [117], [305], [259], [869, 457], [687], [988], [186], [804], [99], [213], [554], [933], [400, 667], [318], [652], [619], [], [123], [988], [829], [280], [223], [578], [818], [534], [230], [552], [673], [672, 669], [698], [308], [144], [211], [222], [916], [8], [234], [301], [321], [8], [487], [44, 633], [346], [514], [640], [803], [882], [571], [820], [494], [673, 620, 527, 664, 508], [70], [519], [166], [582], [590], [19], [316], [524, 461], [80], [724], [931], [], [127], [888], [756], [458], [688], [4], [20], [773], [398], [203], [395], [795, 615], [735], [905], [23], [631], [772], [555], [263], [64], [796], [467], [727, 538], [222], [], [], [277], [358], [471], [328], [832], [289], [741], [399], [112], [867], [10], [22], [832], [234], [647, 332], [896, 804], [], [241, 238], [], [326], [523], [], [12], [65, 973], [477], [370], [681, 620, 526, 664], [267], [728], [834], [615], [920], [553], [201], [822], [789], [710], [], [715], [387], [458], [418, 623], [95], [898, 762, 572], [485, 526], [363], [380], [74], [538, 858], [392], [769, 438], [389], [930], [563], [426], [29], [798], [844], [696], [470], [194], [383], [], [922], [198], [880], [543], [291], [40, 46], [953], [980], [297], [310], [183], [849], [174], [], [433], [679], [835], [725], [546, 806], [156], [235], [727], [418], [260], [529], [517], [21], [553], [97], [771], [780], [945], [], [388], [822], [605], [891], [207], [], [319], [943], [672], [643, 903], [905, 532, 799], [208], [292], [478], [156], [], [89], [883], [545], [875], [448, 637], [230], [520], [184], [190], [561], [965], [317], [759], [35, 37], [], [99], [993], [2], [868], [692], [76], [244], [169], [646], [903], [], [205], [772], [185], [145], [80], [936], [236], [21], [263], [873], [696], [960, 910], [582], [994], [], [464], [193, 189], [419], [486], [342], [831], [199], [1], [735], [], [807], [809, 925], [572], [677, 587, 783, 784], [251], [778], [311], [325], [777], [768], [143], [311], [45], [], [420], [609], [961, 499, 728], [644], [881], [913], [130], [16], [472], [836, 837, 445], [862], [675], [187], [896], [884, 501], [695], [610], [391], [696], [867], [779], [167], [904], [812], [761], [652, 597, 764, 413], [835], [], [735], [126], [634], [998], [927], [0], [540], [659, 556, 827], [101], [48], [586], [811], [187], [131], [442], [576], [484, 536], [842], [738], [393], [367], [], [973], [284], [467], [58], [38], [985], [720], [644], [90], [97], [260], [38], [915], [479], [561], [616], [497, 406, 857], [68], [595], [344], [303], [490], [59], [842], [829], [584], [356], [544], [673], [80], [60], [253, 846], [504], [188], [902], [834, 906], [329], [624], [0], [795], [865], [697, 610], [641], [389], [547], [20], [235, 174], [754], [], [608], [165], [381], [0], [978], [658], [650, 402, 819], [209], [432], [561], [241], [], [426], [117], [295], [662], [382], [236], [637], [394], [793], [358], [544], [305, 302], [165], [427, 756], [181], [918], [645], [585], [808], [69], [993], [303], [135], [165], [87], [324], [679, 455], [814], [198], [918], [223], [240, 238], [370], [462], [979], [29], [4], [122], [], [338], [411], [211], [772], [557], [879, 242, 850], [531], [688], [5], [251], [761], [158], [491], [591], [384], [225], [571], [113], [259], [], [18, 86], [815], [955], [133], [294], [63], [795, 703], [483], [265], [910], [292], [140], [905], [270, 207], [535], [205], [603], [537], [804], [553], [165], [654], [155], [164], [], [996], [913], [971], [42], [714], [182], [54], [240, 241, 238, 239], [938], [744, 657], [908, 404], [240, 241], [318], [784], [185], [591], [424], [920], [375], [492], [471], [687, 406], [238, 241], [501], [], [327], [774], [41], [718], [], [133], [89], [736], [79], [627, 795], [], [768], [417], [769, 418, 772, 623], [595], [], [753, 894], [135], [416], [77], [63], [495], [766], [], [972, 825], [892], [997, 947], [588], [895], [692], [952], [54], [938], [909], [288, 290], [732], [892, 409], [383], [297], [], [731, 861], [64], [80], [98], [766, 341], [204], [257, 222], [524, 461], [933], [648], [242], [329], [478], [355, 912], [535], [35], [311], [884], [464], [760], [527, 664, 508], [], [453], [386], [800], [191], [716, 765], [329], [698], [578, 982], [851, 548], [33], [710], [14], [161], [105], [73], [854], [410], [102], [], [], [136], [137], [841], [310], [400, 667], [47], [506], [572], [270], [85], [764], [692, 969, 588, 728], [21, 22], [325], [798], [33], [141], [109], [673, 681, 620, 526, 527, 664], [794], [500], [567], [335], [506], [829], [33, 983], [45], [965], [550], [447], [510], [933], [976], [109], [643], [987, 998], [400, 667], [371], [686], [25], [], [864], [37], [681, 620], [196], [744], [473], [849], [612], [542], [675], [58], [], [612], [253], [805], [], [368], [412], [647], [768], [93], [260], [], [200], [277, 278], [775], [902], [382], [36], [357], [198], [568], [374], [171], [105], [762], [474], [166], [], [846], [855], [25], [727], [893], [432], [820], [801], [962, 923], [405], [721], [2], [911], [16], [652, 465], [], [741], [849, 725], [571], [582], [122], [555], [909, 567], [957], [360], [38], [332], [760, 664], [855], [644], [930], [591], [610], [66], [728], [524, 461], [511], [253], [769], [224], [694], [817, 479], [595], [949, 953], [692], [683, 558], [212], [294], [615], [626], [441], [57], [233], [], [363], [821], [], [], [149], [], [207], [842, 500], [679], [756], [64], [676], [399], [710], [852], [548], [946], [320], [884], [389, 391], [404], [945], [485], [392], [], [83], [648], [864], [466], [25], [], [748], [450], [239], [113], [666], [755], [91, 14], [0], [323], [393, 108], [547], [809, 923, 926], [52], [42], [428, 670], [9], [471], [736], [476], [685], [458, 401], [420], [300], [], [625], [986], [553, 493, 883], [915], [164], [122], [], [545], [42], [780], [963, 964, 567, 572], [17], [565], [458], [761, 223], [669], [95], [], [224], [886], [27], [196], [353, 372], [782, 664, 810, 508], [456], [860], [217], [183], [281], [975], [71], [763, 597], [634], [881], [937, 923, 963], [857], [738, 949], [527, 664, 508], [92], [21], [716], [229], [67], [384], [752], [], [544], [98], [983], [273], [881], [400, 667], [754], [680], [139], [], [329, 108], [554], [756], [544], [173], [394], [506], [986], [140], [768], [776], [799], [865], [], [481], [272], [880], [810, 878], [532], [125], [350], [39], [950], [852], [41], [911], [528], [759, 622], [700], [844], [477], [90], [554, 628], [720], [652], [737, 455, 760, 440], [499], [104], [], [964], [459], [557], [41], [795], [], [67, 58], [833], [322], [490], [948], [964], [507], [429], [926], [917], [84], [716], [716], [411], [74], [707], [642], [472], [280], [61, 62], [262], [342], [618], [7], [670], [341, 342], [841], [622, 759, 478], [195], [236, 237], [], [517], [226], [357], [864, 717], [938], [450, 462], [39], [134], [763], [83], [900], [323], [353], [739], [19], [668], [317], [865], [], [222], [896], [19], [], [654], [319], [463], [30], [905], [879], [27], [548, 851], [168], [122], [977], [882], [67], [949, 953], [96], [584], [57], [716], [711], [], [533], [420, 559], [232], [849], [866], [793], [836, 837, 151], [981], [385, 101], [581, 818], [162, 167], [396], [367], [281], [964], [898], [530], [619, 750, 846, 721], [143], [], [509], [52], [206], [552], [574], [707, 886], [275], [429], [448], [512], [515], [], [709], [226], [543], [477], [973], [544], [853], [265], [812], [306], [], [519], [640], [62], [59], [392], [548], [827], [372], [364], [835], [5, 6], [297], [0], [], [328], [497], [946], [721], [138], [593], [904], [93], [862], [769], [403], [487], [969], [488, 635], [898], [53], [39], [358, 359], [686], [666], [493], [391], [771], [7], [864, 919, 733], [124], [380], [928, 923, 960], [78], [174], [230, 231], [437], [127], [88], [608, 718, 975], [178], [135], [66], [64, 55], [484], [], [586], [975], [327], [543], [789], [179], [43], [518, 671], [497], [113], [238], [354, 676], [968], [289], [891], [849, 725], [389], [791], [652, 465], [554], [457, 338], [722], [22], [382], [891], [948], [377], [686], [985, 324], [374], [284], [915], [], [954], [561], [711], [710], [], [789], [], [480], [171], [382], [628], [893], [115], [642], [488], [368], [256], [636], [158], [714], [], [726], [504, 985], [], [137], [668], [581], [133], [962, 923], [631], [708, 698, 671], [125], [907, 440], [772], [86], [], [118], [288, 290], [487], [438], [50], [28], [379], [108], [], [696], [83], [626], [253], [673, 681, 620, 664, 526, 527, 508], [112], [396], [770], [865, 850], [152], [847], [936], [22], [593], [776], [562], [769], [549], [339], [793, 697], [41], [654], [], [863], [888], [370], [962, 762, 966, 532, 923, 572], [146], [489], [51], [755], [491], [526], [940], [9], [383], [575], [181], [347], [496], [783], [513, 683, 875, 558], [600], [789], [960, 923], [529], [372], [815], [295], [566], [189], [845], [170], [589], [377], [], [750, 655], [139], [684], [595], [869, 618, 824], [842, 445], [517], [223], [903], [833], [800], [250], [], [], [454], [163, 168], [299], [432], [719], [417], [62], [980], [575], [], [850, 220], [156], [298], [367], [167], [194], [407], [547], [481], [36, 58], [91], [700, 999], [203], [633, 937, 333], [354], [678, 636], [315], [187, 700], [948], [849], [543], [807], [351], [918], [328], [453, 633], [834, 906, 630], [802, 518], [677], [520], [236, 237], [924], [129], [21], [595, 866], [832], [], [], [646], [182], [46], [367], [985], [995], [760], [384], [556], [722], [], [412], [], [10], [354], [168], [545, 589, 861], [96], [25], [101], [150], [225], [472, 693], [235], [194], [294], [491], [109], [416], [150], [249], [140], [285], [867], [787], [886], [986], [437], [244], [482, 754], [52], [497], [], [811], [305], [368], [302], [160], [290], [469], [], [531], [565], [677], [933], [656], [756], [223], [834, 906], [13], [582, 692, 790], [983], [889], [], [92], [35], [254], [685], [552], [578], [504], [781], [430], [696], [690], [928], [898, 836, 837, 774, 842, 502], [288], [560], [208], [453], [153], [995], [562], [216], [267], [523], [252], [566], [893], [467, 341], [108], [72], [545], [597], [10], [659], [475], [228], [736], [807], [883, 532, 762, 923, 572], [930], [355], [562], [847], [872, 447], [], [365], [64], [], [271], [145], [613], [325], [261], [654], [670], [301], [412], [994], [747], [470], [], [404], [859], [682, 562], [853], [997], [232], [78], [399], [922], [], [946], [371], [855], [615], [734], [601, 578, 689], [212], [], [864], [165], [47], [113], [418], [741], [735], [738, 944], [742], [789], [651], [572], [142], [299], [110], [502], [916], [171], [484], [673, 526, 527, 664, 508], [738], [823], [235], [97], [990], [595], [459], [930], [104], [555], [594], [624, 454], [126], [569], [827], [1], [343], [18], [], [139], [643, 474], [420], [458], [], [853], [], [886], [698], [939, 943], [812], [342], [74], [132], [82], [279], [228], [802], [947], [425], [844], [399], [25], [381], [394], [63], [297], [], [477], [21], [552, 151], [135], [338], [424, 423], [482], [876, 435], [524, 461, 501], [498], [178], [84], [602], [328], [900], [103], [703, 323, 998], [281], [588], [145], [441], [440], [1], [450], [404], [680], [864, 627], [231], [122], [29], [900], [97], [624], [546], [171, 268], [894], [887, 406], [508], [25, 28], [723], [671], [148], [269], [889], [514, 464], [557], [709, 748], [419], [198], [160], [166], [836, 837], [188, 189], [68], [359], [462, 792], [], [160], [454, 917], [487], [762], [583], [97], [46], [630], [877], [652], [109], [400, 667], [110], [254], [754], [253], [296], [293], [359], [448], [117], [99], [267], [606], [990], [11], [531], [79], [507], [802], [719], [488], [], [798], [953], [323], [398], [130], [226], [311], [65], [287], [901], [144], [361], [218], [650], [673, 681, 620, 526, 527, 664, 508], [130], [621], [739], [577], [465, 413], [530], [106], [122], [491, 634], [68], [256], [688], [836, 837, 869], [307], [758], [573], [687], [641], [792], [], [855], [99], [564], [309], [913], [700, 999], [451], [725, 505], [5, 6], [431], [420], [713], [190], [], [548, 851], [630], [722], [28], [646], [606], [428], [587, 784, 477], [91], [959], [718], [626], [668], [476], [500], [920], [555], [284], [524, 461], [33], [89], [991], [99], [188], [221], [476], [274], [607], [184], [304], [], [165], [299], [153, 266], [457, 920], [20], [75], [766], [489], [162], [376], [108], [818], [630], [535], [899, 725, 572], [630], [910], [833], [], [412], [], [893], [589], [938], [783], [384], [], [333], [716], [262], [701], [249], [240, 241], [221], [369], [], [133], [378], [437], [131], [698], [921, 692, 917], [623, 917], [619, 846, 721, 831], [889], [441], [549, 692], [70], [459], [784, 587], [147], [841], [318], [627], [343], [507], [489, 243], [391, 758], [911], [736], [884], [883], [991], [618], [758], [754], [873], [39], [379], [518], [261], [830], [569], [], [21], [451], [291], [72], [753], [489], [802], [531], [674], [698], [832], [157], [47], [], [888], [406], [778], [515, 836, 837], [694], [596], [], [424], [907, 470], [573], [225], [56], [850, 760], [465], [684], [850, 765], [950, 951], [365], [881], [43], [], [108], [61, 62], [952], [92], [82], [913], [503], [567], [589], [481], [246], [756], [53], [599], [793], [200], [244, 537], [58], [178], [904], [363], [988], [616], [126], [], [551], [83], [632], [400], [442], [], [70], [81], [716], [311], [391], [335], [965], [], [433], [685], [334], [343], [474], [395], [180], [14], [977, 978, 437], [761], [247], [152], [221], [973], [355], [341, 572], [374], [661], [412], [977], [558], [742], [133], [], [259], [895], [442], [105], [117], [530], [216], [847], [772], [805], [849], [], [44], [824], [298], [983], [340], [960, 931, 923], [903], [353], [162], [], [364], [843, 602], [348], [494], [485, 632], [402], [100], [352], [704], [4], [51], [855], [732], [176], [214], [], [849, 505], [107], [79], [730], [185], [757], [13], [844], [708], [624, 453, 454], [319], [79], [527, 592, 664], [215, 218], [943], [250], [992], [519], [533], [986], [922], [610], [633, 769], [21], [475], [309], [716], [42], [560], [222], [796], [514, 515], [325], [498], [216], [497], [591, 721, 885], [839], [378], [612], [893], [653], [956], [], [878], [374], [417], [569], [], [686], [328], [], [], [688], [725], [423], [], [65], [711], [761], [643], [987, 998], [720], [543], [], [303], [], [389], [235], [1], [86], [760], [38], [9], [612], [785], [780], [80], [50], [103], [939, 943, 945], [564, 750], [854], [857], [253], [113], [981], [178], [470], [119], [210, 852], [448, 637], [882], [526, 784], [651], [], [878], [351], [33], [771], [578], [735], [681, 620, 508], [], [4], [107], [311], [71], [270], [967, 504], [102], [246], [699], [408], [940], [], [510], [578, 689, 601], [182], [], [881], [946], [186], [683], [517, 540, 510], [946], [508], [630], [281], [323], [155], [297], [], [619, 846], [555], [882], [], [992], [613], [843], [796], [733, 541, 542], [546, 650, 402, 818, 819], [133], [465], [85], [436], [16], [731], [119], [441], [457, 834], [252], [529, 667], [982], [909, 567, 827], [619, 750, 846, 721], [360], [726], [822], [190], [108], [637], [417], [90], [172], [836, 837], [339], [764], [804], [90], [30], [234], [331], [600], [506], [960, 582], [419], [797], [620], [173], [744, 657], [135], [863], [813], [935], [824], [494, 442], [261], [787], [], [191], [781], [372], [753], [526], [148], [963, 945], [375], [770, 788], [296], [854], [908, 895], [488], [439], [121], [314], [101], [275], [618], [558], [582, 937, 938], [404, 895], [386, 101], [305], [733], [165, 234], [608, 428], [396], [], [88], [430], [625], [594, 579], [74], [309], [638, 639], [], [298, 63], [622], [821], [658], [617], [348, 349], [1], [694], [695], [809, 926], [785], [244], [951], [520, 431], [487, 605], [606], [406], [199], [624], [76], [609], [743], [933], [131], [548, 598, 632], [836, 837, 655], [762], [553], [254], [178], [197], [553], [88], [555], [6], [514, 515], [245, 183], [15], [488, 679], [], [717], [920], [267], [989], [999], [634], [313], [400, 667], [366], [839], [635], [851, 632], [713], [597], [71], [435], [888], [457, 834, 906], [787], [292], [400, 667], [808], [176], [975], [106], [748], [487, 590], [104], [30], [973], [818], [20], [866], [], [514, 655], [643, 306], [251], [518, 652, 465, 413], [597], [565], [407], [738], [21], [814], [883], [956], [365, 379], [431], [312], [872], [695], [621], [928], [187], [199], [625], [578], [179], [750, 414], [263], [972], [51], [367], [], [402, 881], [961, 963, 964], [655], [852], [887], [891], [809, 925, 923], [], [458], [702], [144], [], [205], [579], [708, 596], [114], [45], [717], [426], [821], [588, 790], [890], [33], [392], [325], [738], [145], [800], [511], [12], [518], [272], [398], [560], [853], [868, 532, 441, 762, 923], [], [88], [741], [563], [267], [545, 745, 619, 818, 831], [607], [970, 795], [795], [313], [701], [763], [169], [839], [351], [641], [], [833], [26], [], [644], [], [318], [517], [986], [], [], [321], [895], [7], [800], [454], [233], [850, 211], [153], [874], [740], [20], [323], [573], [946], [133], [207], [889], [492], [371], [905], [811], [576], [218], [562], [], [713], [920], [495], [346], [530], [294], [8], [343], [253], [713], [194], [959], [903], [661], [321], [245], [890], [694], [141, 142], [813], [270], [322, 946], [333], [714, 402], [], [486], [418, 563], [602], [456], [328], [956], [701], [], [691], [774], [824], [836, 837, 979], [141], [489], [557], [825], [382], [765], [476], [780], [476], [933], [697], [], [388], [945], [738, 653], [799], [358], [945], [446], [548, 664, 526, 527, 508], [277], [826], [350], [616], [244], [], [56], [644, 470], [44], [708], [351], [742], [989], [485], [767], [586], [306], [873], [236], [352], [567], [968, 504], [788], [18], [954], [404], [965], [728], [34], [], [990], [586], [205, 174, 223], [420], [415], [], [761], [649], [552], [972], [898, 692], [60], [974], [411], [699], [659], [271], [945, 939, 943], [53], [382], [76], [730], [944], [462], [573], [346], [328], [827], [153, 265], [315], [776], [908, 895], [578, 650, 818], [770], [452], [683], [319], [329], [747], [], [885], [], [713], [315], [10], [455], [119], [661], [744, 657], [901], [963], [533], [515, 655, 818, 731, 608, 630], [805], [555], [363], [513, 875, 402], [120], [391], [751, 479], [], [110], [245], [761], [682], [], [693], [301, 304], [403], [320], [462, 655], [495], [820], [481], [46, 47], [891], [656], [], [479], [156], [10], [666], [637], [563], [261], [264], [106], [895], [905, 619, 846, 831], [72, 815], [], [282], [851], [109], [304], [738], [634], [695], [400], [659, 940, 813], [780], [643, 570], [596], [938], [], [318], [495], [36], [790], [], [518, 665], [101], [487], [772, 949], [75], [994], [177], [929, 338], [313, 414], [517, 540], [865, 850], [287], [365], [631], [910], [822], [845], [554], [874, 779, 920], [24], [824], [763, 597], [953], [352], [650], [91], [51], [758], [102], [271], [481, 482], [929], [182], [234], [111], [154], [955], [162], [653], [150], [70], [514, 652], [604], [661], [635], [962], [211], [195], [603], [892], [], [772], [322], [838, 551, 629], [993], [393], [582, 945], [322], [349], [997, 588, 947, 790], [451], [583], [703], [167], [128], [136], [466], [34], [964, 937, 945], [683], [605], [625], [553], [405], [252], [789], [784], [846], [770, 543], [949], [145], [547, 716], [301], [90], [896, 905, 435], [499], [252], [896], [206], [814, 977, 978], [594], [369], [770, 830, 608], [442], [703], [100], [980], [66], [890], [715, 524, 461], [329], [827, 926], [28], [608, 489], [578, 689, 703], [8], [963], [224], [], [132], [674], [641], [740], [452], [301], [], [835, 708], [770, 791, 480, 502], [465], [684], [898], [972], [947], [635], [74], [], [334], [205, 213], [312], [883], [602], [222], [303], [299], [773], [305], [255], [923, 928], [665, 671], [], [364], [524], [296], [197], [336], [945], [386], [313], [942], [826], [823], [506], [357], [644, 532], [458], [376], [642], [194], [181], [], [45], [296], [118], [19], [386], [604], [752], [404], [66], [678], [572], [618], [147], [690], [295], [5], [245], [552], [683, 558, 432, 566], [152, 155], [748], [257], [588], [781], [244], [553], [731], [358, 359], [622, 759], [618], [38], [12, 475], [710], [662], [566], [919], [], [206], [92], [851], [], [246], [957], [224, 223], [29], [892], [433], [673, 929, 681, 620, 526], [12], [994], [781], [92], [555, 570], [195], [692], [955], [284], [812], [272], [137], [741], [902], [972], [162], [929], [875], [914], [949, 927], [146], [735], [297], [24], [896, 435, 794], [405], [16], [], [575], [276], [824, 836, 837], [190], [325], [800], [292], [341], [], [718, 975, 536], [950], [979], [853], [449], [226], [132], [], [900], [422], [832], [388], [699], [759], [96], [0], [], [831], [997], [489, 733], [], [432], [672], [51], [44], [710], [503], [559], [357], [904], [272], [866], [33, 35], [962, 935, 937, 923], [364], [300], [912, 339], [533], [762, 884], [863], [95], [829], [573], [822], [288], [89], [], [251], [920], [200, 204], [130], [], [864, 479], [8], [750, 564], [577], [533], [], [33], [680], [429], [67, 68], [681, 620], [97], [977], [157], [], [523], [34], [681, 810, 620, 508], [338], [699], [142], [746], [812], [951], [88], [349], [431], [911], [107], [475], [766], [674, 333], [162], [647], [384], [819], [122], [754], [907], [153], [652, 764], [342], [406], [430], [56], [25], [], [252], [897], [302], [365], [108], [788, 502], [365], [28], [680], [863], [955], [68], [433], [], [539], [566], [212], [893], [76], [508], [479, 817], [742], [255], [267], [], [815], [920], [230], [637], [465], [516, 520], [876, 435, 794], [750], [], [572], [489], [668], [798], [306], [619, 846], [929], [774, 788, 502], [236, 165], [636], [666], [154], [532], [491], [765], [220], [115], [952], [135], [889, 486], [], [403], [792], [], [144], [94], [146], [554], [688], [118], [768], [517, 847], [640], [197], [], [69], [327], [790], [17], [199], [628], [135], [226], [933, 923], [735], [432], [286], [698], [189], [554], [346], [252], [433, 639], [292], [828], [849], [995], [], [292], [652], [884, 406], [241], [680], [275], [905, 750, 721], [713], [373], [399], [487], [897], [659, 969], [102], [289], [477], [216], [651], [868], [930], [247], [319], [673], [235], [829], [524, 461], [75], [735], [111], [591], [], [886], [711], [922], [318], [629], [797], [434], [867], [989], [203], [], [328], [318], [823], [770], [421], [251], [802], [938], [890], [553, 493], [173], [394], [914], [489], [262], [12], [274], [216], [278], [803], [592], [546, 402], [654], [25], [], [839], [347], [615], [662], [706], [840], [886], [535, 479], [472], [513], [871], [882], [352], [880], [607], [975], [25], [898], [977, 978], [], [39], [146], [219], [517], [], [528], [477], [721], [371], [192], [300], [], [820], [], [], [31], [629], [822], [614], [239], [820], [210], [615], [685], [], [836, 837, 879, 535], [197], [], [663], [356], [540], [273], [276], [299], [263], [291], [887], [768], [76], [466], [513], [863], [51], [850], [347], [256], [218], [490], [239], [581, 751, 479], [574], [819], [478], [], [655], [685], [75], [545], [358], [987], [265], [738, 470], [786], [226], [702], [535], [165], [977, 978], [346], [218, 215], [224], [928, 923, 960], [907, 440], [133], [735], [50], [827], [752, 852], [891], [327], [386], [51], [96], [838], [802], [20], [129], [514], [475], [581, 656, 475, 479], [], [928, 712], [488, 695], [338], [], [119], [604], [802], [617], [640], [523], [515], [518], [306], [414], [829], [403], [64, 55], [138], [4], [598], [232, 239], [949], [406], [361], [704], [756, 412], [629], [], [968, 967], [748, 636], [197, 198, 199], [738], [790], [251], [166], [78], [332], [527], [941], [502], [878], [492], [9], [82], [966, 907], [783], [806], [453, 454, 526, 527, 782, 664], [127], [190], [], [723], [656, 475, 479], [674], [650], [184], [117], [649], [418], [659], [354], [770], [681, 620, 526, 527, 782, 664, 508], [876], [], [704], [728], [267], [741], [257, 222], [542], [213], [937], [513, 776, 875], [402], [], [599], [983], [240, 241], [], [], [188, 190], [134], [322], [12], [896], [128], [470, 862], [668], [350], [608], [], [230], [], [], [843], [467], [872, 622, 759], [720], [], [106], [14], [399], [257, 222], [373], [223], [144], [800], [129], [434], [983, 801], [335], [9, 340], [953], [751, 479], [588], [972], [47], [303], [990], [547], [637], [686], [517], [530], [918], [834, 906], [963], [820], [318], [379], [983], [815], [780], [839], [272], [210], [740, 783, 784], [321], [570], [661], [593], [84], [581, 661, 479], [611], [730], [868, 567], [794], [348], [906], [70], [], [11], [674], [872], [666], [72], [805], [290], [289], [357], [392], [206], [859], [86], [237], [638, 639], [140], [493], [136], [990], [577], [728, 412], [689], [14], [806], [544], [139], [560, 981], [194], [844], [428], [692], [366], [682], [320, 319], [883], [838, 631], [894], [622], [191], [94], [67], [992], [103], [603], [43], [881], [278], [348], [804], [746], [262], [218], [619, 532, 846], [522], [389], [757], [300], [860], [781], [692], [210], [349], [], [472], [660], [398], [489], [], [523], [251], [], [386, 101], [847], [202], [896], [608], [979], [911], [959], [], [997], [51], [335], [731], [148], [693, 919, 472, 733], [450], [731], [984], [847], [71], [223], [142], [955], [], [], [576], [684], [438, 126], [739], [328], [890], [778], [496], [131], [923], [734], [374], [244], [673], [499], [700], [986], [964, 923], [92], [81], [836, 593], [326], [49, 50], [14], [250], [793], [391], [937], [210, 178], [823], [753], [122], [11], [435, 876], [1], [933], [263, 247], [719], [708], [761], [], [644], [766], [961], [774], [693], [738], [646, 884, 406], [533], [373], [], [712], [], [231], [604], [148], [595], [], [1], [905, 789, 799], [727], [804, 896], [842, 978], [986], [769], [532, 762], [434], [636], [205], [504, 968, 254], [532, 495], [132], [583], [619], [992], [783], [904, 905, 968, 610, 504], [294], [456], [444], [], [56], [140], [478], [3], [784, 499], [336], [639], [304], [319], [692], [379], [759], [738, 580], [788], [857], [114], [464, 608, 610], [192, 186], [791], [905], [85], [128], [38], [782, 851, 664], [584], [690], [72], [217], [160], [428], [959], [653], [491], [391], [923], [234], [757], [41], [169], [902], [790], [992], [553], [720], [795, 796], [], [575], [985], [604], [460], [934], [783], [368], [296], [792], [608, 610, 531], [67], [630], [721], [688], [651], [225], [648], [391], [468], [8, 7], [696], [371], [190], [882], [55], [859], [985], [22], [595], [326], [189], [228], [772], [635], [677], [915], [0], [774], [273], [208], [435], [197], [985], [922], [571], [66, 68], [295], [247], [740], [986], [934], [747], [631], [], [570, 691], [780], [524], [73, 815], [35], [988], [879], [623], [31], [918], [258], [757], [768], [2, 3], [671], [744, 657], [959], [177], [434], [827], [354], [587], [635], [168, 211], [833], [398], [761], [732], [222], [127], [52, 111], [291], [368], [107], [700], [351], [752], [335], [834], [454], [154], [872], [767], [334], [893, 446], [555], [830], [661], [428], [180], [479], [529], [603], [242], [320], [310], [349], [268], [840], [256], [955], [], [892], [423], [], [102], [514, 655], [421], [536, 718, 814, 977, 978], [83], [506], [777, 623], [977], [490], [400, 667], [404], [197], [229], [25], [425], [], [764, 597], [477], [584, 523], [], [42], [970, 795, 796], [745], [854], [864], [129], [831], [136], [939], [], [339], [470], [918], [319], [580], [769], [990], [188, 189], [9], [851], [460], [96], [893], [933], [968], [908, 404], [421, 825], [923], [519], [], [642], [28], [811], [110], [481], [102], [797], [868], [762, 934], [375], [], [581, 479, 874, 751], [876], [267], [40], [545], [495, 532], [30], [352], [433], [413], [872, 764], [365], [], [322], [719], [650], [], [212], [517], [863], [325], [], [791], [275], [562], [854], [168], [606], [19], [862], [], [175], [619], [], [252], [868, 415], [669], [526], [132], [310], [786], [618, 926], [708], [901], [109], [774], [151], [544, 909], [370], [62], [960, 954], [954], [628], [2], [52], [306], [963, 809, 923], [146], [803], [673, 681, 620, 526, 664, 508], [774], [], [235], [334], [779, 654], [617], [10, 478], [123], [7], [64, 55], [580], [], [841], [458], [46], [512], [221], [229, 200], [111], [443], [171], [376], [991], [178], [740], [165], [636], [257, 222], [319], [347], [], [641], [691], [177], [495], [875, 671], [540], [850], [923, 959], [516, 750, 431], [896, 804, 905, 700, 799], [891], [162], [393], [105], [217], [934, 923], [358], [125], [702], [469, 919], [131], [654], [897], [143], [439], [166], [632, 733], [630], [907, 440], [677], [614], [80], [], [446], [463], [214], [792], [874], [258], [810, 878], [25], [255], [990], [522], [507], [993], [217], [827], [890], [6], [769, 695], [488, 695], [872], [510], [803], [305], [35], [898, 671], [277], [174], [655], [87], [], [758], [385], [930, 868, 968, 923], [971], [451], [226], [543], [330], [770, 788, 630], [421], [760, 827], [345], [292], [330], [197], [477], [], [603], [238], [132], [305], [335], [789, 799], [171], [212], [604], [420], [818, 819], [529, 669], [269], [460], [277], [479, 817, 475], [682], [116], [661, 479], [299], [674], [650], [727], [582, 941, 951], [469], [572, 966], [938], [61], [834], [87], [451], [127], [32], [427], [614], [533], [345], [512], [607], [366], [171], [809, 618, 923], [219], [256], [], [912], [673, 526, 527, 782, 664, 508], [104], [348], [50], [253], [746], [], [903], [131, 134], [641], [822, 887], [581], [], [879, 977], [163], [294], [617], [290], [708], [678], [29], [234], [99], [], [215], [548, 664, 851, 894], [247], [707], [924], [891], [720], [923, 924], [744, 657], [888], [188], [768], [809, 925], [412], [715], [115], [948], [621], [328], [49, 50], [178], [449, 975], [833], [365], [], [965], [719], [652, 733], [518, 444], [840], [307], [760], [816], [771], [522], [289], [385], [766], [673, 526, 527, 782, 664, 508], [], [659, 923, 926], [51], [990], [253], [854], [391], [852], [891], [834, 895], [625], [373, 377], [155], [851, 921], [], [191], [452], [113], [600, 116, 126], [851], [433], [157], [], [97], [239], [323], [746], [48], [158], [703], [784, 508], [849], [386, 101], [299], [817], [722], [440], [408], [674], [868], [871], [736], [246], [985], [829], [410], [], [119, 121], [412], [320], [393], [], [843], [966], [884, 538], [63], [713], [774, 788], [748], [792], [893], [847], [782, 664], [464], [962, 932, 923], [530, 719], [788], [323], [109], [373], [434], [739], [431], [76], [859], [608, 602], [430], [755], [288], [933], [786], [567], [536], [], [291], [72], [848, 632], [138], [767], [509], [287], [255], [179], [896, 794, 861], [], [131], [438], [950], [587, 813, 910], [538, 668], [486, 594, 501], [479], [51], [816], [930], [238], [321], [992], [614], [642], [487], [329], [], [653], [653], [327], [797], [435, 876], [251], [891], [357], [452], [61], [176], [279], [515, 808], [576], [696], [143], [108], [217], [997], [581, 479, 717], [650], [809], [808], [], [934], [22], [679], [890], [275], [73, 77], [726], [869, 975], [549, 623], [838], [652], [67], [64], [484, 536, 628], [590], [357], [442], [965], [442, 494], [975], [651, 909, 827], [618, 926], [468], [96], [], [987, 998], [655], [650], [204], [840], [396], [806], [349], [899, 647, 849, 505], [872], [698], [809, 910], [870], [10], [822, 541, 542], [242], [548, 485, 851, 632], [], [], [241], [403], [327], [], [], [413], [537], [349, 350], [759], [612], [84], [1], [212], [783], [806], [], [588], [892], [955], [594], [891], [82], [673, 810, 527, 508], [672], [119], [417], [712], [626], [48], [372], [162], [339], [954, 951], [921], [557], [56], [812], [302], [717], [295], [159], [], [747], [1], [356], [458], [512], [102], [922], [], [309, 599], [644], [983], [255], [], [276], [488], [292], [894], [509], [665], [44], [359], [30], [312], [24], [167], [424], [218], [272], [947], [723], [35], [781], [672], [], [262], [995], [43], [201], [248], [670], [733], [1, 124], [492], [], [757], [124], [831], [829], [546, 650, 819], [84], [911], [183], [873], [20], [476], [475], [208], [435], [665], [817], [834, 683], [956], [640], [109], [579, 881], [752, 852], [89], [543], [332], [926], [], [539, 741], [991], [493], [440], [518], [442], [719], [425], [880], [397], [963], [840, 462, 463], [641], [751], [804], [923, 928, 291, 737], [35], [349, 350], [638, 639], [336], [923], [760], [621], [945], [133], [1], [886], [437], [265, 266], [971], [827, 840], [812], [256], [977], [442, 437], [302], [62], [434], [231], [149], [], [872], [603], [245], [270], [50], [581], [428], [721, 285, 831], [467], [412], [395, 758], [330], [391], [634], [325], [494], [169], [518, 570], [143], [511], [849], [454], [671], [515, 420], [673, 526, 527, 782, 664, 508], [883], [812], [248, 249, 537], [160], [199], [748], [530], [190], [103], [163], [117], [892], [], [616, 159], [400, 667], [796], [703], [335], [834], [673, 742, 664, 526, 527, 632, 508], [33], [849, 505], [], [15], [602], [172], [], [], [298], [37], [130], [527, 664], [], [465], [838], [294], [581, 717, 479], [746], [743], [220], [572], [], [451, 679], [931], [843], [794], [641], [154], [148], [75], [16], [790], [216], [612, 741], [873], [810, 878], [162, 166], [786], [259], [789], [484, 628], [710, 767], [224, 223], [423, 424], [658], [670], [162], [], [547], [294], [63], [926], [591], [227, 235], [437], [763, 597], [161, 676], [342], [698], [928, 659, 923], [8], [205], [788, 502], [804], [537], [464], [826], [874, 555], [248], [583], [408], [616], [304], [185], [682], [520], [169], [769], [40], [562], [463, 434], [753], [207], [676, 248], [], [995], [871], [568], [169], [990], [840], [522], [335], [346], [479], [215], [515], [858], [230], [967], [546], [673, 526, 527, 664, 508], [940, 941, 942], [797], [939], [160], [963], [658], [251, 805], [982, 439], [524, 461], [253], [979], [277], [540], [], [407], [783, 784], [583], [544, 827], [239], [160], [245], [419], [331], [25], [22], [988], [243], [], [458], [455], [116], [986], [899, 505], [268], [416], [640], [420], [354], [739], [111], [384], [616], [810, 878], [541, 542], [910], [480], [897], [], [780], [629], [866], [185], [], [966], [898, 195], [588], [238, 207], [738], [65], [222], [646], [391, 758], [100], [521], [252], [535], [884], [232, 761], [497], [881], [457, 667], [823], [577], [330], [602], [], [725, 505], [879], [522], [49], [813], [239], [886], [347], [208], [294], [320], [87], [715, 652, 671], [929], [212], [94], [533], [903], [812], [921, 917], [583], [748], [295], [372], [], [361], [108, 973], [], [455], [49, 50], [987, 998], [919, 733], [282], [274, 277], [367], [430], [44], [81], [399], [24], [120], [357], [531], [101], [644], [283], [], [], [982], [355]] \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/results/imagenet_synsets.txt b/testbed/huggingface__pytorch-image-models/results/imagenet_synsets.txt new file mode 100644 index 0000000000000000000000000000000000000000..88aa58f966b3c4b7264a52bb71dcb25e856f6c1f --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/results/imagenet_synsets.txt @@ -0,0 +1,1000 @@ +n01440764 +n01443537 +n01484850 +n01491361 +n01494475 +n01496331 +n01498041 +n01514668 +n01514859 +n01518878 +n01530575 +n01531178 +n01532829 +n01534433 +n01537544 +n01558993 +n01560419 +n01580077 +n01582220 +n01592084 +n01601694 +n01608432 +n01614925 +n01616318 +n01622779 +n01629819 +n01630670 +n01631663 +n01632458 +n01632777 +n01641577 +n01644373 +n01644900 +n01664065 +n01665541 +n01667114 +n01667778 +n01669191 +n01675722 +n01677366 +n01682714 +n01685808 +n01687978 +n01688243 +n01689811 +n01692333 +n01693334 +n01694178 +n01695060 +n01697457 +n01698640 +n01704323 +n01728572 +n01728920 +n01729322 +n01729977 +n01734418 +n01735189 +n01737021 +n01739381 +n01740131 +n01742172 +n01744401 +n01748264 +n01749939 +n01751748 +n01753488 +n01755581 +n01756291 +n01768244 +n01770081 +n01770393 +n01773157 +n01773549 +n01773797 +n01774384 +n01774750 +n01775062 +n01776313 +n01784675 +n01795545 +n01796340 +n01797886 +n01798484 +n01806143 +n01806567 +n01807496 +n01817953 +n01818515 +n01819313 +n01820546 +n01824575 +n01828970 +n01829413 +n01833805 +n01843065 +n01843383 +n01847000 +n01855032 +n01855672 +n01860187 +n01871265 +n01872401 +n01873310 +n01877812 +n01882714 +n01883070 +n01910747 +n01914609 +n01917289 +n01924916 +n01930112 +n01943899 +n01944390 +n01945685 +n01950731 +n01955084 +n01968897 +n01978287 +n01978455 +n01980166 +n01981276 +n01983481 +n01984695 +n01985128 +n01986214 +n01990800 +n02002556 +n02002724 +n02006656 +n02007558 +n02009229 +n02009912 +n02011460 +n02012849 +n02013706 +n02017213 +n02018207 +n02018795 +n02025239 +n02027492 +n02028035 +n02033041 +n02037110 +n02051845 +n02056570 +n02058221 +n02066245 +n02071294 +n02074367 +n02077923 +n02085620 +n02085782 +n02085936 +n02086079 +n02086240 +n02086646 +n02086910 +n02087046 +n02087394 +n02088094 +n02088238 +n02088364 +n02088466 +n02088632 +n02089078 +n02089867 +n02089973 +n02090379 +n02090622 +n02090721 +n02091032 +n02091134 +n02091244 +n02091467 +n02091635 +n02091831 +n02092002 +n02092339 +n02093256 +n02093428 +n02093647 +n02093754 +n02093859 +n02093991 +n02094114 +n02094258 +n02094433 +n02095314 +n02095570 +n02095889 +n02096051 +n02096177 +n02096294 +n02096437 +n02096585 +n02097047 +n02097130 +n02097209 +n02097298 +n02097474 +n02097658 +n02098105 +n02098286 +n02098413 +n02099267 +n02099429 +n02099601 +n02099712 +n02099849 +n02100236 +n02100583 +n02100735 +n02100877 +n02101006 +n02101388 +n02101556 +n02102040 +n02102177 +n02102318 +n02102480 +n02102973 +n02104029 +n02104365 +n02105056 +n02105162 +n02105251 +n02105412 +n02105505 +n02105641 +n02105855 +n02106030 +n02106166 +n02106382 +n02106550 +n02106662 +n02107142 +n02107312 +n02107574 +n02107683 +n02107908 +n02108000 +n02108089 +n02108422 +n02108551 +n02108915 +n02109047 +n02109525 +n02109961 +n02110063 +n02110185 +n02110341 +n02110627 +n02110806 +n02110958 +n02111129 +n02111277 +n02111500 +n02111889 +n02112018 +n02112137 +n02112350 +n02112706 +n02113023 +n02113186 +n02113624 +n02113712 +n02113799 +n02113978 +n02114367 +n02114548 +n02114712 +n02114855 +n02115641 +n02115913 +n02116738 +n02117135 +n02119022 +n02119789 +n02120079 +n02120505 +n02123045 +n02123159 +n02123394 +n02123597 +n02124075 +n02125311 +n02127052 +n02128385 +n02128757 +n02128925 +n02129165 +n02129604 +n02130308 +n02132136 +n02133161 +n02134084 +n02134418 +n02137549 +n02138441 +n02165105 +n02165456 +n02167151 +n02168699 +n02169497 +n02172182 +n02174001 +n02177972 +n02190166 +n02206856 +n02219486 +n02226429 +n02229544 +n02231487 +n02233338 +n02236044 +n02256656 +n02259212 +n02264363 +n02268443 +n02268853 +n02276258 +n02277742 +n02279972 +n02280649 +n02281406 +n02281787 +n02317335 +n02319095 +n02321529 +n02325366 +n02326432 +n02328150 +n02342885 +n02346627 +n02356798 +n02361337 +n02363005 +n02364673 +n02389026 +n02391049 +n02395406 +n02396427 +n02397096 +n02398521 +n02403003 +n02408429 +n02410509 +n02412080 +n02415577 +n02417914 +n02422106 +n02422699 +n02423022 +n02437312 +n02437616 +n02441942 +n02442845 +n02443114 +n02443484 +n02444819 +n02445715 +n02447366 +n02454379 +n02457408 +n02480495 +n02480855 +n02481823 +n02483362 +n02483708 +n02484975 +n02486261 +n02486410 +n02487347 +n02488291 +n02488702 +n02489166 +n02490219 +n02492035 +n02492660 +n02493509 +n02493793 +n02494079 +n02497673 +n02500267 +n02504013 +n02504458 +n02509815 +n02510455 +n02514041 +n02526121 +n02536864 +n02606052 +n02607072 +n02640242 +n02641379 +n02643566 +n02655020 +n02666196 +n02667093 +n02669723 +n02672831 +n02676566 +n02687172 +n02690373 +n02692877 +n02699494 +n02701002 +n02704792 +n02708093 +n02727426 +n02730930 +n02747177 +n02749479 +n02769748 +n02776631 +n02777292 +n02782093 +n02783161 +n02786058 +n02787622 +n02788148 +n02790996 +n02791124 +n02791270 +n02793495 +n02794156 +n02795169 +n02797295 +n02799071 +n02802426 +n02804414 +n02804610 +n02807133 +n02808304 +n02808440 +n02814533 +n02814860 +n02815834 +n02817516 +n02823428 +n02823750 +n02825657 +n02834397 +n02835271 +n02837789 +n02840245 +n02841315 +n02843684 +n02859443 +n02860847 +n02865351 +n02869837 +n02870880 +n02871525 +n02877765 +n02879718 +n02883205 +n02892201 +n02892767 +n02894605 +n02895154 +n02906734 +n02909870 +n02910353 +n02916936 +n02917067 +n02927161 +n02930766 +n02939185 +n02948072 +n02950826 +n02951358 +n02951585 +n02963159 +n02965783 +n02966193 +n02966687 +n02971356 +n02974003 +n02977058 +n02978881 +n02979186 +n02980441 +n02981792 +n02988304 +n02992211 +n02992529 +n02999410 +n03000134 +n03000247 +n03000684 +n03014705 +n03016953 +n03017168 +n03018349 +n03026506 +n03028079 +n03032252 +n03041632 +n03042490 +n03045698 +n03047690 +n03062245 +n03063599 +n03063689 +n03065424 +n03075370 +n03085013 +n03089624 +n03095699 +n03100240 +n03109150 +n03110669 +n03124043 +n03124170 +n03125729 +n03126707 +n03127747 +n03127925 +n03131574 +n03133878 +n03134739 +n03141823 +n03146219 +n03160309 +n03179701 +n03180011 +n03187595 +n03188531 +n03196217 +n03197337 +n03201208 +n03207743 +n03207941 +n03208938 +n03216828 +n03218198 +n03220513 +n03223299 +n03240683 +n03249569 +n03250847 +n03255030 +n03259280 +n03271574 +n03272010 +n03272562 +n03290653 +n03291819 +n03297495 +n03314780 +n03325584 +n03337140 +n03344393 +n03345487 +n03347037 +n03355925 +n03372029 +n03376595 +n03379051 +n03384352 +n03388043 +n03388183 +n03388549 +n03393912 +n03394916 +n03400231 +n03404251 +n03417042 +n03424325 +n03425413 +n03443371 +n03444034 +n03445777 +n03445924 +n03447447 +n03447721 +n03450230 +n03452741 +n03457902 +n03459775 +n03461385 +n03467068 +n03476684 +n03476991 +n03478589 +n03481172 +n03482405 +n03483316 +n03485407 +n03485794 +n03492542 +n03494278 +n03495258 +n03496892 +n03498962 +n03527444 +n03529860 +n03530642 +n03532672 +n03534580 +n03535780 +n03538406 +n03544143 +n03584254 +n03584829 +n03590841 +n03594734 +n03594945 +n03595614 +n03598930 +n03599486 +n03602883 +n03617480 +n03623198 +n03627232 +n03630383 +n03633091 +n03637318 +n03642806 +n03649909 +n03657121 +n03658185 +n03661043 +n03662601 +n03666591 +n03670208 +n03673027 +n03676483 +n03680355 +n03690938 +n03691459 +n03692522 +n03697007 +n03706229 +n03709823 +n03710193 +n03710637 +n03710721 +n03717622 +n03720891 +n03721384 +n03724870 +n03729826 +n03733131 +n03733281 +n03733805 +n03742115 +n03743016 +n03759954 +n03761084 +n03763968 +n03764736 +n03769881 +n03770439 +n03770679 +n03773504 +n03775071 +n03775546 +n03776460 +n03777568 +n03777754 +n03781244 +n03782006 +n03785016 +n03786901 +n03787032 +n03788195 +n03788365 +n03791053 +n03792782 +n03792972 +n03793489 +n03794056 +n03796401 +n03803284 +n03804744 +n03814639 +n03814906 +n03825788 +n03832673 +n03837869 +n03838899 +n03840681 +n03841143 +n03843555 +n03854065 +n03857828 +n03866082 +n03868242 +n03868863 +n03871628 +n03873416 +n03874293 +n03874599 +n03876231 +n03877472 +n03877845 +n03884397 +n03887697 +n03888257 +n03888605 +n03891251 +n03891332 +n03895866 +n03899768 +n03902125 +n03903868 +n03908618 +n03908714 +n03916031 +n03920288 +n03924679 +n03929660 +n03929855 +n03930313 +n03930630 +n03933933 +n03935335 +n03937543 +n03938244 +n03942813 +n03944341 +n03947888 +n03950228 +n03954731 +n03956157 +n03958227 +n03961711 +n03967562 +n03970156 +n03976467 +n03976657 +n03977966 +n03980874 +n03982430 +n03983396 +n03991062 +n03992509 +n03995372 +n03998194 +n04004767 +n04005630 +n04008634 +n04009552 +n04019541 +n04023962 +n04026417 +n04033901 +n04033995 +n04037443 +n04039381 +n04040759 +n04041544 +n04044716 +n04049303 +n04065272 +n04067472 +n04069434 +n04070727 +n04074963 +n04081281 +n04086273 +n04090263 +n04099969 +n04111531 +n04116512 +n04118538 +n04118776 +n04120489 +n04125021 +n04127249 +n04131690 +n04133789 +n04136333 +n04141076 +n04141327 +n04141975 +n04146614 +n04147183 +n04149813 +n04152593 +n04153751 +n04154565 +n04162706 +n04179913 +n04192698 +n04200800 +n04201297 +n04204238 +n04204347 +n04208210 +n04209133 +n04209239 +n04228054 +n04229816 +n04235860 +n04238763 +n04239074 +n04243546 +n04251144 +n04252077 +n04252225 +n04254120 +n04254680 +n04254777 +n04258138 +n04259630 +n04263257 +n04264628 +n04265275 +n04266014 +n04270147 +n04273569 +n04275548 +n04277352 +n04285008 +n04286575 +n04296562 +n04310018 +n04311004 +n04311174 +n04317175 +n04325704 +n04326547 +n04328186 +n04330267 +n04332243 +n04335435 +n04336792 +n04344873 +n04346328 +n04347754 +n04350905 +n04355338 +n04355933 +n04356056 +n04357314 +n04366367 +n04367480 +n04370456 +n04371430 +n04371774 +n04372370 +n04376876 +n04380533 +n04389033 +n04392985 +n04398044 +n04399382 +n04404412 +n04409515 +n04417672 +n04418357 +n04423845 +n04428191 +n04429376 +n04435653 +n04442312 +n04443257 +n04447861 +n04456115 +n04458633 +n04461696 +n04462240 +n04465501 +n04467665 +n04476259 +n04479046 +n04482393 +n04483307 +n04485082 +n04486054 +n04487081 +n04487394 +n04493381 +n04501370 +n04505470 +n04507155 +n04509417 +n04515003 +n04517823 +n04522168 +n04523525 +n04525038 +n04525305 +n04532106 +n04532670 +n04536866 +n04540053 +n04542943 +n04548280 +n04548362 +n04550184 +n04552348 +n04553703 +n04554684 +n04557648 +n04560804 +n04562935 +n04579145 +n04579432 +n04584207 +n04589890 +n04590129 +n04591157 +n04591713 +n04592741 +n04596742 +n04597913 +n04599235 +n04604644 +n04606251 +n04612504 +n04613696 +n06359193 +n06596364 +n06785654 +n06794110 +n06874185 +n07248320 +n07565083 +n07579787 +n07583066 +n07584110 +n07590611 +n07613480 +n07614500 +n07615774 +n07684084 +n07693725 +n07695742 +n07697313 +n07697537 +n07711569 +n07714571 +n07714990 +n07715103 +n07716358 +n07716906 +n07717410 +n07717556 +n07718472 +n07718747 +n07720875 +n07730033 +n07734744 +n07742313 +n07745940 +n07747607 +n07749582 +n07753113 +n07753275 +n07753592 +n07754684 +n07760859 +n07768694 +n07802026 +n07831146 +n07836838 +n07860988 +n07871810 +n07873807 +n07875152 +n07880968 +n07892512 +n07920052 +n07930864 +n07932039 +n09193705 +n09229709 +n09246464 +n09256479 +n09288635 +n09332890 +n09399592 +n09421951 +n09428293 +n09468604 +n09472597 +n09835506 +n10148035 +n10565667 +n11879895 +n11939491 +n12057211 +n12144580 +n12267677 +n12620546 +n12768682 +n12985857 +n12998815 +n13037406 +n13040303 +n13044778 +n13052670 +n13054560 +n13133613 +n15075141 diff --git a/testbed/huggingface__pytorch-image-models/results/results-imagenet-a-clean.csv b/testbed/huggingface__pytorch-image-models/results/results-imagenet-a-clean.csv new file mode 100644 index 0000000000000000000000000000000000000000..e97b7b05c215dccf626a76c4771f07b65b92e282 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/results/results-imagenet-a-clean.csv @@ -0,0 +1,421 @@ +model,top1,top1_err,top5,top5_err,param_count,img_size,cropt_pct,interpolation +tf_efficientnet_l2_ns,98.550,1.450,99.820,0.180,480.31,800,0.960,bicubic +tf_efficientnet_l2_ns_475,98.500,1.500,99.830,0.170,480.31,475,0.936,bicubic +vit_large_patch16_384,98.220,1.780,99.800,0.200,304.72,384,1.000,bicubic +swin_large_patch4_window12_384,98.040,1.960,99.690,0.310,196.74,384,1.000,bicubic +tf_efficientnet_b7_ns,97.910,2.090,99.720,0.280,66.35,600,0.949,bicubic +swin_base_patch4_window12_384,97.890,2.110,99.710,0.290,87.90,384,1.000,bicubic +vit_large_r50_s32_384,97.860,2.140,99.670,0.330,329.09,384,1.000,bicubic +vit_base_patch16_384,97.840,2.160,99.670,0.330,86.86,384,1.000,bicubic +tf_efficientnetv2_l_in21ft1k,97.700,2.300,99.670,0.330,118.52,480,1.000,bicubic +swin_large_patch4_window7_224,97.650,2.350,99.580,0.420,196.53,224,0.900,bicubic +vit_large_patch16_224,97.640,2.360,99.590,0.410,304.33,224,0.900,bicubic +tf_efficientnet_b6_ns,97.630,2.370,99.580,0.420,43.04,528,0.942,bicubic +ig_resnext101_32x48d,97.620,2.380,99.700,0.300,828.41,224,0.875,bilinear +dm_nfnet_f6,97.600,2.400,99.550,0.450,438.36,576,0.956,bicubic +dm_nfnet_f4,97.580,2.420,99.510,0.490,316.07,512,0.951,bicubic +dm_nfnet_f5,97.540,2.460,99.570,0.430,377.21,544,0.954,bicubic +tf_efficientnet_b5_ns,97.500,2.500,99.630,0.370,30.39,456,0.934,bicubic +resnetv2_152x4_bitm,97.490,2.510,99.610,0.390,936.53,480,1.000,bilinear +cait_m48_448,97.480,2.520,99.550,0.450,356.46,448,1.000,bicubic +tf_efficientnetv2_m_in21ft1k,97.480,2.520,99.530,0.470,54.14,480,1.000,bicubic +cait_m36_384,97.400,2.600,99.510,0.490,271.22,384,1.000,bicubic +ig_resnext101_32x32d,97.360,2.640,99.680,0.320,468.53,224,0.875,bilinear +dm_nfnet_f3,97.350,2.650,99.560,0.440,254.92,416,0.940,bicubic +cait_s36_384,97.330,2.670,99.530,0.470,68.37,384,1.000,bicubic +tf_efficientnetv2_l,97.280,2.720,99.550,0.450,118.52,480,1.000,bicubic +swin_base_patch4_window7_224,97.250,2.750,99.530,0.470,87.77,224,0.900,bicubic +tf_efficientnet_b8,97.200,2.800,99.500,0.500,87.41,672,0.954,bicubic +swsl_resnext101_32x8d,97.200,2.800,99.570,0.430,88.79,224,0.875,bilinear +tf_efficientnet_b7_ap,97.200,2.800,99.540,0.460,66.35,600,0.949,bicubic +vit_base_r50_s16_384,97.180,2.820,99.560,0.440,98.95,384,1.000,bicubic +tf_efficientnetv2_m,97.140,2.860,99.410,0.590,54.14,480,1.000,bicubic +tf_efficientnet_b8_ap,97.110,2.890,99.660,0.340,87.41,672,0.954,bicubic +eca_nfnet_l2,97.090,2.910,99.510,0.490,56.72,384,1.000,bicubic +ecaresnet269d,97.080,2.920,99.470,0.530,102.09,352,1.000,bicubic +tf_efficientnet_b6_ap,97.080,2.920,99.620,0.380,43.04,528,0.942,bicubic +cait_s24_384,97.070,2.930,99.430,0.570,47.06,384,1.000,bicubic +dm_nfnet_f2,97.020,2.980,99.440,0.560,193.78,352,0.920,bicubic +resnetv2_152x2_bitm,97.010,2.990,99.590,0.410,236.34,448,1.000,bilinear +tf_efficientnet_b7,97.010,2.990,99.520,0.480,66.35,600,0.949,bicubic +resnetv2_101x3_bitm,96.990,3.010,99.490,0.510,387.93,448,1.000,bilinear +efficientnetv2_rw_m,96.980,3.020,99.540,0.460,53.24,416,1.000,bicubic +deit_base_distilled_patch16_384,96.960,3.040,99.480,0.520,87.63,384,1.000,bicubic +tf_efficientnet_b4_ns,96.950,3.050,99.580,0.420,19.34,380,0.922,bicubic +dm_nfnet_f1,96.920,3.080,99.410,0.590,132.63,320,0.910,bicubic +resnetrs420,96.910,3.090,99.460,0.540,191.89,416,1.000,bicubic +vit_base_patch16_224,96.880,3.120,99.530,0.470,86.57,224,0.900,bicubic +resnetv2_152x2_bit_teacher_384,96.830,3.170,99.450,0.550,236.34,384,1.000,bicubic +ig_resnext101_32x16d,96.820,3.180,99.590,0.410,194.03,224,0.875,bilinear +vit_large_r50_s32_224,96.790,3.210,99.350,0.650,328.99,224,0.900,bicubic +seresnet152d,96.770,3.230,99.450,0.550,66.84,320,1.000,bicubic +resnetrs350,96.760,3.240,99.370,0.630,163.96,384,1.000,bicubic +tf_efficientnetv2_s_in21ft1k,96.730,3.270,99.420,0.580,21.46,384,1.000,bicubic +resnet200d,96.720,3.280,99.330,0.670,64.69,320,1.000,bicubic +resnetv2_50x3_bitm,96.710,3.290,99.550,0.450,217.32,448,1.000,bilinear +eca_nfnet_l1,96.700,3.300,99.290,0.710,41.41,320,1.000,bicubic +vit_small_patch16_384,96.700,3.300,99.480,0.520,22.20,384,1.000,bicubic +resnetrs270,96.690,3.310,99.350,0.650,129.86,352,1.000,bicubic +pit_b_distilled_224,96.680,3.320,99.350,0.650,74.79,224,0.900,bicubic +tf_efficientnet_b5_ap,96.680,3.320,99.460,0.540,30.39,456,0.934,bicubic +vit_small_r26_s32_384,96.680,3.320,99.570,0.430,36.47,384,1.000,bicubic +tf_efficientnet_b6,96.670,3.330,99.370,0.630,43.04,528,0.942,bicubic +resmlp_big_24_224_in22ft1k,96.620,3.380,99.510,0.490,129.14,224,0.875,bicubic +resnest200e,96.610,3.390,99.350,0.650,70.20,320,0.909,bicubic +swsl_resnext101_32x16d,96.600,3.400,99.520,0.480,194.03,224,0.875,bilinear +resnetrs152,96.580,3.420,99.240,0.760,86.62,320,1.000,bicubic +cait_xs24_384,96.550,3.450,99.420,0.580,26.67,384,1.000,bicubic +efficientnetv2_rw_s,96.540,3.460,99.360,0.640,23.94,384,1.000,bicubic +resnetrs200,96.530,3.470,99.350,0.650,93.21,320,1.000,bicubic +resnest269e,96.520,3.480,99.350,0.650,110.93,416,0.928,bicubic +vit_base_patch32_384,96.490,3.510,99.410,0.590,88.30,384,1.000,bicubic +vit_base_patch16_224_miil,96.460,3.540,99.300,0.700,86.54,224,0.875,bilinear +resmlp_big_24_distilled_224,96.450,3.550,99.310,0.690,129.14,224,0.875,bicubic +swsl_resnext101_32x4d,96.420,3.580,99.470,0.530,44.18,224,0.875,bilinear +tf_efficientnet_b3_ns,96.390,3.610,99.350,0.650,12.23,300,0.904,bicubic +cait_s24_224,96.380,3.620,99.150,0.850,46.92,224,1.000,bicubic +resnet152d,96.360,3.640,99.390,0.610,60.21,320,1.000,bicubic +regnety_160,96.350,3.650,99.330,0.670,83.59,288,1.000,bicubic +tf_efficientnet_b5,96.350,3.650,99.310,0.690,30.39,456,0.934,bicubic +tf_efficientnetv2_s,96.340,3.660,99.200,0.800,21.46,384,1.000,bicubic +ig_resnext101_32x8d,96.320,3.680,99.430,0.570,88.79,224,0.875,bilinear +resnet101d,96.290,3.710,99.230,0.770,44.57,320,1.000,bicubic +twins_svt_large,96.270,3.730,99.170,0.830,99.27,224,0.900,bicubic +tf_efficientnet_b4_ap,96.160,3.840,99.280,0.720,19.34,380,0.922,bicubic +twins_svt_base,96.160,3.840,99.060,0.940,56.07,224,0.900,bicubic +deit_base_patch16_384,96.150,3.850,99.140,0.860,86.86,384,1.000,bicubic +dm_nfnet_f0,96.150,3.850,99.250,0.750,71.49,256,0.900,bicubic +efficientnet_b4,96.150,3.850,99.200,0.800,19.34,384,1.000,bicubic +twins_pcpvt_large,96.150,3.850,99.180,0.820,60.99,224,0.900,bicubic +resnetv2_50x1_bit_distilled,96.130,3.870,99.280,0.720,25.55,224,0.875,bicubic +nfnet_l0,96.120,3.880,99.240,0.760,35.07,288,1.000,bicubic +resnetv2_152x2_bit_teacher,96.100,3.900,99.280,0.720,236.34,224,0.875,bicubic +resnetv2_101x1_bitm,96.100,3.900,99.280,0.720,44.54,448,1.000,bilinear +deit_base_distilled_patch16_224,96.090,3.910,99.190,0.810,87.34,224,0.900,bicubic +regnety_032,95.970,4.030,99.190,0.810,19.44,288,1.000,bicubic +tresnet_xl_448,95.970,4.030,99.130,0.870,78.44,448,0.875,bilinear +eca_nfnet_l0,95.950,4.050,99.210,0.790,24.14,288,1.000,bicubic +swin_small_patch4_window7_224,95.910,4.090,99.020,0.980,49.61,224,0.900,bicubic +tf_efficientnet_b4,95.900,4.100,99.170,0.830,19.34,380,0.922,bicubic +swsl_resnext50_32x4d,95.870,4.130,99.250,0.750,25.03,224,0.875,bilinear +resnest101e,95.860,4.140,99.210,0.790,48.28,256,0.875,bilinear +resnet51q,95.860,4.140,99.120,0.880,35.70,288,1.000,bilinear +tresnet_l_448,95.860,4.140,99.120,0.880,55.99,448,0.875,bilinear +cait_xxs36_384,95.850,4.150,99.090,0.910,17.37,384,1.000,bicubic +vit_large_patch32_384,95.830,4.170,99.150,0.850,306.63,384,1.000,bicubic +ssl_resnext101_32x16d,95.800,4.200,99.180,0.820,194.03,224,0.875,bilinear +twins_pcpvt_base,95.790,4.210,99.130,0.870,43.83,224,0.900,bicubic +tf_efficientnet_b2_ns,95.770,4.230,99.120,0.880,9.11,260,0.890,bicubic +tresnet_m,95.720,4.280,99.030,0.970,31.39,224,0.875,bilinear +efficientnet_b3,95.710,4.290,99.040,0.960,12.23,320,1.000,bicubic +pnasnet5large,95.710,4.290,98.920,1.080,86.06,331,0.911,bicubic +nasnetalarge,95.680,4.320,98.930,1.070,88.75,331,0.911,bicubic +pit_b_224,95.640,4.360,98.660,1.340,73.76,224,0.900,bicubic +vit_small_r26_s32_224,95.630,4.370,99.190,0.810,36.43,224,0.900,bicubic +convit_base,95.550,4.450,98.870,1.130,86.54,224,0.875,bicubic +coat_lite_small,95.540,4.460,98.860,1.140,19.84,224,0.900,bicubic +ecaresnet101d,95.530,4.470,99.130,0.870,44.57,224,0.875,bicubic +levit_384,95.530,4.470,99.050,0.950,39.13,224,0.900,bicubic +ecaresnet50t,95.510,4.490,99.120,0.880,25.57,320,0.950,bicubic +visformer_small,95.490,4.510,98.900,1.100,40.22,224,0.900,bicubic +ssl_resnext101_32x8d,95.470,4.530,99.110,0.890,88.79,224,0.875,bilinear +deit_base_patch16_224,95.440,4.560,98.840,1.160,86.57,224,0.900,bicubic +ssl_resnext101_32x4d,95.440,4.560,99.130,0.870,44.18,224,0.875,bilinear +tresnet_xl,95.440,4.560,99.050,0.950,78.44,224,0.875,bilinear +resnetrs101,95.430,4.570,99.030,0.970,63.62,288,0.940,bicubic +swsl_resnet50,95.410,4.590,99.290,0.710,25.56,224,0.875,bilinear +vit_small_patch16_224,95.370,4.630,99.150,0.850,22.05,224,0.900,bicubic +tf_efficientnet_b3_ap,95.320,4.680,98.900,1.100,12.23,300,0.904,bicubic +mixer_b16_224_miil,95.300,4.700,98.880,1.120,59.88,224,0.875,bilinear +tresnet_l,95.290,4.710,99.010,0.990,55.99,224,0.875,bilinear +cait_xxs24_384,95.260,4.740,98.960,1.040,12.03,384,1.000,bicubic +pit_s_distilled_224,95.240,4.760,99.050,0.950,24.04,224,0.900,bicubic +twins_pcpvt_small,95.210,4.790,98.880,1.120,24.11,224,0.900,bicubic +convit_small,95.200,4.800,98.900,1.100,27.78,224,0.875,bicubic +twins_svt_small,95.200,4.800,98.880,1.120,24.06,224,0.900,bicubic +tf_efficientnet_b1_ns,95.170,4.830,99.110,0.890,7.79,240,0.882,bicubic +tf_efficientnetv2_b3,95.160,4.840,98.820,1.180,14.36,300,0.904,bicubic +swin_tiny_patch4_window7_224,95.140,4.860,98.850,1.150,28.29,224,0.900,bicubic +efficientnet_el,95.120,4.880,98.990,1.010,10.59,300,0.904,bicubic +gernet_l,95.090,4.910,98.900,1.100,31.08,256,0.875,bilinear +ecaresnet101d_pruned,95.080,4.920,98.980,1.020,24.88,224,0.875,bicubic +wide_resnet50_2,95.080,4.920,98.970,1.030,68.88,224,0.875,bicubic +legacy_senet154,95.070,4.930,98.830,1.170,115.09,224,0.875,bilinear +vit_small_patch32_384,95.050,4.950,98.990,1.010,22.92,384,1.000,bicubic +seresnext50_32x4d,95.040,4.960,98.880,1.120,27.56,224,0.875,bicubic +tnt_s_patch16_224,95.040,4.960,98.830,1.170,23.76,224,0.900,bicubic +gluon_resnet152_v1s,95.040,4.960,98.930,1.070,60.32,224,0.875,bicubic +levit_256,95.010,4.990,98.890,1.110,18.89,224,0.900,bicubic +resnetv2_50x1_bitm,95.010,4.990,99.060,0.940,25.55,448,1.000,bilinear +tf_efficientnet_b3,95.010,4.990,98.910,1.090,12.23,300,0.904,bicubic +vit_base_patch32_224,95.000,5.000,99.030,0.970,88.22,224,0.900,bicubic +tresnet_m_448,94.990,5.010,98.980,1.020,31.39,448,0.875,bilinear +coat_mini,94.970,5.030,98.780,1.220,10.34,224,0.900,bicubic +resnest50d_4s2x40d,94.960,5.040,99.070,0.930,30.42,224,0.875,bicubic +rexnet_200,94.940,5.060,99.010,0.990,16.37,224,0.875,bicubic +gluon_seresnext101_64x4d,94.930,5.070,98.830,1.170,88.23,224,0.875,bicubic +gluon_senet154,94.920,5.080,98.760,1.240,115.09,224,0.875,bicubic +gluon_seresnext101_32x4d,94.920,5.080,98.810,1.190,48.96,224,0.875,bicubic +tf_efficientnet_lite4,94.890,5.110,99.020,0.980,13.01,380,0.920,bilinear +resmlp_36_distilled_224,94.890,5.110,98.850,1.150,44.69,224,0.875,bicubic +ssl_resnext50_32x4d,94.870,5.130,98.880,1.120,25.03,224,0.875,bilinear +resnest50d,94.830,5.170,98.880,1.120,27.48,224,0.875,bilinear +ecaresnetlight,94.770,5.230,98.800,1.200,30.16,224,0.875,bicubic +resnest50d_1s4x24d,94.750,5.250,98.980,1.020,25.68,224,0.875,bicubic +gluon_resnet152_v1d,94.740,5.260,98.740,1.260,60.21,224,0.875,bicubic +gluon_resnet101_v1s,94.720,5.280,98.820,1.180,44.67,224,0.875,bicubic +deit_small_distilled_patch16_224,94.710,5.290,99.030,0.970,22.44,224,0.900,bicubic +gluon_resnext101_64x4d,94.670,5.330,98.650,1.350,83.46,224,0.875,bicubic +cspdarknet53,94.660,5.340,98.800,1.200,27.64,256,0.887,bilinear +resmlp_big_24_224,94.660,5.340,98.480,1.520,129.14,224,0.875,bicubic +ecaresnet50d,94.630,5.370,98.890,1.110,25.58,224,0.875,bicubic +efficientnet_b3_pruned,94.630,5.370,98.760,1.240,9.86,300,0.904,bicubic +gernet_m,94.620,5.380,98.860,1.140,21.14,224,0.875,bilinear +efficientnet_b2,94.610,5.390,98.710,1.290,9.11,288,1.000,bicubic +pit_s_224,94.590,5.410,98.710,1.290,23.46,224,0.900,bicubic +repvgg_b3,94.570,5.430,98.780,1.220,123.09,224,0.875,bilinear +nf_resnet50,94.560,5.440,98.790,1.210,25.56,288,0.940,bicubic +seresnet50,94.550,5.450,98.750,1.250,28.09,224,0.875,bicubic +inception_resnet_v2,94.540,5.460,98.790,1.210,55.84,299,0.897,bicubic +regnety_320,94.540,5.460,98.850,1.150,145.05,224,0.875,bicubic +gluon_resnext101_32x4d,94.530,5.470,98.630,1.370,44.18,224,0.875,bicubic +repvgg_b3g4,94.520,5.480,98.970,1.030,83.83,224,0.875,bilinear +tf_efficientnet_b2_ap,94.490,5.510,98.620,1.380,9.11,260,0.890,bicubic +regnety_120,94.480,5.520,98.810,1.190,51.82,224,0.875,bicubic +rexnet_150,94.480,5.520,98.790,1.210,9.73,224,0.875,bicubic +cspresnext50,94.480,5.520,98.680,1.320,20.57,224,0.875,bilinear +resmlp_24_distilled_224,94.460,5.540,98.770,1.230,30.02,224,0.875,bicubic +regnetx_320,94.460,5.540,98.740,1.260,107.81,224,0.875,bicubic +ssl_resnet50,94.450,5.550,98.920,1.080,25.56,224,0.875,bilinear +tf_efficientnetv2_b2,94.420,5.580,98.570,1.430,10.10,260,0.890,bicubic +tf_efficientnet_el,94.410,5.590,98.710,1.290,10.59,300,0.904,bicubic +deit_small_patch16_224,94.400,5.600,98.690,1.310,22.05,224,0.900,bicubic +efficientnet_el_pruned,94.400,5.600,98.740,1.260,10.59,300,0.904,bicubic +inception_v4,94.380,5.620,98.580,1.420,42.68,299,0.875,bicubic +legacy_seresnext101_32x4d,94.370,5.630,98.650,1.350,48.96,224,0.875,bilinear +tf_efficientnet_b2,94.360,5.640,98.610,1.390,9.11,260,0.890,bicubic +gluon_seresnext50_32x4d,94.340,5.660,98.610,1.390,27.56,224,0.875,bicubic +resnetrs50,94.310,5.690,98.640,1.360,35.69,224,0.910,bicubic +dpn107,94.310,5.690,98.480,1.520,86.92,224,0.875,bicubic +ecaresnet26t,94.310,5.690,98.720,1.280,16.01,320,0.950,bicubic +xception71,94.280,5.720,98.640,1.360,42.34,299,0.903,bicubic +cait_xxs36_224,94.260,5.740,98.720,1.280,17.30,224,1.000,bicubic +gluon_xception65,94.260,5.740,98.570,1.430,39.92,299,0.903,bicubic +resnet50d,94.260,5.740,98.720,1.280,25.58,224,0.875,bicubic +skresnext50_32x4d,94.260,5.740,98.460,1.540,27.48,224,0.875,bicubic +regnetx_120,94.240,5.760,98.650,1.350,46.11,224,0.875,bicubic +dpn92,94.230,5.770,98.730,1.270,37.67,224,0.875,bicubic +ecaresnet50d_pruned,94.220,5.780,98.730,1.270,19.94,224,0.875,bicubic +gluon_resnet101_v1d,94.220,5.780,98.550,1.450,44.57,224,0.875,bicubic +tf_efficientnet_lite3,94.200,5.800,98.640,1.360,8.20,300,0.904,bilinear +resmlp_36_224,94.190,5.810,98.660,1.340,44.69,224,0.875,bicubic +mixnet_xl,94.190,5.810,98.340,1.660,11.90,224,0.875,bicubic +resnext50d_32x4d,94.180,5.820,98.570,1.430,25.05,224,0.875,bicubic +levit_192,94.170,5.830,98.540,1.460,10.95,224,0.900,bicubic +regnety_080,94.170,5.830,98.680,1.320,39.18,224,0.875,bicubic +ens_adv_inception_resnet_v2,94.160,5.840,98.600,1.400,55.84,299,0.897,bicubic +gluon_resnet152_v1c,94.160,5.840,98.640,1.360,60.21,224,0.875,bicubic +regnety_064,94.150,5.850,98.730,1.270,30.58,224,0.875,bicubic +efficientnet_b2_pruned,94.140,5.860,98.530,1.470,8.31,260,0.890,bicubic +dpn98,94.130,5.870,98.570,1.430,61.57,224,0.875,bicubic +nf_regnet_b1,94.120,5.880,98.630,1.370,10.22,288,0.900,bicubic +regnetx_160,94.120,5.880,98.750,1.250,54.28,224,0.875,bicubic +resnext50_32x4d,94.100,5.900,98.350,1.650,25.03,224,0.875,bicubic +ese_vovnet39b,94.090,5.910,98.660,1.340,24.57,224,0.875,bicubic +gluon_resnet152_v1b,94.080,5.920,98.450,1.550,60.19,224,0.875,bicubic +coat_lite_mini,94.060,5.940,98.560,1.440,11.01,224,0.900,bicubic +resmlp_24_224,94.020,5.980,98.330,1.670,30.02,224,0.875,bicubic +dpn131,94.010,5.990,98.720,1.280,79.25,224,0.875,bicubic +hrnet_w64,94.010,5.990,98.610,1.390,128.06,224,0.875,bilinear +resnetblur50,93.960,6.040,98.590,1.410,25.56,224,0.875,bicubic +dla102x2,93.950,6.050,98.490,1.510,41.28,224,0.875,bilinear +tf_efficientnetv2_b1,93.940,6.060,98.620,1.380,8.14,240,0.882,bicubic +hrnet_w48,93.920,6.080,98.610,1.390,77.47,224,0.875,bilinear +rexnet_130,93.900,6.100,98.400,1.600,7.56,224,0.875,bicubic +tf_efficientnet_cc_b1_8e,93.900,6.100,98.260,1.740,39.72,240,0.882,bicubic +regnetx_064,93.890,6.110,98.630,1.370,26.21,224,0.875,bicubic +regnetx_080,93.870,6.130,98.520,1.480,39.57,224,0.875,bicubic +repvgg_b2g4,93.860,6.140,98.590,1.410,61.76,224,0.875,bilinear +regnety_040,93.860,6.140,98.650,1.350,20.65,224,0.875,bicubic +efficientnet_em,93.840,6.160,98.810,1.190,6.90,240,0.882,bicubic +resnext101_32x8d,93.830,6.170,98.580,1.420,88.79,224,0.875,bilinear +gluon_resnext50_32x4d,93.810,6.190,98.410,1.590,25.03,224,0.875,bicubic +pit_xs_distilled_224,93.810,6.190,98.670,1.330,11.00,224,0.900,bicubic +resnet50,93.810,6.190,98.390,1.610,25.56,224,0.875,bicubic +gluon_resnet50_v1d,93.770,6.230,98.390,1.610,25.58,224,0.875,bicubic +xception65,93.760,6.240,98.370,1.630,39.92,299,0.903,bicubic +gluon_resnet101_v1b,93.750,6.250,98.380,1.620,44.55,224,0.875,bicubic +res2net101_26w_4s,93.750,6.250,98.310,1.690,45.21,224,0.875,bilinear +cspresnet50,93.740,6.260,98.640,1.360,21.62,256,0.887,bilinear +legacy_seresnext50_32x4d,93.730,6.270,98.580,1.420,27.56,224,0.875,bilinear +wide_resnet101_2,93.720,6.280,98.540,1.460,126.89,224,0.875,bilinear +tf_efficientnet_b1_ap,93.690,6.310,98.360,1.640,7.79,240,0.882,bicubic +dpn68b,93.690,6.310,98.510,1.490,12.61,224,0.875,bicubic +gluon_resnet101_v1c,93.670,6.330,98.420,1.580,44.57,224,0.875,bicubic +vit_tiny_patch16_384,93.650,6.350,98.600,1.400,5.79,384,1.000,bicubic +tf_efficientnet_b0_ns,93.630,6.370,98.640,1.360,5.29,224,0.875,bicubic +gluon_resnet50_v1s,93.620,6.380,98.460,1.540,25.68,224,0.875,bicubic +cait_xxs24_224,93.600,6.400,98.440,1.560,11.96,224,1.000,bicubic +coat_tiny,93.590,6.410,98.430,1.570,5.50,224,0.900,bicubic +regnetx_040,93.560,6.440,98.540,1.460,22.12,224,0.875,bicubic +hrnet_w44,93.550,6.450,98.700,1.300,67.06,224,0.875,bilinear +res2net50_26w_8s,93.540,6.460,98.260,1.740,48.40,224,0.875,bilinear +hrnet_w32,93.530,6.470,98.450,1.550,41.23,224,0.875,bilinear +dla102x,93.520,6.480,98.510,1.490,26.31,224,0.875,bilinear +repvgg_b2,93.500,6.500,98.730,1.270,89.02,224,0.875,bilinear +tf_efficientnet_b1,93.500,6.500,98.360,1.640,7.79,240,0.882,bicubic +hrnet_w40,93.490,6.510,98.580,1.420,57.56,224,0.875,bilinear +gluon_inception_v3,93.460,6.540,98.570,1.430,23.83,299,0.875,bicubic +xception,93.460,6.540,98.530,1.470,22.86,299,0.897,bicubic +mixnet_l,93.450,6.550,98.220,1.780,7.33,224,0.875,bicubic +xception41,93.430,6.570,98.430,1.570,26.97,299,0.903,bicubic +res2net50_26w_6s,93.410,6.590,98.280,1.720,37.05,224,0.875,bilinear +legacy_seresnet152,93.400,6.600,98.350,1.650,66.82,224,0.875,bilinear +dla169,93.340,6.660,98.600,1.400,53.39,224,0.875,bilinear +levit_128,93.340,6.660,98.380,1.620,9.21,224,0.900,bicubic +repvgg_b1,93.330,6.670,98.510,1.490,57.42,224,0.875,bilinear +resnest26d,93.330,6.670,98.630,1.370,17.07,224,0.875,bilinear +tf_inception_v3,93.320,6.680,98.030,1.970,23.83,299,0.875,bicubic +tf_mixnet_l,93.310,6.690,98.030,1.970,7.33,224,0.875,bicubic +selecsls60b,93.300,6.700,98.280,1.720,32.77,224,0.875,bicubic +tv_resnet152,93.300,6.700,98.390,1.610,60.19,224,0.875,bilinear +legacy_seresnet101,93.280,6.720,98.510,1.490,49.33,224,0.875,bilinear +efficientnet_b1,93.250,6.750,98.290,1.710,7.79,256,1.000,bicubic +coat_lite_tiny,93.240,6.760,98.260,1.740,5.72,224,0.900,bicubic +hrnet_w30,93.200,6.800,98.410,1.590,37.71,224,0.875,bilinear +dla60_res2net,93.180,6.820,98.420,1.580,20.85,224,0.875,bilinear +dla60_res2next,93.180,6.820,98.410,1.590,17.03,224,0.875,bilinear +efficientnet_es,93.140,6.860,98.420,1.580,5.44,224,0.875,bicubic +dla60x,93.120,6.880,98.510,1.490,17.35,224,0.875,bilinear +regnetx_032,93.120,6.880,98.390,1.610,15.30,224,0.875,bicubic +tf_efficientnetv2_b0,93.110,6.890,98.390,1.610,7.14,224,0.875,bicubic +pit_xs_224,93.110,6.890,98.310,1.690,10.62,224,0.900,bicubic +dla102,93.060,6.940,98.540,1.460,33.27,224,0.875,bilinear +gluon_resnet50_v1c,93.030,6.970,98.390,1.610,25.58,224,0.875,bicubic +regnety_016,93.030,6.970,98.360,1.640,11.20,224,0.875,bicubic +rexnet_100,93.030,6.970,98.190,1.810,4.80,224,0.875,bicubic +selecsls60,93.030,6.970,98.300,1.700,30.67,224,0.875,bicubic +repvgg_b1g4,92.980,7.020,98.430,1.570,39.97,224,0.875,bilinear +legacy_seresnet50,92.960,7.040,98.190,1.810,28.09,224,0.875,bilinear +hardcorenas_f,92.950,7.050,98.160,1.840,8.20,224,0.875,bilinear +tf_efficientnet_em,92.930,7.070,98.190,1.810,6.90,240,0.882,bicubic +adv_inception_v3,92.880,7.120,98.140,1.860,23.83,299,0.875,bicubic +res2next50,92.840,7.160,98.180,1.820,24.67,224,0.875,bilinear +tf_efficientnet_cc_b0_8e,92.830,7.170,98.180,1.820,24.01,224,0.875,bicubic +resmlp_12_distilled_224,92.830,7.170,98.140,1.860,15.35,224,0.875,bicubic +gmixer_24_224,92.830,7.170,97.880,2.120,24.72,224,0.875,bicubic +seresnext26t_32x4d,92.820,7.180,98.370,1.630,16.81,224,0.875,bicubic +tv_resnet101,92.810,7.190,98.250,1.750,44.55,224,0.875,bilinear +efficientnet_b1_pruned,92.770,7.230,98.040,1.960,6.33,240,0.882,bicubic +densenet201,92.750,7.250,98.230,1.770,20.01,224,0.875,bicubic +res2net50_14w_8s,92.740,7.260,98.180,1.820,25.06,224,0.875,bilinear +tv_resnext50_32x4d,92.740,7.260,98.270,1.730,25.03,224,0.875,bilinear +inception_v3,92.720,7.280,97.970,2.030,23.83,299,0.875,bicubic +seresnext26d_32x4d,92.700,7.300,98.150,1.850,16.81,224,0.875,bicubic +efficientnet_b0,92.690,7.310,98.070,1.930,5.29,224,0.875,bicubic +resnet34d,92.680,7.320,98.310,1.690,21.82,224,0.875,bicubic +tf_efficientnet_lite2,92.650,7.350,98.230,1.770,6.09,260,0.890,bicubic +legacy_seresnext26_32x4d,92.640,7.360,98.130,1.870,16.79,224,0.875,bicubic +tf_efficientnet_lite1,92.620,7.380,98.080,1.920,5.42,240,0.882,bicubic +tf_efficientnet_cc_b0_4e,92.590,7.410,98.080,1.920,13.31,224,0.875,bicubic +hardcorenas_e,92.570,7.430,98.110,1.890,8.07,224,0.875,bilinear +res2net50_48w_2s,92.550,7.450,98.080,1.920,25.29,224,0.875,bilinear +gluon_resnet50_v1b,92.540,7.460,98.170,1.830,25.56,224,0.875,bicubic +densenet161,92.500,7.500,98.290,1.710,28.68,224,0.875,bicubic +res2net50_26w_4s,92.500,7.500,98.060,1.940,25.70,224,0.875,bilinear +mixnet_m,92.430,7.570,97.870,2.130,5.01,224,0.875,bicubic +hardcorenas_d,92.400,7.600,98.070,1.930,7.50,224,0.875,bilinear +mobilenetv2_120d,92.400,7.600,98.050,1.950,5.83,224,0.875,bicubic +skresnet34,92.390,7.610,98.150,1.850,22.28,224,0.875,bicubic +tf_mixnet_m,92.330,7.670,97.890,2.110,5.01,224,0.875,bicubic +hrnet_w18,92.320,7.680,98.240,1.760,21.30,224,0.875,bilinear +ese_vovnet19b_dw,92.290,7.710,98.090,1.910,6.54,224,0.875,bicubic +selecsls42b,92.280,7.720,98.150,1.850,32.46,224,0.875,bicubic +mobilenetv3_large_100_miil,92.260,7.740,97.640,2.360,5.48,224,0.875,bilinear +tf_efficientnet_b0,92.250,7.750,98.000,2.000,5.29,224,0.875,bicubic +dla60,92.230,7.770,98.110,1.890,22.04,224,0.875,bilinear +resmlp_12_224,92.210,7.790,98.160,1.840,15.35,224,0.875,bicubic +tf_efficientnet_b0_ap,92.200,7.800,98.020,1.980,5.29,224,0.875,bicubic +regnetx_016,92.170,7.830,98.210,1.790,9.19,224,0.875,bicubic +gernet_s,92.140,7.860,98.190,1.810,8.17,224,0.875,bilinear +resnet26d,92.070,7.930,97.960,2.040,16.01,224,0.875,bicubic +vit_small_patch32_224,92.040,7.960,98.230,1.770,22.88,224,0.900,bicubic +vit_tiny_r_s16_p8_384,92.040,7.960,98.290,1.710,6.36,384,1.000,bicubic +hardcorenas_c,92.020,7.980,97.840,2.160,5.52,224,0.875,bilinear +dpn68,92.010,7.990,98.050,1.950,12.61,224,0.875,bicubic +tf_efficientnet_es,91.980,8.020,97.860,2.140,5.44,224,0.875,bicubic +levit_128s,91.970,8.030,98.060,1.940,7.78,224,0.900,bicubic +repvgg_a2,91.940,8.060,98.150,1.850,28.21,224,0.875,bilinear +densenet169,91.930,8.070,98.100,1.900,14.15,224,0.875,bicubic +densenetblur121d,91.910,8.090,98.070,1.930,8.00,224,0.875,bicubic +tv_resnet50,91.880,8.120,98.040,1.960,25.56,224,0.875,bilinear +mixer_b16_224,91.870,8.130,97.250,2.750,59.88,224,0.875,bicubic +mixnet_s,91.830,8.170,97.690,2.310,4.13,224,0.875,bicubic +mobilenetv2_140,91.830,8.170,97.860,2.140,6.11,224,0.875,bicubic +hardcorenas_b,91.770,8.230,97.780,2.220,5.18,224,0.875,bilinear +vit_tiny_patch16_224,91.760,8.240,98.040,1.960,5.72,224,0.900,bicubic +regnety_008,91.750,8.250,98.180,1.820,6.26,224,0.875,bicubic +resnest14d,91.720,8.280,97.870,2.130,10.61,224,0.875,bilinear +densenet121,91.570,8.430,98.030,1.970,7.98,224,0.875,bicubic +tf_mixnet_s,91.510,8.490,97.620,2.380,4.13,224,0.875,bicubic +repvgg_b0,91.430,8.570,97.990,2.010,15.82,224,0.875,bilinear +regnety_006,91.370,8.630,97.710,2.290,6.06,224,0.875,bicubic +hardcorenas_a,91.350,8.650,97.860,2.140,5.26,224,0.875,bilinear +mobilenetv3_large_100,91.320,8.680,97.710,2.290,5.48,224,0.875,bicubic +semnasnet_100,91.280,8.720,97.560,2.440,3.89,224,0.875,bicubic +tf_mobilenetv3_large_100,91.240,8.760,97.660,2.340,5.48,224,0.875,bilinear +mobilenetv3_rw,91.210,8.790,97.660,2.340,5.48,224,0.875,bicubic +hrnet_w18_small_v2,91.190,8.810,97.900,2.100,15.60,224,0.875,bilinear +efficientnet_es_pruned,91.180,8.820,97.750,2.250,5.44,224,0.875,bicubic +efficientnet_lite0,91.140,8.860,97.630,2.370,4.65,224,0.875,bicubic +resnet34,91.130,8.870,97.620,2.380,21.80,224,0.875,bilinear +resnet26,91.110,8.890,97.740,2.260,16.00,224,0.875,bicubic +regnetx_008,91.050,8.950,97.710,2.290,7.26,224,0.875,bicubic +tf_efficientnet_lite0,91.040,8.960,97.590,2.410,4.65,224,0.875,bicubic +gluon_resnet34_v1b,90.960,9.040,97.630,2.370,21.80,224,0.875,bicubic +mobilenetv2_110d,90.950,9.050,97.550,2.450,4.52,224,0.875,bicubic +pit_ti_distilled_224,90.900,9.100,97.700,2.300,5.10,224,0.900,bicubic +legacy_seresnet34,90.890,9.110,97.580,2.420,21.96,224,0.875,bilinear +tv_densenet121,90.890,9.110,97.710,2.290,7.98,224,0.875,bicubic +dla34,90.760,9.240,97.660,2.340,15.74,224,0.875,bilinear +deit_tiny_distilled_patch16_224,90.700,9.300,97.570,2.430,5.91,224,0.900,bicubic +fbnetc_100,90.700,9.300,97.210,2.790,5.57,224,0.875,bilinear +swsl_resnet18,90.690,9.310,97.700,2.300,11.69,224,0.875,bilinear +convit_tiny,90.630,9.370,97.740,2.260,5.71,224,0.875,bicubic +mnasnet_100,90.510,9.490,97.470,2.530,4.38,224,0.875,bicubic +regnety_004,90.500,9.500,97.540,2.460,4.34,224,0.875,bicubic +regnetx_006,90.350,9.650,97.430,2.570,6.20,224,0.875,bicubic +spnasnet_100,90.350,9.650,97.190,2.810,4.42,224,0.875,bilinear +ssl_resnet18,90.220,9.780,97.550,2.450,11.69,224,0.875,bilinear +vgg16_bn,90.090,9.910,97.370,2.630,138.37,224,0.875,bilinear +vgg19_bn,90.080,9.920,97.580,2.420,143.68,224,0.875,bilinear +ghostnet_100,90.020,9.980,97.370,2.630,5.18,224,0.875,bilinear +pit_ti_224,89.940,10.060,97.450,2.550,4.85,224,0.900,bicubic +tv_resnet34,89.940,10.060,97.340,2.660,21.80,224,0.875,bilinear +tf_mobilenetv3_large_075,89.680,10.320,97.210,2.790,3.99,224,0.875,bilinear +deit_tiny_patch16_224,89.670,10.330,97.450,2.550,5.72,224,0.900,bicubic +skresnet18,89.660,10.340,97.230,2.770,11.96,224,0.875,bicubic +mobilenetv2_100,89.600,10.400,97.140,2.860,3.50,224,0.875,bicubic +resnet18d,89.280,10.720,97.150,2.850,11.71,224,0.875,bicubic +vit_tiny_r_s16_p8_224,89.170,10.830,97.230,2.770,6.34,224,0.900,bicubic +hrnet_w18_small,89.050,10.950,97.110,2.890,13.19,224,0.875,bilinear +vgg19,89.040,10.960,96.870,3.130,143.67,224,0.875,bilinear +tf_mobilenetv3_large_minimal_100,88.970,11.030,96.860,3.140,3.92,224,0.875,bilinear +regnetx_004,88.900,11.100,97.120,2.880,5.16,224,0.875,bicubic +legacy_seresnet18,88.880,11.120,96.980,3.020,11.78,224,0.875,bicubic +vgg13_bn,88.760,11.240,96.970,3.030,133.05,224,0.875,bilinear +vgg16,88.550,11.450,96.790,3.210,138.36,224,0.875,bilinear +gluon_resnet18_v1b,88.400,11.600,96.680,3.320,11.69,224,0.875,bicubic +vgg11_bn,87.500,12.500,96.820,3.180,132.87,224,0.875,bilinear +resnet18,87.390,12.610,96.290,3.710,11.69,224,0.875,bilinear +regnety_002,87.380,12.620,96.590,3.410,3.16,224,0.875,bicubic +mixer_l16_224,87.150,12.850,93.520,6.480,208.20,224,0.875,bicubic +vgg13,87.050,12.950,96.320,3.680,133.05,224,0.875,bilinear +vgg11,86.550,13.450,96.280,3.720,132.86,224,0.875,bilinear +dla60x_c,86.290,13.710,96.160,3.840,1.32,224,0.875,bilinear +regnetx_002,86.190,13.810,95.980,4.020,2.68,224,0.875,bicubic +tf_mobilenetv3_small_100,85.190,14.810,95.770,4.230,2.54,224,0.875,bilinear +dla46x_c,84.250,15.750,95.270,4.730,1.07,224,0.875,bilinear +dla46_c,83.650,16.350,94.920,5.080,1.30,224,0.875,bilinear +tf_mobilenetv3_small_075,83.520,16.480,94.790,5.210,2.04,224,0.875,bilinear +tf_mobilenetv3_small_minimal_100,81.380,18.620,93.670,6.330,2.04,224,0.875,bilinear diff --git a/testbed/huggingface__pytorch-image-models/results/results-imagenet-r-clean.csv b/testbed/huggingface__pytorch-image-models/results/results-imagenet-r-clean.csv new file mode 100644 index 0000000000000000000000000000000000000000..b76f0d58c9a7e7b2144a211fdb9f2f875b198b18 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/results/results-imagenet-r-clean.csv @@ -0,0 +1,421 @@ +model,top1,top1_err,top5,top5_err,param_count,img_size,cropt_pct,interpolation +tf_efficientnet_l2_ns,97.780,2.220,99.890,0.110,480.31,800,0.960,bicubic +tf_efficientnet_l2_ns_475,97.750,2.250,99.820,0.180,480.31,475,0.936,bicubic +vit_large_patch16_384,97.420,2.580,99.780,0.220,304.72,384,1.000,bicubic +tf_efficientnet_b7_ns,97.200,2.800,99.700,0.300,66.35,600,0.949,bicubic +swin_large_patch4_window12_384,97.170,2.830,99.680,0.320,196.74,384,1.000,bicubic +swin_base_patch4_window12_384,97.120,2.880,99.780,0.220,87.90,384,1.000,bicubic +tf_efficientnetv2_l_in21ft1k,97.110,2.890,99.710,0.290,118.52,480,1.000,bicubic +tf_efficientnet_b6_ns,97.020,2.980,99.710,0.290,43.04,528,0.942,bicubic +vit_base_patch16_384,97.020,2.980,99.710,0.290,86.86,384,1.000,bicubic +ig_resnext101_32x48d,96.970,3.030,99.670,0.330,828.41,224,0.875,bilinear +tf_efficientnetv2_m_in21ft1k,96.970,3.030,99.610,0.390,54.14,480,1.000,bicubic +swin_large_patch4_window7_224,96.950,3.050,99.660,0.340,196.53,224,0.900,bicubic +vit_large_r50_s32_384,96.950,3.050,99.710,0.290,329.09,384,1.000,bicubic +dm_nfnet_f6,96.920,3.080,99.720,0.280,438.36,576,0.956,bicubic +cait_m48_448,96.880,3.120,99.620,0.380,356.46,448,1.000,bicubic +resnetv2_152x4_bitm,96.870,3.130,99.660,0.340,936.53,480,1.000,bilinear +tf_efficientnet_b5_ns,96.870,3.130,99.640,0.360,30.39,456,0.934,bicubic +cait_m36_384,96.830,3.170,99.660,0.340,271.22,384,1.000,bicubic +dm_nfnet_f5,96.810,3.190,99.670,0.330,377.21,544,0.954,bicubic +dm_nfnet_f4,96.780,3.220,99.620,0.380,316.07,512,0.951,bicubic +ig_resnext101_32x32d,96.780,3.220,99.530,0.470,468.53,224,0.875,bilinear +dm_nfnet_f3,96.730,3.270,99.630,0.370,254.92,416,0.940,bicubic +tf_efficientnet_b4_ns,96.710,3.290,99.640,0.360,19.34,380,0.922,bicubic +vit_large_patch16_224,96.710,3.290,99.650,0.350,304.33,224,0.900,bicubic +tf_efficientnet_b8,96.700,3.300,99.530,0.470,87.41,672,0.954,bicubic +swin_base_patch4_window7_224,96.680,3.320,99.660,0.340,87.77,224,0.900,bicubic +tf_efficientnetv2_l,96.650,3.350,99.560,0.440,118.52,480,1.000,bicubic +cait_s36_384,96.630,3.370,99.600,0.400,68.37,384,1.000,bicubic +tf_efficientnet_b7,96.580,3.420,99.510,0.490,66.35,600,0.949,bicubic +cait_s24_384,96.570,3.430,99.550,0.450,47.06,384,1.000,bicubic +tf_efficientnet_b8_ap,96.550,3.450,99.540,0.460,87.41,672,0.954,bicubic +tf_efficientnetv2_m,96.550,3.450,99.570,0.430,54.14,480,1.000,bicubic +resnetv2_152x2_bitm,96.520,3.480,99.590,0.410,236.34,448,1.000,bilinear +deit_base_distilled_patch16_384,96.510,3.490,99.590,0.410,87.63,384,1.000,bicubic +tf_efficientnetv2_s_in21ft1k,96.470,3.530,99.570,0.430,21.46,384,1.000,bicubic +dm_nfnet_f2,96.460,3.540,99.540,0.460,193.78,352,0.920,bicubic +ecaresnet269d,96.460,3.540,99.610,0.390,102.09,352,1.000,bicubic +eca_nfnet_l2,96.450,3.550,99.620,0.380,56.72,384,1.000,bicubic +vit_base_r50_s16_384,96.450,3.550,99.660,0.340,98.95,384,1.000,bicubic +ig_resnext101_32x16d,96.440,3.560,99.540,0.460,194.03,224,0.875,bilinear +resnetrs420,96.400,3.600,99.540,0.460,191.89,416,1.000,bicubic +dm_nfnet_f1,96.390,3.610,99.470,0.530,132.63,320,0.910,bicubic +tf_efficientnet_b6_ap,96.370,3.630,99.550,0.450,43.04,528,0.942,bicubic +resmlp_big_24_224_in22ft1k,96.350,3.650,99.520,0.480,129.14,224,0.875,bicubic +tf_efficientnet_b7_ap,96.350,3.650,99.590,0.410,66.35,600,0.949,bicubic +seresnet152d,96.310,3.690,99.510,0.490,66.84,320,1.000,bicubic +vit_base_patch16_224,96.300,3.700,99.560,0.440,86.57,224,0.900,bicubic +tf_efficientnet_b6,96.290,3.710,99.520,0.480,43.04,528,0.942,bicubic +efficientnetv2_rw_m,96.270,3.730,99.560,0.440,53.24,416,1.000,bicubic +resnetv2_50x3_bitm,96.270,3.730,99.630,0.370,217.32,448,1.000,bilinear +swsl_resnext101_32x16d,96.270,3.730,99.500,0.500,194.03,224,0.875,bilinear +resnetv2_101x3_bitm,96.250,3.750,99.590,0.410,387.93,448,1.000,bilinear +swsl_resnext101_32x8d,96.240,3.760,99.590,0.410,88.79,224,0.875,bilinear +resnetrs350,96.240,3.760,99.470,0.530,163.96,384,1.000,bicubic +resnetv2_152x2_bit_teacher_384,96.190,3.810,99.500,0.500,236.34,384,1.000,bicubic +vit_large_r50_s32_224,96.180,3.820,99.530,0.470,328.99,224,0.900,bicubic +resnest269e,96.120,3.880,99.520,0.480,110.93,416,0.928,bicubic +resnet200d,96.110,3.890,99.460,0.540,64.69,320,1.000,bicubic +tf_efficientnet_b3_ns,96.100,3.900,99.480,0.520,12.23,300,0.904,bicubic +tf_efficientnet_b5_ap,96.080,3.920,99.540,0.460,30.39,456,0.934,bicubic +pit_b_distilled_224,96.070,3.930,99.380,0.620,74.79,224,0.900,bicubic +resnest200e,96.070,3.930,99.480,0.520,70.20,320,0.909,bicubic +resnetrs270,96.060,3.940,99.490,0.510,129.86,352,1.000,bicubic +vit_small_r26_s32_384,96.060,3.940,99.560,0.440,36.47,384,1.000,bicubic +swsl_resnext101_32x4d,96.050,3.950,99.530,0.470,44.18,224,0.875,bilinear +vit_base_patch16_224_miil,96.030,3.970,99.350,0.650,86.54,224,0.875,bilinear +cait_xs24_384,96.010,3.990,99.430,0.570,26.67,384,1.000,bicubic +resnetrs200,95.990,4.010,99.440,0.560,93.21,320,1.000,bicubic +tf_efficientnet_b5,95.980,4.020,99.450,0.550,30.39,456,0.934,bicubic +vit_small_patch16_384,95.980,4.020,99.590,0.410,22.20,384,1.000,bicubic +resnetrs152,95.960,4.040,99.380,0.620,86.62,320,1.000,bicubic +eca_nfnet_l1,95.940,4.060,99.490,0.510,41.41,320,1.000,bicubic +ig_resnext101_32x8d,95.930,4.070,99.380,0.620,88.79,224,0.875,bilinear +vit_base_patch32_384,95.900,4.100,99.440,0.560,88.30,384,1.000,bicubic +regnety_160,95.880,4.120,99.560,0.440,83.59,288,1.000,bicubic +resmlp_big_24_distilled_224,95.870,4.130,99.440,0.560,129.14,224,0.875,bicubic +resnet152d,95.870,4.130,99.430,0.570,60.21,320,1.000,bicubic +resnet101d,95.750,4.250,99.440,0.560,44.57,320,1.000,bicubic +resnetv2_152x2_bit_teacher,95.750,4.250,99.430,0.570,236.34,224,0.875,bicubic +deit_base_distilled_patch16_224,95.750,4.250,99.280,0.720,87.34,224,0.900,bicubic +twins_pcpvt_large,95.720,4.280,99.490,0.510,60.99,224,0.900,bicubic +twins_svt_large,95.720,4.280,99.370,0.630,99.27,224,0.900,bicubic +swin_small_patch4_window7_224,95.720,4.280,99.290,0.710,49.61,224,0.900,bicubic +efficientnetv2_rw_s,95.710,4.290,99.380,0.620,23.94,384,1.000,bicubic +tf_efficientnetv2_s,95.710,4.290,99.400,0.600,21.46,384,1.000,bicubic +dm_nfnet_f0,95.690,4.310,99.330,0.670,71.49,256,0.900,bicubic +cait_s24_224,95.650,4.350,99.390,0.610,46.92,224,1.000,bicubic +deit_base_patch16_384,95.650,4.350,99.240,0.760,86.86,384,1.000,bicubic +swsl_resnext50_32x4d,95.620,4.380,99.440,0.560,25.03,224,0.875,bilinear +tf_efficientnet_b4,95.590,4.410,99.330,0.670,19.34,380,0.922,bicubic +resnest101e,95.570,4.430,99.270,0.730,48.28,256,0.875,bilinear +twins_svt_base,95.570,4.430,99.230,0.770,56.07,224,0.900,bicubic +tf_efficientnet_b2_ns,95.520,4.480,99.340,0.660,9.11,260,0.890,bicubic +efficientnet_b4,95.520,4.480,99.390,0.610,19.34,384,1.000,bicubic +tresnet_xl_448,95.510,4.490,99.340,0.660,78.44,448,0.875,bilinear +tf_efficientnet_b4_ap,95.490,4.510,99.390,0.610,19.34,380,0.922,bicubic +regnety_032,95.470,4.530,99.320,0.680,19.44,288,1.000,bicubic +twins_pcpvt_base,95.460,4.540,99.390,0.610,43.83,224,0.900,bicubic +eca_nfnet_l0,95.450,4.550,99.390,0.610,24.14,288,1.000,bicubic +ssl_resnext101_32x16d,95.410,4.590,99.410,0.590,194.03,224,0.875,bilinear +tresnet_l_448,95.410,4.590,99.300,0.700,55.99,448,0.875,bilinear +nfnet_l0,95.390,4.610,99.420,0.580,35.07,288,1.000,bicubic +resnetv2_50x1_bit_distilled,95.390,4.610,99.430,0.570,25.55,224,0.875,bicubic +tresnet_m,95.380,4.620,99.150,0.850,31.39,224,0.875,bilinear +pnasnet5large,95.360,4.640,99.130,0.870,86.06,331,0.911,bicubic +ssl_resnext101_32x8d,95.340,4.660,99.320,0.680,88.79,224,0.875,bilinear +resnetv2_101x1_bitm,95.320,4.680,99.370,0.630,44.54,448,1.000,bilinear +resnetrs101,95.250,4.750,99.210,0.790,63.62,288,0.940,bicubic +vit_large_patch32_384,95.240,4.760,99.320,0.680,306.63,384,1.000,bicubic +cait_xxs36_384,95.220,4.780,99.320,0.680,17.37,384,1.000,bicubic +levit_384,95.210,4.790,99.160,0.840,39.13,224,0.900,bicubic +resnet51q,95.200,4.800,99.280,0.720,35.70,288,1.000,bilinear +swsl_resnet50,95.200,4.800,99.390,0.610,25.56,224,0.875,bilinear +ecaresnet101d,95.160,4.840,99.230,0.770,44.57,224,0.875,bicubic +ssl_resnext101_32x4d,95.160,4.840,99.300,0.700,44.18,224,0.875,bilinear +nasnetalarge,95.150,4.850,99.130,0.870,88.75,331,0.911,bicubic +efficientnet_b3,95.140,4.860,99.210,0.790,12.23,320,1.000,bicubic +vit_small_r26_s32_224,95.130,4.870,99.220,0.780,36.43,224,0.900,bicubic +tf_efficientnetv2_b3,95.120,4.880,99.200,0.800,14.36,300,0.904,bicubic +convit_base,95.100,4.900,99.140,0.860,86.54,224,0.875,bicubic +coat_lite_small,95.080,4.920,99.020,0.980,19.84,224,0.900,bicubic +ecaresnet50t,95.070,4.930,99.290,0.710,25.57,320,0.950,bicubic +tresnet_xl,95.060,4.940,99.260,0.740,78.44,224,0.875,bilinear +deit_base_patch16_224,95.010,4.990,98.980,1.020,86.57,224,0.900,bicubic +tf_efficientnet_b3_ap,94.970,5.030,99.110,0.890,12.23,300,0.904,bicubic +visformer_small,94.960,5.040,99.210,0.790,40.22,224,0.900,bicubic +gernet_l,94.930,5.070,99.200,0.800,31.08,256,0.875,bilinear +cait_xxs24_384,94.920,5.080,99.140,0.860,12.03,384,1.000,bicubic +convit_small,94.920,5.080,99.110,0.890,27.78,224,0.875,bicubic +tf_efficientnet_b3,94.910,5.090,99.110,0.890,12.23,300,0.904,bicubic +tresnet_l,94.900,5.100,99.030,0.970,55.99,224,0.875,bilinear +vit_small_patch16_224,94.880,5.120,99.270,0.730,22.05,224,0.900,bicubic +mixer_b16_224_miil,94.880,5.120,99.080,0.920,59.88,224,0.875,bilinear +tf_efficientnet_lite4,94.870,5.130,99.090,0.910,13.01,380,0.920,bilinear +tf_efficientnet_b1_ns,94.860,5.140,99.250,0.750,7.79,240,0.882,bicubic +seresnext50_32x4d,94.820,5.180,99.130,0.870,27.56,224,0.875,bicubic +pit_b_224,94.790,5.210,98.820,1.180,73.76,224,0.900,bicubic +coat_mini,94.770,5.230,98.950,1.050,10.34,224,0.900,bicubic +twins_svt_small,94.770,5.230,99.080,0.920,24.06,224,0.900,bicubic +legacy_senet154,94.730,5.270,99.100,0.900,115.09,224,0.875,bilinear +pit_s_distilled_224,94.730,5.270,99.190,0.810,24.04,224,0.900,bicubic +resnetv2_50x1_bitm,94.730,5.270,99.180,0.820,25.55,448,1.000,bilinear +gluon_resnet152_v1s,94.720,5.280,99.060,0.940,60.32,224,0.875,bicubic +gluon_senet154,94.710,5.290,98.970,1.030,115.09,224,0.875,bicubic +resnest50d_4s2x40d,94.710,5.290,99.130,0.870,30.42,224,0.875,bicubic +ssl_resnext50_32x4d,94.700,5.300,99.240,0.760,25.03,224,0.875,bilinear +efficientnet_el,94.670,5.330,99.130,0.870,10.59,300,0.904,bicubic +wide_resnet50_2,94.670,5.330,99.050,0.950,68.88,224,0.875,bicubic +rexnet_200,94.660,5.340,99.090,0.910,16.37,224,0.875,bicubic +tresnet_m_448,94.660,5.340,99.150,0.850,31.39,448,0.875,bilinear +gluon_seresnext101_64x4d,94.650,5.350,98.980,1.020,88.23,224,0.875,bicubic +resnest50d,94.620,5.380,99.030,0.970,27.48,224,0.875,bilinear +swin_tiny_patch4_window7_224,94.620,5.380,99.120,0.880,28.29,224,0.900,bicubic +twins_pcpvt_small,94.600,5.400,99.150,0.850,24.11,224,0.900,bicubic +deit_small_distilled_patch16_224,94.590,5.410,99.100,0.900,22.44,224,0.900,bicubic +pit_s_224,94.590,5.410,98.930,1.070,23.46,224,0.900,bicubic +vit_small_patch32_384,94.590,5.410,99.140,0.860,22.92,384,1.000,bicubic +tnt_s_patch16_224,94.580,5.420,99.180,0.820,23.76,224,0.900,bicubic +efficientnet_b3_pruned,94.580,5.420,99.070,0.930,9.86,300,0.904,bicubic +resmlp_36_distilled_224,94.570,5.430,99.160,0.840,44.69,224,0.875,bicubic +gernet_m,94.550,5.450,98.930,1.070,21.14,224,0.875,bilinear +repvgg_b3,94.550,5.450,98.910,1.090,123.09,224,0.875,bilinear +regnety_320,94.520,5.480,99.170,0.830,145.05,224,0.875,bicubic +repvgg_b3g4,94.490,5.510,99.020,0.980,83.83,224,0.875,bilinear +ecaresnet101d_pruned,94.450,5.550,99.100,0.900,24.88,224,0.875,bicubic +gluon_seresnext101_32x4d,94.450,5.550,99.090,0.910,48.96,224,0.875,bicubic +gluon_resnet152_v1d,94.440,5.560,99.010,0.990,60.21,224,0.875,bicubic +levit_256,94.400,5.600,99.060,0.940,18.89,224,0.900,bicubic +nf_resnet50,94.400,5.600,99.070,0.930,25.56,288,0.940,bicubic +vit_base_patch32_224,94.390,5.610,99.060,0.940,88.22,224,0.900,bicubic +resnest50d_1s4x24d,94.390,5.610,99.070,0.930,25.68,224,0.875,bicubic +inception_v4,94.380,5.620,98.820,1.180,42.68,299,0.875,bicubic +efficientnet_b2,94.370,5.630,99.050,0.950,9.11,288,1.000,bicubic +tf_efficientnet_el,94.360,5.640,99.100,0.900,10.59,300,0.904,bicubic +gluon_resnext101_64x4d,94.350,5.650,98.880,1.120,83.46,224,0.875,bicubic +inception_resnet_v2,94.340,5.660,98.800,1.200,55.84,299,0.897,bicubic +resmlp_24_distilled_224,94.330,5.670,99.090,0.910,30.02,224,0.875,bicubic +ssl_resnet50,94.310,5.690,99.150,0.850,25.56,224,0.875,bilinear +regnetx_120,94.270,5.730,99.190,0.810,46.11,224,0.875,bicubic +rexnet_150,94.270,5.730,99.080,0.920,9.73,224,0.875,bicubic +tf_efficientnet_b2_ap,94.270,5.730,98.950,1.050,9.11,260,0.890,bicubic +resmlp_big_24_224,94.260,5.740,98.820,1.180,129.14,224,0.875,bicubic +mixnet_xl,94.230,5.770,98.820,1.180,11.90,224,0.875,bicubic +tf_efficientnet_b2,94.210,5.790,99.030,0.970,9.11,260,0.890,bicubic +regnetx_320,94.210,5.790,99.050,0.950,107.81,224,0.875,bicubic +dpn92,94.190,5.810,98.930,1.070,37.67,224,0.875,bicubic +ecaresnet50d,94.190,5.810,99.020,0.980,25.58,224,0.875,bicubic +gluon_resnet101_v1d,94.170,5.830,98.940,1.060,44.57,224,0.875,bicubic +gluon_resnet101_v1s,94.170,5.830,99.010,0.990,44.67,224,0.875,bicubic +gluon_seresnext50_32x4d,94.170,5.830,98.910,1.090,27.56,224,0.875,bicubic +ecaresnetlight,94.140,5.860,98.950,1.050,30.16,224,0.875,bicubic +regnety_064,94.140,5.860,99.030,0.970,30.58,224,0.875,bicubic +ens_adv_inception_resnet_v2,94.130,5.870,98.790,1.210,55.84,299,0.897,bicubic +legacy_seresnext101_32x4d,94.130,5.870,98.970,1.030,48.96,224,0.875,bilinear +tf_efficientnet_lite3,94.130,5.870,98.960,1.040,8.20,300,0.904,bilinear +gluon_resnext101_32x4d,94.120,5.880,98.930,1.070,44.18,224,0.875,bicubic +efficientnet_el_pruned,94.090,5.910,99.010,0.990,10.59,300,0.904,bicubic +cspdarknet53,94.090,5.910,98.980,1.020,27.64,256,0.887,bilinear +seresnet50,94.080,5.920,98.970,1.030,28.09,224,0.875,bicubic +resnet50d,94.070,5.930,98.920,1.080,25.58,224,0.875,bicubic +tf_efficientnetv2_b2,94.070,5.930,98.930,1.070,10.10,260,0.890,bicubic +gluon_resnet152_v1b,94.030,5.970,98.740,1.260,60.19,224,0.875,bicubic +hrnet_w48,94.030,5.970,99.040,0.960,77.47,224,0.875,bilinear +resnetrs50,94.020,5.980,98.850,1.150,35.69,224,0.910,bicubic +gluon_xception65,94.010,5.990,99.020,0.980,39.92,299,0.903,bicubic +regnety_120,94.010,5.990,99.030,0.970,51.82,224,0.875,bicubic +deit_small_patch16_224,94.000,6.000,98.960,1.040,22.05,224,0.900,bicubic +dla102x2,94.000,6.000,99.030,0.970,41.28,224,0.875,bilinear +dpn107,93.960,6.040,98.840,1.160,86.92,224,0.875,bicubic +skresnext50_32x4d,93.950,6.050,98.820,1.180,27.48,224,0.875,bicubic +ecaresnet26t,93.940,6.060,98.920,1.080,16.01,320,0.950,bicubic +cait_xxs36_224,93.940,6.060,98.890,1.110,17.30,224,1.000,bicubic +dpn98,93.940,6.060,98.920,1.080,61.57,224,0.875,bicubic +xception71,93.890,6.110,98.950,1.050,42.34,299,0.903,bicubic +regnety_080,93.890,6.110,99.000,1.000,39.18,224,0.875,bicubic +gluon_resnet152_v1c,93.880,6.120,98.800,1.200,60.21,224,0.875,bicubic +regnetx_160,93.880,6.120,99.090,0.910,54.28,224,0.875,bicubic +nf_regnet_b1,93.880,6.120,98.740,1.260,10.22,288,0.900,bicubic +cspresnet50,93.860,6.140,98.870,1.130,21.62,256,0.887,bilinear +ese_vovnet39b,93.850,6.150,98.900,1.100,24.57,224,0.875,bicubic +resnext50_32x4d,93.840,6.160,98.830,1.170,25.03,224,0.875,bicubic +hrnet_w64,93.830,6.170,98.930,1.070,128.06,224,0.875,bilinear +ecaresnet50d_pruned,93.820,6.180,99.000,1.000,19.94,224,0.875,bicubic +repvgg_b2g4,93.820,6.180,98.930,1.070,61.76,224,0.875,bilinear +resnext50d_32x4d,93.810,6.190,98.740,1.260,25.05,224,0.875,bicubic +efficientnet_b2_pruned,93.800,6.200,98.910,1.090,8.31,260,0.890,bicubic +dla169,93.800,6.200,98.840,1.160,53.39,224,0.875,bilinear +regnetx_080,93.790,6.210,98.910,1.090,39.57,224,0.875,bicubic +resnext101_32x8d,93.770,6.230,98.950,1.050,88.79,224,0.875,bilinear +cspresnext50,93.760,6.240,98.840,1.160,20.57,224,0.875,bilinear +dpn131,93.760,6.240,98.800,1.200,79.25,224,0.875,bicubic +gluon_resnet101_v1b,93.760,6.240,98.700,1.300,44.55,224,0.875,bicubic +xception65,93.760,6.240,98.860,1.140,39.92,299,0.903,bicubic +efficientnet_em,93.740,6.260,98.930,1.070,6.90,240,0.882,bicubic +tf_efficientnet_b0_ns,93.740,6.260,98.980,1.020,5.29,224,0.875,bicubic +wide_resnet101_2,93.730,6.270,98.810,1.190,126.89,224,0.875,bilinear +resnetblur50,93.710,6.290,98.810,1.190,25.56,224,0.875,bicubic +tf_efficientnetv2_b1,93.710,6.290,98.820,1.180,8.14,240,0.882,bicubic +tf_efficientnet_b1,93.710,6.290,98.800,1.200,7.79,240,0.882,bicubic +levit_192,93.710,6.290,98.790,1.210,10.95,224,0.900,bicubic +hrnet_w40,93.710,6.290,98.800,1.200,57.56,224,0.875,bilinear +gluon_resnet101_v1c,93.690,6.310,98.760,1.240,44.57,224,0.875,bicubic +regnetx_040,93.680,6.320,98.940,1.060,22.12,224,0.875,bicubic +rexnet_130,93.670,6.330,98.710,1.290,7.56,224,0.875,bicubic +gluon_resnext50_32x4d,93.650,6.350,98.690,1.310,25.03,224,0.875,bicubic +resmlp_36_224,93.650,6.350,98.950,1.050,44.69,224,0.875,bicubic +xception,93.640,6.360,98.770,1.230,22.86,299,0.897,bicubic +regnetx_064,93.630,6.370,99.050,0.950,26.21,224,0.875,bicubic +tf_efficientnet_b1_ap,93.630,6.370,98.800,1.200,7.79,240,0.882,bicubic +hrnet_w44,93.620,6.380,98.960,1.040,67.06,224,0.875,bilinear +regnety_040,93.620,6.380,98.950,1.050,20.65,224,0.875,bicubic +dpn68b,93.620,6.380,98.700,1.300,12.61,224,0.875,bicubic +gluon_resnet50_v1s,93.590,6.410,98.840,1.160,25.68,224,0.875,bicubic +repvgg_b2,93.590,6.410,99.070,0.930,89.02,224,0.875,bilinear +res2net50_26w_6s,93.590,6.410,98.750,1.250,37.05,224,0.875,bilinear +dla60_res2next,93.570,6.430,98.800,1.200,17.03,224,0.875,bilinear +tf_efficientnet_cc_b1_8e,93.570,6.430,98.690,1.310,39.72,240,0.882,bicubic +gluon_inception_v3,93.540,6.460,98.830,1.170,23.83,299,0.875,bicubic +dla102x,93.530,6.470,98.850,1.150,26.31,224,0.875,bilinear +gluon_resnet50_v1d,93.530,6.470,98.710,1.290,25.58,224,0.875,bicubic +res2net101_26w_4s,93.520,6.480,98.600,1.400,45.21,224,0.875,bilinear +coat_tiny,93.510,6.490,98.690,1.310,5.50,224,0.900,bicubic +selecsls60b,93.500,6.500,98.840,1.160,32.77,224,0.875,bicubic +cait_xxs24_224,93.490,6.510,98.770,1.230,11.96,224,1.000,bicubic +xception41,93.480,6.520,98.750,1.250,26.97,299,0.903,bicubic +resnet50,93.460,6.540,98.600,1.400,25.56,224,0.875,bicubic +res2net50_26w_8s,93.450,6.550,98.700,1.300,48.40,224,0.875,bilinear +coat_lite_mini,93.450,6.550,98.780,1.220,11.01,224,0.900,bicubic +legacy_seresnet152,93.440,6.560,98.850,1.150,66.82,224,0.875,bilinear +resmlp_24_224,93.440,6.560,98.810,1.190,30.02,224,0.875,bicubic +legacy_seresnext50_32x4d,93.430,6.570,98.800,1.200,27.56,224,0.875,bilinear +vit_tiny_patch16_384,93.420,6.580,98.830,1.170,5.79,384,1.000,bicubic +repvgg_b1,93.410,6.590,98.790,1.210,57.42,224,0.875,bilinear +dla60_res2net,93.380,6.620,98.860,1.140,20.85,224,0.875,bilinear +hrnet_w30,93.370,6.630,98.830,1.170,37.71,224,0.875,bilinear +dla102,93.260,6.740,98.780,1.220,33.27,224,0.875,bilinear +legacy_seresnet101,93.260,6.740,98.740,1.260,49.33,224,0.875,bilinear +mixnet_l,93.260,6.740,98.700,1.300,7.33,224,0.875,bicubic +regnetx_032,93.250,6.750,98.730,1.270,15.30,224,0.875,bicubic +tv_resnet152,93.240,6.760,98.750,1.250,60.19,224,0.875,bilinear +pit_xs_distilled_224,93.240,6.760,98.820,1.180,11.00,224,0.900,bicubic +resnest26d,93.240,6.760,98.850,1.150,17.07,224,0.875,bilinear +tf_inception_v3,93.200,6.800,98.480,1.520,23.83,299,0.875,bicubic +dla60x,93.190,6.810,98.710,1.290,17.35,224,0.875,bilinear +res2net50_26w_4s,93.180,6.820,98.670,1.330,25.70,224,0.875,bilinear +tf_efficientnet_em,93.170,6.830,98.670,1.330,6.90,240,0.882,bicubic +res2next50,93.150,6.850,98.660,1.340,24.67,224,0.875,bilinear +tf_efficientnetv2_b0,93.060,6.940,98.700,1.300,7.14,224,0.875,bicubic +levit_128,93.050,6.950,98.690,1.310,9.21,224,0.900,bicubic +tf_mixnet_l,93.040,6.960,98.540,1.460,7.33,224,0.875,bicubic +res2net50_14w_8s,93.030,6.970,98.700,1.300,25.06,224,0.875,bilinear +repvgg_b1g4,93.030,6.970,98.820,1.180,39.97,224,0.875,bilinear +efficientnet_b1,93.030,6.970,98.710,1.290,7.79,256,1.000,bicubic +adv_inception_v3,93.010,6.990,98.490,1.510,23.83,299,0.875,bicubic +selecsls60,93.010,6.990,98.830,1.170,30.67,224,0.875,bicubic +regnety_016,93.000,7.000,98.680,1.320,11.20,224,0.875,bicubic +efficientnet_b1_pruned,92.980,7.020,98.530,1.470,6.33,240,0.882,bicubic +hardcorenas_f,92.980,7.020,98.620,1.380,8.20,224,0.875,bilinear +hardcorenas_e,92.950,7.050,98.570,1.430,8.07,224,0.875,bilinear +hrnet_w32,92.950,7.050,98.840,1.160,41.23,224,0.875,bilinear +efficientnet_es,92.910,7.090,98.690,1.310,5.44,224,0.875,bicubic +gluon_resnet50_v1c,92.910,7.090,98.710,1.290,25.58,224,0.875,bicubic +pit_xs_224,92.910,7.090,98.780,1.220,10.62,224,0.900,bicubic +tv_resnext50_32x4d,92.900,7.100,98.720,1.280,25.03,224,0.875,bilinear +inception_v3,92.900,7.100,98.330,1.670,23.83,299,0.875,bicubic +densenet161,92.900,7.100,98.810,1.190,28.68,224,0.875,bicubic +tv_resnet101,92.880,7.120,98.660,1.340,44.55,224,0.875,bilinear +resmlp_12_distilled_224,92.870,7.130,98.630,1.370,15.35,224,0.875,bicubic +tf_efficientnet_cc_b0_8e,92.870,7.130,98.460,1.540,24.01,224,0.875,bicubic +coat_lite_tiny,92.850,7.150,98.640,1.360,5.72,224,0.900,bicubic +rexnet_100,92.850,7.150,98.620,1.380,4.80,224,0.875,bicubic +tf_efficientnet_cc_b0_4e,92.840,7.160,98.440,1.560,13.31,224,0.875,bicubic +seresnext26t_32x4d,92.820,7.180,98.560,1.440,16.81,224,0.875,bicubic +res2net50_48w_2s,92.790,7.210,98.470,1.530,25.29,224,0.875,bilinear +hrnet_w18,92.760,7.240,98.660,1.340,21.30,224,0.875,bilinear +densenet201,92.690,7.310,98.650,1.350,20.01,224,0.875,bicubic +repvgg_a2,92.680,7.320,98.520,1.480,28.21,224,0.875,bilinear +gmixer_24_224,92.680,7.320,98.280,1.720,24.72,224,0.875,bicubic +dla60,92.670,7.330,98.630,1.370,22.04,224,0.875,bilinear +legacy_seresnet50,92.670,7.330,98.650,1.350,28.09,224,0.875,bilinear +resnet34d,92.640,7.360,98.420,1.580,21.82,224,0.875,bicubic +mobilenetv2_120d,92.610,7.390,98.510,1.490,5.83,224,0.875,bicubic +tf_efficientnet_b0_ap,92.610,7.390,98.370,1.630,5.29,224,0.875,bicubic +hardcorenas_d,92.600,7.400,98.430,1.570,7.50,224,0.875,bilinear +tf_efficientnet_lite2,92.590,7.410,98.550,1.450,6.09,260,0.890,bicubic +legacy_seresnext26_32x4d,92.570,7.430,98.420,1.580,16.79,224,0.875,bicubic +skresnet34,92.570,7.430,98.520,1.480,22.28,224,0.875,bicubic +gluon_resnet50_v1b,92.560,7.440,98.550,1.450,25.56,224,0.875,bicubic +regnetx_016,92.540,7.460,98.550,1.450,9.19,224,0.875,bicubic +selecsls42b,92.480,7.520,98.440,1.560,32.46,224,0.875,bicubic +efficientnet_b0,92.480,7.520,98.680,1.320,5.29,224,0.875,bicubic +gernet_s,92.440,7.560,98.500,1.500,8.17,224,0.875,bilinear +seresnext26d_32x4d,92.440,7.560,98.540,1.460,16.81,224,0.875,bicubic +densenetblur121d,92.400,7.600,98.410,1.590,8.00,224,0.875,bicubic +tf_efficientnet_b0,92.400,7.600,98.470,1.530,5.29,224,0.875,bicubic +hardcorenas_c,92.330,7.670,98.340,1.660,5.52,224,0.875,bilinear +tf_efficientnet_lite1,92.310,7.690,98.490,1.510,5.42,240,0.882,bicubic +densenet169,92.300,7.700,98.590,1.410,14.15,224,0.875,bicubic +mixnet_m,92.270,7.730,98.350,1.650,5.01,224,0.875,bicubic +mobilenetv3_large_100_miil,92.250,7.750,98.250,1.750,5.48,224,0.875,bilinear +dpn68,92.240,7.760,98.610,1.390,12.61,224,0.875,bicubic +resnet26d,92.230,7.770,98.450,1.550,16.01,224,0.875,bicubic +tf_mixnet_m,92.200,7.800,98.420,1.580,5.01,224,0.875,bicubic +vit_small_patch32_224,92.150,7.850,98.510,1.490,22.88,224,0.900,bicubic +tv_resnet50,92.140,7.860,98.420,1.580,25.56,224,0.875,bilinear +resmlp_12_224,92.120,7.880,98.570,1.430,15.35,224,0.875,bicubic +tf_efficientnet_es,92.100,7.900,98.440,1.560,5.44,224,0.875,bicubic +mobilenetv2_140,92.030,7.970,98.250,1.750,6.11,224,0.875,bicubic +ese_vovnet19b_dw,92.010,7.990,98.510,1.490,6.54,224,0.875,bicubic +densenet121,91.940,8.060,98.280,1.720,7.98,224,0.875,bicubic +hardcorenas_b,91.940,8.060,98.400,1.600,5.18,224,0.875,bilinear +vit_tiny_patch16_224,91.930,8.070,98.340,1.660,5.72,224,0.900,bicubic +regnety_008,91.900,8.100,98.420,1.580,6.26,224,0.875,bicubic +mixnet_s,91.780,8.220,98.300,1.700,4.13,224,0.875,bicubic +vit_tiny_r_s16_p8_384,91.730,8.270,98.430,1.570,6.36,384,1.000,bicubic +efficientnet_es_pruned,91.700,8.300,98.420,1.580,5.44,224,0.875,bicubic +tf_mixnet_s,91.680,8.320,98.240,1.760,4.13,224,0.875,bicubic +repvgg_b0,91.680,8.320,98.450,1.550,15.82,224,0.875,bilinear +semnasnet_100,91.660,8.340,98.270,1.730,3.89,224,0.875,bicubic +hardcorenas_a,91.620,8.380,98.170,1.830,5.26,224,0.875,bilinear +regnety_006,91.570,8.430,98.430,1.570,6.06,224,0.875,bicubic +mobilenetv3_rw,91.550,8.450,98.270,1.730,5.48,224,0.875,bicubic +levit_128s,91.500,8.500,98.400,1.600,7.78,224,0.900,bicubic +legacy_seresnet34,91.480,8.520,98.200,1.800,21.96,224,0.875,bilinear +mobilenetv3_large_100,91.480,8.520,98.320,1.680,5.48,224,0.875,bicubic +resnet26,91.440,8.560,98.280,1.720,16.00,224,0.875,bicubic +tf_mobilenetv3_large_100,91.420,8.580,98.260,1.740,5.48,224,0.875,bilinear +tv_densenet121,91.400,8.600,98.250,1.750,7.98,224,0.875,bicubic +mobilenetv2_110d,91.350,8.650,98.190,1.810,4.52,224,0.875,bicubic +tf_efficientnet_lite0,91.300,8.700,98.090,1.910,4.65,224,0.875,bicubic +fbnetc_100,91.270,8.730,97.830,2.170,5.57,224,0.875,bilinear +efficientnet_lite0,91.260,8.740,98.250,1.750,4.65,224,0.875,bicubic +dla34,91.240,8.760,98.180,1.820,15.74,224,0.875,bilinear +mnasnet_100,91.200,8.800,98.050,1.950,4.38,224,0.875,bicubic +resnet34,91.200,8.800,98.240,1.760,21.80,224,0.875,bilinear +regnetx_008,91.180,8.820,98.380,1.620,7.26,224,0.875,bicubic +hrnet_w18_small_v2,91.170,8.830,98.340,1.660,15.60,224,0.875,bilinear +mixer_b16_224,91.140,8.860,97.400,2.600,59.88,224,0.875,bicubic +resnest14d,91.130,8.870,98.330,1.670,10.61,224,0.875,bilinear +deit_tiny_distilled_patch16_224,91.100,8.900,98.270,1.730,5.91,224,0.900,bicubic +gluon_resnet34_v1b,91.100,8.900,98.180,1.820,21.80,224,0.875,bicubic +swsl_resnet18,91.090,8.910,98.210,1.790,11.69,224,0.875,bilinear +vgg19_bn,91.000,9.000,98.110,1.890,143.68,224,0.875,bilinear +pit_ti_distilled_224,90.900,9.100,98.220,1.780,5.10,224,0.900,bicubic +regnety_004,90.780,9.220,98.080,1.920,4.34,224,0.875,bicubic +regnetx_006,90.760,9.240,98.100,1.900,6.20,224,0.875,bicubic +ssl_resnet18,90.700,9.300,98.020,1.980,11.69,224,0.875,bilinear +spnasnet_100,90.610,9.390,97.950,2.050,4.42,224,0.875,bilinear +vgg16_bn,90.540,9.460,97.990,2.010,138.37,224,0.875,bilinear +convit_tiny,90.530,9.470,98.210,1.790,5.71,224,0.875,bicubic +ghostnet_100,90.440,9.560,97.830,2.170,5.18,224,0.875,bilinear +pit_ti_224,90.420,9.580,98.010,1.990,4.85,224,0.900,bicubic +tf_mobilenetv3_large_075,90.320,9.680,97.870,2.130,3.99,224,0.875,bilinear +tv_resnet34,90.290,9.710,97.980,2.020,21.80,224,0.875,bilinear +skresnet18,90.160,9.840,97.780,2.220,11.96,224,0.875,bicubic +resnet18d,89.990,10.010,97.830,2.170,11.71,224,0.875,bicubic +hrnet_w18_small,89.880,10.120,97.900,2.100,13.19,224,0.875,bilinear +mobilenetv2_100,89.830,10.170,97.830,2.170,3.50,224,0.875,bicubic +vgg19,89.680,10.320,97.550,2.450,143.67,224,0.875,bilinear +deit_tiny_patch16_224,89.620,10.380,97.960,2.040,5.72,224,0.900,bicubic +regnetx_004,89.460,10.540,97.770,2.230,5.16,224,0.875,bicubic +vgg16,89.360,10.640,97.520,2.480,138.36,224,0.875,bilinear +vit_tiny_r_s16_p8_224,89.340,10.660,97.700,2.300,6.34,224,0.900,bicubic +legacy_seresnet18,89.270,10.730,97.680,2.320,11.78,224,0.875,bicubic +vgg13_bn,89.200,10.800,97.530,2.470,133.05,224,0.875,bilinear +tf_mobilenetv3_large_minimal_100,89.180,10.820,97.320,2.680,3.92,224,0.875,bilinear +gluon_resnet18_v1b,88.660,11.340,97.100,2.900,11.69,224,0.875,bicubic +vgg11_bn,88.390,11.610,97.270,2.730,132.87,224,0.875,bilinear +regnety_002,88.200,11.800,97.430,2.570,3.16,224,0.875,bicubic +resnet18,88.150,11.850,97.120,2.880,11.69,224,0.875,bilinear +vgg13,87.570,12.430,97.120,2.880,133.05,224,0.875,bilinear +regnetx_002,87.380,12.620,96.990,3.010,2.68,224,0.875,bicubic +vgg11,87.340,12.660,97.110,2.890,132.86,224,0.875,bilinear +dla60x_c,87.110,12.890,97.140,2.860,1.32,224,0.875,bilinear +mixer_l16_224,86.970,13.030,94.060,5.940,208.20,224,0.875,bicubic +tf_mobilenetv3_small_100,85.960,14.040,96.400,3.600,2.54,224,0.875,bilinear +dla46x_c,85.480,14.520,96.440,3.560,1.07,224,0.875,bilinear +dla46_c,84.660,15.340,96.200,3.800,1.30,224,0.875,bilinear +tf_mobilenetv3_small_075,84.530,15.470,95.890,4.110,2.04,224,0.875,bilinear +tf_mobilenetv3_small_minimal_100,82.670,17.330,95.000,5.000,2.04,224,0.875,bilinear diff --git a/testbed/huggingface__pytorch-image-models/results/results-imagenet-r.csv b/testbed/huggingface__pytorch-image-models/results/results-imagenet-r.csv new file mode 100644 index 0000000000000000000000000000000000000000..bf922167be32f1fcc0f92c1ca9dbdbb0e1d84ef8 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/results/results-imagenet-r.csv @@ -0,0 +1,421 @@ +model,top1,top1_err,top5,top5_err,param_count,img_size,cropt_pct,interpolation,top1_diff,top5_diff,rank_diff +ig_resnext101_32x48d,79.650,20.350,89.393,10.607,828.41,224,0.875,bilinear,-17.320,-10.277,+9 +ig_resnext101_32x32d,79.457,20.543,89.183,10.817,468.53,224,0.875,bilinear,-17.323,-10.347,+19 +ig_resnext101_32x16d,78.837,21.163,88.480,11.520,194.03,224,0.875,bilinear,-17.603,-11.060,+37 +tf_efficientnet_l2_ns_475,76.480,23.520,88.653,11.347,480.31,475,0.936,bicubic,-21.270,-11.167,-2 +swsl_resnext101_32x16d,76.303,23.697,87.733,12.267,194.03,224,0.875,bilinear,-19.967,-11.767,+46 +ig_resnext101_32x8d,75.813,24.187,86.200,13.800,88.79,224,0.875,bilinear,-20.117,-13.180,+67 +swsl_resnext101_32x8d,75.590,24.410,86.937,13.063,88.79,224,0.875,bilinear,-20.650,-12.533,+46 +tf_efficientnet_l2_ns,74.650,25.350,87.543,12.457,480.31,800,0.960,bicubic,-23.130,-12.347,-7 +swsl_resnext101_32x4d,72.660,27.340,85.157,14.843,44.18,224,0.875,bilinear,-23.390,-14.373,+56 +swsl_resnext50_32x4d,68.977,31.023,82.810,17.190,25.03,224,0.875,bilinear,-26.643,-16.630,+79 +swsl_resnet50,68.297,31.703,83.313,16.687,25.56,224,0.875,bilinear,-26.903,-16.077,+102 +tf_efficientnet_b7_ns,67.510,32.490,81.383,18.617,66.35,600,0.949,bicubic,-29.690,-18.317,-8 +vit_large_patch16_384,67.053,32.947,78.707,21.293,304.72,384,1.000,bicubic,-30.367,-21.073,-10 +swin_large_patch4_window12_384,66.283,33.717,79.783,20.217,196.74,384,1.000,bicubic,-30.887,-19.897,-9 +tf_efficientnet_b6_ns,65.587,34.413,79.553,20.447,43.04,528,0.942,bicubic,-31.433,-20.157,-7 +vit_large_patch16_224,64.347,35.653,76.190,23.810,304.33,224,0.900,bicubic,-32.363,-23.460,+8 +vit_large_r50_s32_384,64.100,35.900,75.850,24.150,329.09,384,1.000,bicubic,-32.850,-23.860,-4 +swin_large_patch4_window7_224,63.870,36.130,78.180,21.820,196.53,224,0.900,bicubic,-33.080,-21.480,-6 +swin_base_patch4_window12_384,63.470,36.530,78.063,21.937,87.90,384,1.000,bicubic,-33.650,-21.717,-13 +tf_efficientnet_b5_ns,63.047,36.953,77.777,22.223,30.39,456,0.934,bicubic,-33.823,-21.863,-3 +tf_efficientnet_b4_ns,61.230,38.770,76.173,23.827,19.34,380,0.922,bicubic,-35.480,-23.467,+2 +tf_efficientnetv2_l_in21ft1k,60.953,39.047,75.847,24.153,118.52,480,1.000,bicubic,-36.157,-23.863,-15 +vit_base_patch16_384,60.180,39.820,73.843,26.157,86.86,384,1.000,bicubic,-36.840,-25.867,-14 +swin_base_patch4_window7_224,59.537,40.463,74.247,25.753,87.77,224,0.900,bicubic,-37.143,-25.413,+2 +tf_efficientnetv2_m_in21ft1k,58.647,41.353,73.983,26.017,54.14,480,1.000,bicubic,-38.323,-25.627,-14 +vit_large_r50_s32_224,58.633,41.367,71.720,28.280,328.99,224,0.900,bicubic,-37.547,-27.810,+30 +tf_efficientnet_b8_ap,57.830,42.170,72.957,27.043,87.41,672,0.954,bicubic,-38.720,-26.583,+4 +cait_m48_448,57.470,42.530,71.860,28.140,356.46,448,1.000,bicubic,-39.410,-27.760,-13 +cait_m36_384,57.467,42.533,72.313,27.687,271.22,384,1.000,bicubic,-39.363,-27.347,-11 +tf_efficientnet_b3_ns,57.417,42.583,72.387,27.613,12.23,300,0.904,bicubic,-38.683,-27.093,+29 +vit_base_patch16_224,56.823,43.177,70.633,29.367,86.57,224,0.900,bicubic,-39.477,-28.927,+16 +vit_base_r50_s16_384,54.403,45.597,69.560,30.440,98.95,384,1.000,bicubic,-42.047,-30.100,+7 +resnetv2_152x4_bitm,54.320,45.680,70.167,29.833,936.53,480,1.000,bilinear,-42.550,-29.493,-17 +vit_small_r26_s32_384,54.197,45.803,68.757,31.243,36.47,384,1.000,bicubic,-41.863,-30.803,+30 +tf_efficientnet_b5_ap,53.870,46.130,69.160,30.840,30.39,456,0.934,bicubic,-42.210,-30.380,+25 +tf_efficientnet_b2_ns,53.600,46.400,70.270,29.730,9.11,260,0.890,bicubic,-41.920,-29.120,+57 +tf_efficientnet_b6_ap,53.560,46.440,68.550,31.450,43.04,528,0.942,bicubic,-42.810,-31.000,+6 +cait_s36_384,53.550,46.450,68.000,32.000,68.37,384,1.000,bicubic,-43.080,-31.600,-10 +tf_efficientnet_b8,53.410,46.590,69.090,30.910,87.41,672,0.954,bicubic,-43.290,-30.440,-14 +vit_base_patch32_384,53.307,46.693,68.047,31.953,88.30,384,1.000,bicubic,-42.593,-31.393,+34 +tf_efficientnet_b7_ap,53.260,46.740,68.873,31.127,66.35,600,0.949,bicubic,-43.090,-30.717,+4 +tf_efficientnetv2_s_in21ft1k,53.150,46.850,69.000,31.000,21.46,384,1.000,bicubic,-43.320,-30.570,-7 +tf_efficientnet_b4_ap,53.090,46.910,68.210,31.790,19.34,380,0.922,bicubic,-42.400,-31.180,+53 +dm_nfnet_f5,52.870,47.130,67.430,32.570,377.21,544,0.954,bicubic,-43.940,-32.240,-25 +dm_nfnet_f6,52.447,47.553,67.120,32.880,438.36,576,0.956,bicubic,-44.473,-32.600,-31 +tf_efficientnet_b7,52.393,47.607,68.233,31.767,66.35,600,0.949,bicubic,-44.187,-31.277,-17 +tf_efficientnetv2_l,52.377,47.623,67.237,32.763,118.52,480,1.000,bicubic,-44.273,-32.323,-20 +swsl_resnet18,52.327,47.673,70.480,29.520,11.69,224,0.875,bilinear,-38.763,-27.730,+334 +efficientnetv2_rw_m,52.323,47.677,67.210,32.790,53.24,416,1.000,bicubic,-43.947,-32.350,0 +deit_base_distilled_patch16_384,52.257,47.743,67.733,32.267,87.63,384,1.000,bicubic,-44.253,-31.857,-16 +dm_nfnet_f3,52.130,47.870,66.743,33.257,254.92,416,0.940,bicubic,-44.600,-32.887,-29 +resnetv2_152x2_bit_teacher_384,51.937,48.063,68.670,31.330,236.34,384,1.000,bicubic,-44.253,-30.830,+3 +resmlp_big_24_224_in22ft1k,51.903,48.097,68.463,31.537,129.14,224,0.875,bicubic,-44.447,-31.057,-9 +cait_s24_384,51.783,48.217,66.313,33.687,47.06,384,1.000,bicubic,-44.787,-33.237,-24 +resnetv2_152x2_bitm,51.753,48.247,69.250,30.750,236.34,448,1.000,bilinear,-44.767,-30.340,-22 +ecaresnet269d,51.670,48.330,66.047,33.953,102.09,352,1.000,bicubic,-44.790,-33.563,-19 +vit_base_patch16_224_miil,51.557,48.443,65.207,34.793,86.54,224,0.875,bilinear,-44.473,-34.143,+9 +pit_b_distilled_224,51.153,48.847,66.770,33.230,74.79,224,0.900,bicubic,-44.917,-32.610,+3 +dm_nfnet_f4,50.900,49.100,65.557,34.443,316.07,512,0.951,bicubic,-45.880,-34.063,-39 +tf_efficientnet_b1_ns,50.883,49.117,67.910,32.090,7.79,240,0.882,bicubic,-43.977,-31.340,+75 +tf_efficientnetv2_m,50.557,49.443,66.010,33.990,54.14,480,1.000,bicubic,-45.993,-33.560,-29 +efficientnet_b4,50.510,49.490,65.703,34.297,19.34,384,1.000,bicubic,-45.010,-33.637,+32 +resnetv2_101x3_bitm,50.407,49.593,67.790,32.210,387.93,448,1.000,bilinear,-45.843,-31.800,-11 +ssl_resnext101_32x16d,50.257,49.743,66.033,33.967,194.03,224,0.875,bilinear,-45.153,-33.377,+36 +cait_s24_224,50.243,49.757,65.027,34.973,46.92,224,1.000,bicubic,-45.407,-34.363,+22 +eca_nfnet_l2,50.237,49.763,65.450,34.550,56.72,384,1.000,bicubic,-46.213,-34.170,-28 +vit_small_patch16_384,50.160,49.840,65.807,34.193,22.20,384,1.000,bicubic,-45.820,-33.783,+3 +resnest269e,50.153,49.847,64.670,35.330,110.93,416,0.928,bicubic,-45.967,-34.850,-11 +deit_base_distilled_patch16_224,50.063,49.937,66.227,33.773,87.34,224,0.900,bicubic,-45.687,-33.203,+11 +tf_efficientnet_b3_ap,50.057,49.943,65.210,34.790,12.23,300,0.904,bicubic,-44.913,-33.900,+55 +resnest200e,49.873,50.127,64.743,35.257,70.20,320,0.909,bicubic,-46.197,-34.737,-9 +cait_xs24_384,49.527,50.473,64.900,35.100,26.67,384,1.000,bicubic,-46.483,-34.530,-5 +tf_efficientnet_b5,49.510,50.490,65.657,34.343,30.39,456,0.934,bicubic,-46.470,-33.793,-4 +resnetv2_152x2_bit_teacher,49.480,50.520,65.617,34.383,236.34,224,0.875,bicubic,-46.270,-33.823,+5 +resnet200d,49.470,50.530,64.330,35.670,64.69,320,1.000,bicubic,-46.640,-35.130,-17 +resnest101e,49.367,50.633,65.587,34.413,48.28,256,0.875,bilinear,-46.203,-33.683,+15 +resnet152d,49.253,50.747,64.413,35.587,60.21,320,1.000,bicubic,-46.617,-35.017,0 +vit_base_patch32_224,49.253,50.747,64.340,35.660,88.22,224,0.900,bicubic,-45.137,-34.730,+92 +seresnet152d,49.247,50.753,64.170,35.830,66.84,320,1.000,bicubic,-47.063,-35.340,-33 +resmlp_big_24_distilled_224,49.097,50.903,65.470,34.530,129.14,224,0.875,bicubic,-46.773,-33.970,-4 +ssl_resnext101_32x8d,49.067,50.933,65.480,34.520,88.79,224,0.875,bilinear,-46.273,-33.840,+25 +repvgg_b3,48.917,51.083,64.887,35.113,123.09,224,0.875,bilinear,-45.633,-34.023,+80 +resnetrs420,48.857,51.143,63.427,36.573,191.89,416,1.000,bicubic,-47.543,-36.113,-42 +efficientnetv2_rw_s,48.603,51.397,63.840,36.160,23.94,384,1.000,bicubic,-47.107,-35.540,0 +efficientnet_b3,48.563,51.437,64.250,35.750,12.23,320,1.000,bicubic,-46.577,-34.960,+32 +ecaresnet101d,48.527,51.473,64.100,35.900,44.57,224,0.875,bicubic,-46.633,-35.130,+28 +dm_nfnet_f2,48.373,51.627,63.233,36.767,193.78,352,0.920,bicubic,-48.087,-36.307,-51 +vit_small_r26_s32_224,48.363,51.637,63.797,36.203,36.43,224,0.900,bicubic,-46.767,-35.423,+30 +repvgg_b3g4,48.310,51.690,64.800,35.200,83.83,224,0.875,bilinear,-46.180,-34.220,+75 +vit_large_patch32_384,48.250,51.750,61.830,38.170,306.63,384,1.000,bicubic,-46.990,-37.490,+19 +convit_base,48.217,51.783,63.000,37.000,86.54,224,0.875,bicubic,-46.883,-36.140,+29 +resnetrs350,48.050,51.950,62.653,37.347,163.96,384,1.000,bicubic,-48.190,-36.937,-38 +twins_svt_large,47.947,52.053,62.907,37.093,99.27,224,0.900,bicubic,-47.773,-36.583,-11 +mixer_b16_224_miil,47.790,52.210,63.400,36.600,59.88,224,0.875,bilinear,-47.090,-35.870,+39 +repvgg_b2g4,47.787,52.213,64.390,35.610,61.76,224,0.875,bilinear,-46.033,-34.540,+129 +eca_nfnet_l1,47.650,52.350,62.763,37.237,41.41,320,1.000,bicubic,-48.290,-36.727,-24 +resnetv2_50x3_bitm,47.593,52.407,65.603,34.397,217.32,448,1.000,bilinear,-48.677,-34.027,-47 +pit_s_distilled_224,47.543,52.457,63.493,36.507,24.04,224,0.900,bicubic,-47.187,-35.697,+43 +resnest50d_4s2x40d,47.483,52.517,63.807,36.193,30.42,224,0.875,bicubic,-47.227,-35.323,+46 +efficientnet_b3_pruned,47.447,52.553,62.793,37.207,9.86,300,0.904,bicubic,-47.133,-36.387,+59 +tresnet_m,47.230,52.770,61.993,38.007,31.39,224,0.875,bilinear,-48.150,-37.157,+3 +tf_efficientnet_b6,47.213,52.787,63.110,36.890,43.04,528,0.942,bicubic,-49.077,-36.410,-54 +ssl_resnext101_32x4d,47.177,52.823,63.367,36.633,44.18,224,0.875,bilinear,-47.983,-35.933,+12 +resnetrs270,47.107,52.893,62.010,37.990,129.86,352,1.000,bicubic,-48.953,-37.480,-41 +tf_efficientnet_b4,47.083,52.917,62.867,37.133,19.34,380,0.922,bicubic,-48.507,-36.463,-15 +resnet101d,46.893,53.107,62.317,37.683,44.57,320,1.000,bicubic,-48.857,-36.963,-28 +resnetrs200,46.837,53.163,62.487,37.513,93.21,320,1.000,bicubic,-49.153,-36.953,-39 +gluon_seresnext101_64x4d,46.677,53.323,61.303,38.697,88.23,224,0.875,bicubic,-47.973,-37.677,+43 +twins_pcpvt_large,46.637,53.363,62.240,37.760,60.99,224,0.900,bicubic,-49.083,-37.050,-28 +dm_nfnet_f1,46.547,53.453,61.407,38.593,132.63,320,0.910,bicubic,-49.843,-38.063,-68 +tresnet_xl,46.283,53.717,61.943,38.057,78.44,224,0.875,bilinear,-48.777,-37.317,+12 +deit_small_distilled_patch16_224,46.160,53.840,62.417,37.583,22.44,224,0.900,bicubic,-48.430,-36.683,+43 +regnety_160,46.153,53.847,61.837,38.163,83.59,288,1.000,bicubic,-49.727,-37.723,-38 +gernet_m,46.150,53.850,62.700,37.300,21.14,224,0.875,bilinear,-48.400,-36.230,+47 +resnest50d_1s4x24d,46.083,53.917,62.377,37.623,25.68,224,0.875,bicubic,-48.307,-36.683,+56 +tf_efficientnet_b0_ns,46.047,53.953,63.253,36.747,5.29,224,0.875,bicubic,-47.693,-35.727,+119 +resnet51q,46.027,53.973,60.910,39.090,35.70,288,1.000,bilinear,-49.173,-38.370,-5 +vit_small_patch16_224,45.990,54.010,61.820,38.180,22.05,224,0.900,bicubic,-48.890,-37.260,+14 +resnest50d,45.937,54.063,62.623,37.377,27.48,224,0.875,bilinear,-48.683,-36.407,+33 +twins_pcpvt_base,45.893,54.107,61.337,38.663,43.83,224,0.900,bicubic,-49.567,-38.053,-23 +regnety_032,45.893,54.107,61.537,38.463,19.44,288,1.000,bicubic,-49.577,-37.783,-23 +levit_384,45.877,54.123,61.693,38.307,39.13,224,0.900,bicubic,-49.333,-37.467,-11 +twins_svt_base,45.877,54.123,60.967,39.033,56.07,224,0.900,bicubic,-49.693,-38.263,-31 +gluon_seresnext101_32x4d,45.590,54.410,61.143,38.857,48.96,224,0.875,bicubic,-48.860,-37.947,+42 +dm_nfnet_f0,45.483,54.517,60.983,39.017,71.49,256,0.900,bicubic,-50.207,-38.347,-39 +gluon_resnet152_v1d,45.430,54.570,60.077,39.923,60.21,224,0.875,bicubic,-49.010,-38.933,+41 +nfnet_l0,45.420,54.580,62.080,37.920,35.07,288,1.000,bicubic,-49.970,-37.340,-25 +ssl_resnext50_32x4d,45.407,54.593,62.047,37.953,25.03,224,0.875,bilinear,-49.293,-37.193,+18 +resnetv2_50x1_bit_distilled,45.393,54.607,62.303,37.697,25.55,224,0.875,bicubic,-49.997,-37.127,-26 +tresnet_xl_448,45.223,54.777,61.437,38.563,78.44,448,0.875,bilinear,-50.287,-37.903,-35 +nasnetalarge,45.210,54.790,57.883,42.117,88.75,331,0.911,bicubic,-49.940,-41.247,-15 +convit_small,45.203,54.797,60.510,39.490,27.78,224,0.875,bicubic,-49.717,-38.600,-3 +swin_small_patch4_window7_224,45.163,54.837,60.330,39.670,49.61,224,0.900,bicubic,-50.557,-39.040,-50 +tf_efficientnet_b3,45.107,54.893,60.650,39.350,12.23,300,0.904,bicubic,-49.803,-38.460,-4 +rexnet_200,45.047,54.953,62.317,37.683,16.37,224,0.875,bicubic,-49.613,-36.773,+14 +resnetrs152,44.943,55.057,59.713,40.287,86.62,320,1.000,bicubic,-51.017,-39.667,-65 +ecaresnetlight,44.890,55.110,60.770,39.230,30.16,224,0.875,bicubic,-49.250,-38.180,+54 +deit_base_patch16_224,44.870,55.130,59.177,40.823,86.57,224,0.900,bicubic,-50.140,-39.803,-14 +deit_base_patch16_384,44.777,55.223,59.617,40.383,86.86,384,1.000,bicubic,-50.873,-39.623,-51 +cait_xxs36_384,44.773,55.227,59.380,40.620,17.37,384,1.000,bicubic,-50.447,-39.940,-30 +resmlp_36_distilled_224,44.757,55.243,61.073,38.927,44.69,224,0.875,bicubic,-49.813,-38.087,+19 +gernet_l,44.740,55.260,58.943,41.057,31.08,256,0.875,bilinear,-50.190,-40.257,-15 +resmlp_24_distilled_224,44.707,55.293,61.467,38.533,30.02,224,0.875,bicubic,-49.623,-37.623,+34 +tf_efficientnet_b2_ap,44.700,55.300,60.680,39.320,9.11,260,0.890,bicubic,-49.570,-38.270,+37 +ens_adv_inception_resnet_v2,44.393,55.607,58.117,41.883,55.84,299,0.897,bicubic,-49.737,-40.673,+48 +tresnet_l,44.363,55.637,59.953,40.047,55.99,224,0.875,bilinear,-50.537,-39.077,-15 +gluon_resnext101_32x4d,44.290,55.710,59.090,40.910,44.18,224,0.875,bicubic,-49.830,-39.840,+49 +wide_resnet50_2,44.177,55.823,59.727,40.273,68.88,224,0.875,bicubic,-50.493,-39.323,0 +cspresnext50,44.147,55.853,60.533,39.467,20.57,224,0.875,bilinear,-49.613,-38.307,+81 +resnetv2_101x1_bitm,44.127,55.873,61.983,38.017,44.54,448,1.000,bilinear,-51.193,-37.387,-43 +seresnext50_32x4d,44.127,55.873,59.490,40.510,27.56,224,0.875,bicubic,-50.693,-39.640,-15 +gluon_resnet152_v1s,44.073,55.927,58.703,41.297,60.32,224,0.875,bicubic,-50.647,-40.357,-9 +pit_b_224,44.070,55.930,58.017,41.983,73.76,224,0.900,bicubic,-50.720,-40.803,-16 +ssl_resnet50,44.010,55.990,61.887,38.113,25.56,224,0.875,bilinear,-50.300,-37.263,+24 +inception_resnet_v2,44.003,55.997,57.907,42.093,55.84,299,0.897,bicubic,-50.337,-40.893,+21 +pnasnet5large,43.950,56.050,56.730,43.270,86.06,331,0.911,bicubic,-51.410,-42.400,-51 +pit_s_224,43.890,56.110,58.627,41.373,23.46,224,0.900,bicubic,-50.700,-40.303,-1 +gluon_resnext101_64x4d,43.877,56.123,58.710,41.290,83.46,224,0.875,bicubic,-50.473,-40.170,+17 +coat_lite_small,43.823,56.177,57.147,42.853,19.84,224,0.900,bicubic,-51.257,-41.873,-38 +tnt_s_patch16_224,43.773,56.227,59.197,40.803,23.76,224,0.900,bicubic,-50.807,-39.873,-2 +cait_xxs36_224,43.760,56.240,58.720,41.280,17.30,224,1.000,bicubic,-50.180,-40.200,+51 +ecaresnet50d,43.750,56.250,60.387,39.613,25.58,224,0.875,bicubic,-50.440,-38.633,+25 +ecaresnet101d_pruned,43.737,56.263,59.607,40.393,24.88,224,0.875,bicubic,-50.713,-39.493,+2 +tf_efficientnetv2_s,43.710,56.290,58.597,41.403,21.46,384,1.000,bicubic,-52.000,-40.803,-79 +rexnet_150,43.690,56.310,60.897,39.103,9.73,224,0.875,bicubic,-50.580,-38.183,+15 +pit_xs_distilled_224,43.663,56.337,60.703,39.297,11.00,224,0.900,bicubic,-49.577,-38.147,+115 +gluon_resnet101_v1d,43.440,56.560,58.613,41.387,44.57,224,0.875,bicubic,-50.730,-40.327,+21 +ecaresnet50t,43.407,56.593,59.300,40.700,25.57,320,0.950,bicubic,-51.663,-39.990,-46 +gluon_resnet101_v1s,43.363,56.637,58.503,41.497,44.67,224,0.875,bicubic,-50.807,-40.507,+20 +cspdarknet53,43.357,56.643,59.430,40.570,27.64,256,0.887,bilinear,-50.733,-39.580,+28 +dpn68b,43.287,56.713,58.673,41.327,12.61,224,0.875,bicubic,-50.333,-40.277,+81 +visformer_small,43.253,56.747,57.993,42.007,40.22,224,0.900,bicubic,-51.707,-41.217,-46 +eca_nfnet_l0,43.233,56.767,59.913,40.087,24.14,288,1.000,bicubic,-52.217,-39.477,-74 +vit_small_patch32_384,43.143,56.857,59.293,40.707,22.92,384,1.000,bicubic,-51.447,-39.847,-17 +resnest26d,43.140,56.860,60.623,39.377,17.07,224,0.875,bilinear,-50.100,-38.127,+107 +twins_pcpvt_small,43.090,56.910,58.873,41.127,24.11,224,0.900,bicubic,-51.510,-40.277,-22 +resmlp_36_224,43.050,56.950,59.310,40.690,44.69,224,0.875,bicubic,-50.600,-39.640,+69 +dpn131,43.047,56.953,57.440,42.560,79.25,224,0.875,bicubic,-50.713,-41.360,+53 +cspresnet50,43.030,56.970,59.153,40.847,21.62,256,0.887,bilinear,-50.830,-39.717,+40 +tf_efficientnet_lite4,42.967,57.033,57.620,42.380,13.01,380,0.920,bilinear,-51.903,-41.470,-46 +twins_svt_small,42.923,57.077,58.453,41.547,24.06,224,0.900,bicubic,-51.847,-40.627,-42 +gluon_resnet152_v1b,42.903,57.097,57.750,42.250,60.19,224,0.875,bicubic,-51.127,-40.990,+20 +dpn107,42.857,57.143,57.367,42.633,86.92,224,0.875,bicubic,-51.103,-41.473,+26 +levit_256,42.823,57.177,57.897,42.103,18.89,224,0.900,bicubic,-51.577,-41.163,-16 +tf_efficientnet_b1_ap,42.803,57.197,58.813,41.187,7.79,240,0.882,bicubic,-50.827,-39.987,+64 +gluon_resnet152_v1c,42.800,57.200,57.737,42.263,60.21,224,0.875,bicubic,-51.080,-41.063,+30 +gluon_xception65,42.793,57.207,58.820,41.180,39.92,299,0.903,bicubic,-51.217,-40.200,+18 +tresnet_l_448,42.753,57.247,58.947,41.053,55.99,448,0.875,bilinear,-52.657,-40.353,-87 +resnet50d,42.707,57.293,58.697,41.303,25.58,224,0.875,bicubic,-51.363,-40.223,+11 +gluon_seresnext50_32x4d,42.683,57.317,58.710,41.290,27.56,224,0.875,bicubic,-51.487,-40.200,0 +resnext101_32x8d,42.557,57.443,58.317,41.683,88.79,224,0.875,bilinear,-51.213,-40.633,+38 +nf_resnet50,42.510,57.490,59.520,40.480,25.56,288,0.940,bicubic,-51.890,-39.550,-23 +seresnet50,42.510,57.490,58.667,41.333,28.09,224,0.875,bicubic,-51.570,-40.303,+6 +resnetrs101,42.437,57.563,57.300,42.700,63.62,288,0.940,bicubic,-52.813,-41.910,-86 +tf_efficientnetv2_b3,42.313,57.687,57.940,42.060,14.36,300,0.904,bicubic,-52.807,-41.260,-76 +dpn98,42.280,57.720,56.880,43.120,61.57,224,0.875,bicubic,-51.660,-42.040,+17 +deit_small_patch16_224,42.263,57.737,58.020,41.980,22.05,224,0.900,bicubic,-51.737,-40.940,+10 +tf_efficientnet_cc_b1_8e,42.233,57.767,58.420,41.580,39.72,240,0.882,bicubic,-51.337,-40.270,+59 +legacy_senet154,42.207,57.793,56.597,43.403,115.09,224,0.875,bilinear,-52.523,-42.503,-59 +cait_xxs24_384,42.187,57.813,57.460,42.540,12.03,384,1.000,bicubic,-52.733,-41.680,-72 +tf_efficientnet_b2,42.120,57.880,58.197,41.803,9.11,260,0.890,bicubic,-52.090,-40.853,-17 +gluon_resnext50_32x4d,42.043,57.957,57.667,42.333,25.03,224,0.875,bicubic,-51.607,-41.023,+43 +resnet50,42.013,57.987,56.000,44.000,25.56,224,0.875,bicubic,-51.447,-42.600,+63 +ecaresnet50d_pruned,41.953,58.047,58.217,41.783,19.94,224,0.875,bicubic,-51.867,-40.783,+19 +efficientnet_b2,41.933,58.067,58.300,41.700,9.11,288,1.000,bicubic,-52.437,-40.750,-32 +dla102x2,41.647,58.353,57.967,42.033,41.28,224,0.875,bilinear,-52.353,-41.063,+2 +hrnet_w64,41.637,58.363,57.130,42.870,128.06,224,0.875,bilinear,-52.193,-41.800,+15 +gluon_senet154,41.627,58.373,56.373,43.627,115.09,224,0.875,bicubic,-53.083,-42.597,-64 +inception_v4,41.577,58.423,55.383,44.617,42.68,299,0.875,bicubic,-52.803,-43.437,-37 +efficientnet_el,41.497,58.503,58.303,41.697,10.59,300,0.904,bicubic,-53.173,-40.827,-63 +efficientnet_em,41.493,58.507,58.877,41.123,6.90,240,0.882,bicubic,-52.247,-40.053,+23 +tf_efficientnet_cc_b0_8e,41.487,58.513,57.377,42.623,24.01,224,0.875,bicubic,-51.383,-41.083,+97 +swin_tiny_patch4_window7_224,41.457,58.543,57.303,42.697,28.29,224,0.900,bicubic,-53.163,-41.817,-60 +resnext50_32x4d,41.443,58.557,56.997,43.003,25.03,224,0.875,bicubic,-52.397,-41.833,+7 +cait_xxs24_224,41.383,58.617,57.527,42.473,11.96,224,1.000,bicubic,-52.107,-41.243,+49 +tv_resnet152,41.327,58.673,57.520,42.480,60.19,224,0.875,bilinear,-51.913,-41.300,+64 +xception71,41.270,58.730,55.873,44.127,42.34,299,0.903,bicubic,-52.620,-43.127,-3 +dpn92,41.267,58.733,56.333,43.667,37.67,224,0.875,bicubic,-52.923,-42.597,-32 +adv_inception_v3,41.263,58.737,56.317,43.683,23.83,299,0.875,bicubic,-51.747,-42.173,+75 +gernet_s,41.247,58.753,58.830,41.170,8.17,224,0.875,bilinear,-51.193,-39.670,+112 +resnetblur50,41.053,58.947,57.077,42.923,25.56,224,0.875,bicubic,-52.657,-41.723,+16 +nf_regnet_b1,41.013,58.987,58.120,41.880,10.22,288,0.900,bicubic,-52.867,-40.970,-4 +gluon_resnet50_v1d,40.970,59.030,57.137,42.863,25.58,224,0.875,bicubic,-52.560,-41.573,+37 +gluon_inception_v3,40.907,59.093,55.617,44.383,23.83,299,0.875,bicubic,-52.633,-43.213,+34 +ese_vovnet39b,40.867,59.133,56.947,43.053,24.57,224,0.875,bicubic,-52.983,-41.953,-5 +levit_192,40.847,59.153,56.687,43.313,10.95,224,0.900,bicubic,-52.863,-42.113,+14 +regnety_320,40.813,59.187,56.117,43.883,145.05,224,0.875,bicubic,-53.707,-43.053,-64 +resnet34d,40.810,59.190,56.530,43.470,21.82,224,0.875,bicubic,-51.830,-41.890,+93 +xception,40.763,59.237,56.387,43.613,22.86,299,0.897,bicubic,-52.877,-42.383,+18 +skresnext50_32x4d,40.700,59.300,56.023,43.977,27.48,224,0.875,bicubic,-53.250,-42.797,-20 +gluon_resnet101_v1b,40.680,59.320,56.117,43.883,44.55,224,0.875,bicubic,-53.080,-42.583,+1 +hrnet_w40,40.660,59.340,56.753,43.247,57.56,224,0.875,bilinear,-53.050,-42.067,+9 +resmlp_24_224,40.653,59.347,56.573,43.427,30.02,224,0.875,bicubic,-52.787,-42.237,+37 +repvgg_b1,40.593,59.407,57.837,42.163,57.42,224,0.875,bilinear,-52.817,-40.953,+39 +tf_efficientnet_lite3,40.563,59.437,56.477,43.523,8.20,300,0.904,bilinear,-53.567,-42.483,-40 +tresnet_m_448,40.530,59.470,56.700,43.300,31.39,448,0.875,bilinear,-54.130,-42.450,-86 +pit_xs_224,40.497,59.503,56.530,43.470,10.62,224,0.900,bicubic,-52.413,-42.250,+66 +dla169,40.493,59.507,57.263,42.737,53.39,224,0.875,bilinear,-53.307,-41.647,-11 +repvgg_b2,40.467,59.533,57.780,42.220,89.02,224,0.875,bilinear,-53.123,-41.290,+15 +regnetx_320,40.443,59.557,55.660,44.340,107.81,224,0.875,bicubic,-53.767,-43.370,-55 +coat_mini,40.420,59.580,55.167,44.833,10.34,224,0.900,bicubic,-54.350,-43.783,-103 +skresnet34,40.397,59.603,56.737,43.263,22.28,224,0.875,bicubic,-52.173,-41.783,+85 +efficientnet_el_pruned,40.390,59.610,56.903,43.097,10.59,300,0.904,bicubic,-53.700,-42.077,-46 +efficientnet_b2_pruned,40.383,59.617,56.537,43.463,8.31,260,0.890,bicubic,-53.417,-42.303,-18 +coat_lite_mini,40.360,59.640,55.717,44.283,11.01,224,0.900,bicubic,-53.090,-42.983,+23 +legacy_seresnext101_32x4d,40.360,59.640,54.817,45.183,48.96,224,0.875,bilinear,-53.770,-44.153,-52 +wide_resnet101_2,40.360,59.640,55.780,44.220,126.89,224,0.875,bilinear,-53.370,-43.030,-11 +tf_efficientnet_b0_ap,40.337,59.663,56.787,43.213,5.29,224,0.875,bicubic,-52.273,-41.583,+75 +xception65,40.273,59.727,55.283,44.717,39.92,299,0.903,bicubic,-53.487,-43.577,-16 +regnetx_160,40.270,59.730,56.050,43.950,54.28,224,0.875,bicubic,-53.610,-42.690,-33 +densenet201,40.267,59.733,56.710,43.290,20.01,224,0.875,bicubic,-52.423,-41.940,+65 +resnext50d_32x4d,40.170,59.830,55.487,44.513,25.05,224,0.875,bicubic,-53.640,-43.253,-27 +hrnet_w48,40.093,59.907,56.640,43.360,77.47,224,0.875,bilinear,-53.937,-42.400,-50 +legacy_seresnet152,40.043,59.957,55.820,44.180,66.82,224,0.875,bilinear,-53.397,-43.030,+15 +hrnet_w30,40.030,59.970,57.093,42.907,37.71,224,0.875,bilinear,-53.340,-41.737,+20 +regnetx_080,40.000,60.000,55.977,44.023,39.57,224,0.875,bicubic,-53.790,-42.933,-28 +tf_efficientnet_b1,39.977,60.023,56.137,43.863,7.79,240,0.882,bicubic,-53.733,-42.673,-18 +gluon_resnet101_v1c,39.953,60.047,55.300,44.700,44.57,224,0.875,bicubic,-53.737,-43.460,-16 +resmlp_12_distilled_224,39.843,60.157,57.440,42.560,15.35,224,0.875,bicubic,-53.027,-41.190,+49 +tf_efficientnetv2_b0,39.787,60.213,56.283,43.717,7.14,224,0.875,bicubic,-53.273,-42.417,+28 +res2net101_26w_4s,39.717,60.283,54.550,45.450,45.21,224,0.875,bilinear,-53.803,-44.050,0 +regnetx_120,39.687,60.313,55.633,44.367,46.11,224,0.875,bicubic,-54.583,-43.557,-83 +hrnet_w44,39.677,60.323,55.333,44.667,67.06,224,0.875,bilinear,-53.943,-43.367,-13 +vit_small_patch32_224,39.667,60.333,55.253,44.747,22.88,224,0.900,bicubic,-52.483,-43.257,+80 +densenet161,39.620,60.380,56.133,43.867,28.68,224,0.875,bicubic,-53.280,-42.587,+41 +resmlp_big_24_224,39.620,60.380,54.817,45.183,129.14,224,0.875,bicubic,-54.640,-44.003,-84 +mixnet_xl,39.617,60.383,55.887,44.113,11.90,224,0.875,bicubic,-54.613,-42.933,-84 +xception41,39.610,60.390,55.037,44.963,26.97,299,0.903,bicubic,-53.870,-43.713,-3 +res2net50_26w_8s,39.603,60.397,54.550,45.450,48.40,224,0.875,bilinear,-53.847,-44.230,-2 +tf_efficientnetv2_b1,39.570,60.430,55.343,44.657,8.14,240,0.882,bicubic,-54.140,-43.447,-32 +dla102x,39.553,60.447,56.323,43.677,26.31,224,0.875,bilinear,-53.977,-42.527,-12 +rexnet_130,39.487,60.513,56.640,43.360,7.56,224,0.875,bicubic,-54.183,-42.070,-28 +hrnet_w32,39.463,60.537,56.123,43.877,41.23,224,0.875,bilinear,-53.487,-42.717,+27 +resnetv2_50x1_bitm,39.440,60.560,57.847,42.153,25.55,448,1.000,bilinear,-55.290,-41.333,-132 +levit_128,39.433,60.567,55.350,44.650,9.21,224,0.900,bicubic,-53.617,-43.340,+14 +regnety_064,39.403,60.597,55.773,44.227,30.58,224,0.875,bicubic,-54.737,-43.257,-84 +densenetblur121d,39.380,60.620,56.640,43.360,8.00,224,0.875,bicubic,-53.020,-41.770,+57 +regnety_120,39.347,60.653,55.277,44.723,51.82,224,0.875,bicubic,-54.663,-43.753,-72 +tv_resnet101,39.307,60.693,55.803,44.197,44.55,224,0.875,bilinear,-53.573,-42.857,+28 +tf_efficientnet_el,39.303,60.697,55.387,44.613,10.59,300,0.904,bicubic,-55.057,-43.713,-106 +tf_inception_v3,39.237,60.763,54.300,45.700,23.83,299,0.875,bicubic,-53.963,-44.180,+2 +gluon_resnet50_v1s,39.233,60.767,55.010,44.990,25.68,224,0.875,bicubic,-54.357,-43.830,-29 +tf_efficientnetv2_b2,39.180,60.820,54.570,45.430,10.10,260,0.890,bicubic,-54.890,-44.360,-82 +densenet169,39.167,60.833,55.843,44.157,14.15,224,0.875,bicubic,-53.133,-42.747,+54 +legacy_seresnet101,39.037,60.963,55.003,44.997,49.33,224,0.875,bilinear,-54.223,-43.737,-8 +efficientnet_b1_pruned,39.010,60.990,55.647,44.353,6.33,240,0.882,bicubic,-53.970,-42.883,+11 +repvgg_b1g4,38.990,61.010,56.350,43.650,39.97,224,0.875,bilinear,-54.040,-42.470,+5 +inception_v3,38.960,61.040,53.853,46.147,23.83,299,0.875,bicubic,-53.940,-44.477,+17 +dpn68,38.933,61.067,54.933,45.067,12.61,224,0.875,bicubic,-53.307,-43.677,+52 +regnety_080,38.917,61.083,55.213,44.787,39.18,224,0.875,bicubic,-54.973,-43.737,-75 +legacy_seresnext50_32x4d,38.877,61.123,54.593,45.407,27.56,224,0.875,bilinear,-54.553,-44.207,-20 +dla102,38.833,61.167,55.323,44.677,33.27,224,0.875,bilinear,-54.427,-43.457,-16 +regnety_040,38.820,61.180,55.557,44.443,20.65,224,0.875,bicubic,-54.800,-43.403,-42 +densenet121,38.783,61.217,56.273,43.727,7.98,224,0.875,bicubic,-53.157,-42.007,+56 +res2net50_14w_8s,38.710,61.290,54.077,45.923,25.06,224,0.875,bilinear,-54.320,-44.633,-4 +regnetx_040,38.703,61.297,55.340,44.660,22.12,224,0.875,bicubic,-54.977,-43.600,-53 +res2net50_26w_6s,38.687,61.313,53.743,46.257,37.05,224,0.875,bilinear,-54.903,-45.007,-42 +regnetx_032,38.680,61.320,55.157,44.843,15.30,224,0.875,bicubic,-54.570,-43.573,-19 +selecsls60,38.623,61.377,55.630,44.370,30.67,224,0.875,bicubic,-54.387,-43.200,-4 +dla60x,38.617,61.383,55.383,44.617,17.35,224,0.875,bilinear,-54.573,-43.327,-16 +tf_efficientnet_b0,38.600,61.400,55.957,44.043,5.29,224,0.875,bicubic,-53.800,-42.513,+34 +dla60_res2net,38.590,61.410,54.560,45.440,20.85,224,0.875,bilinear,-54.790,-44.300,-28 +selecsls60b,38.573,61.427,55.307,44.693,32.77,224,0.875,bicubic,-54.927,-43.533,-40 +repvgg_a2,38.563,61.437,55.770,44.230,28.21,224,0.875,bilinear,-54.117,-42.510,+13 +hardcorenas_f,38.500,61.500,55.657,44.343,8.20,224,0.875,bilinear,-54.480,-42.963,-7 +dla60_res2next,38.450,61.550,54.950,45.050,17.03,224,0.875,bilinear,-55.120,-43.850,-50 +resmlp_12_224,38.443,61.557,56.327,43.673,15.35,224,0.875,bicubic,-53.677,-42.243,+39 +regnetx_064,38.430,61.570,54.990,45.010,26.21,224,0.875,bicubic,-55.200,-44.060,-60 +tf_efficientnet_cc_b0_4e,38.413,61.587,55.150,44.850,13.31,224,0.875,bicubic,-54.427,-43.290,+3 +gluon_resnet50_v1b,38.407,61.593,54.833,45.167,25.56,224,0.875,bicubic,-54.153,-43.717,+18 +hrnet_w18,38.277,61.723,55.643,44.357,21.30,224,0.875,bilinear,-54.483,-43.017,+4 +mixnet_l,38.160,61.840,54.757,45.243,7.33,224,0.875,bicubic,-55.100,-43.943,-34 +hardcorenas_e,38.137,61.863,55.173,44.827,8.07,224,0.875,bilinear,-54.813,-43.397,-14 +efficientnet_b1,38.087,61.913,54.010,45.990,7.79,256,1.000,bicubic,-54.943,-44.690,-21 +coat_lite_tiny,38.070,61.930,53.453,46.547,5.72,224,0.900,bicubic,-54.780,-45.187,-5 +gmixer_24_224,38.050,61.950,52.083,47.917,24.72,224,0.875,bicubic,-54.630,-46.437,+2 +resnetrs50,37.957,62.043,53.310,46.690,35.69,224,0.910,bicubic,-56.063,-45.540,-113 +hardcorenas_c,37.883,62.117,55.717,44.283,5.52,224,0.875,bilinear,-54.447,-42.623,+18 +gluon_resnet50_v1c,37.843,62.157,54.123,45.877,25.58,224,0.875,bicubic,-55.067,-44.587,-17 +res2net50_26w_4s,37.827,62.173,53.073,46.927,25.70,224,0.875,bilinear,-55.353,-45.597,-35 +efficientnet_es,37.770,62.230,54.967,45.033,5.44,224,0.875,bicubic,-55.140,-43.723,-20 +resnest14d,37.767,62.233,56.470,43.530,10.61,224,0.875,bilinear,-53.363,-41.860,+57 +tv_resnext50_32x4d,37.750,62.250,54.113,45.887,25.03,224,0.875,bilinear,-55.150,-44.697,-19 +ecaresnet26t,37.650,62.350,54.350,45.650,16.01,320,0.950,bicubic,-56.290,-44.540,-113 +hardcorenas_d,37.550,62.450,54.723,45.277,7.50,224,0.875,bilinear,-55.050,-43.707,-1 +res2next50,37.477,62.523,52.853,47.147,24.67,224,0.875,bilinear,-55.673,-45.807,-39 +resnet34,37.443,62.557,54.297,45.703,21.80,224,0.875,bilinear,-53.757,-43.943,+48 +pit_ti_distilled_224,37.337,62.663,55.137,44.863,5.10,224,0.900,bicubic,-53.563,-43.083,+56 +hardcorenas_b,37.243,62.757,55.073,44.927,5.18,224,0.875,bilinear,-54.697,-43.327,+22 +mobilenetv3_large_100_miil,37.210,62.790,53.513,46.487,5.48,224,0.875,bilinear,-55.040,-44.737,+10 +res2net50_48w_2s,37.117,62.883,53.333,46.667,25.29,224,0.875,bilinear,-55.673,-45.137,-17 +dla60,37.073,62.927,54.200,45.800,22.04,224,0.875,bilinear,-55.597,-44.430,-13 +rexnet_100,37.063,62.937,54.020,45.980,4.80,224,0.875,bicubic,-55.787,-44.600,-22 +regnety_016,37.017,62.983,54.093,45.907,11.20,224,0.875,bicubic,-55.983,-44.587,-38 +tf_mixnet_l,36.987,63.013,52.583,47.417,7.33,224,0.875,bicubic,-56.053,-45.957,-45 +legacy_seresnet50,36.873,63.127,53.487,46.513,28.09,224,0.875,bilinear,-55.797,-45.163,-16 +tv_densenet121,36.810,63.190,54.033,45.967,7.98,224,0.875,bicubic,-54.590,-44.217,+31 +tf_efficientnet_lite2,36.807,63.193,53.320,46.680,6.09,260,0.890,bicubic,-55.783,-45.230,-13 +mobilenetv2_120d,36.780,63.220,54.047,45.953,5.83,224,0.875,bicubic,-55.830,-44.463,-17 +tf_efficientnet_lite1,36.737,63.263,53.590,46.410,5.42,240,0.882,bicubic,-55.573,-44.900,-3 +regnetx_016,36.683,63.317,53.297,46.703,9.19,224,0.875,bicubic,-55.857,-45.253,-12 +hardcorenas_a,36.640,63.360,54.910,45.090,5.26,224,0.875,bilinear,-54.980,-43.260,+18 +levit_128s,36.620,63.380,53.117,46.883,7.78,224,0.900,bicubic,-54.880,-45.283,+20 +efficientnet_b0,36.600,63.400,53.497,46.503,5.29,224,0.875,bicubic,-55.880,-44.943,-13 +tf_efficientnet_em,36.380,63.620,52.840,47.160,6.90,240,0.882,bicubic,-56.790,-45.830,-59 +skresnet18,36.320,63.680,54.197,45.803,11.96,224,0.875,bicubic,-53.840,-43.583,+49 +repvgg_b0,36.287,63.713,54.057,45.943,15.82,224,0.875,bilinear,-55.393,-44.183,+11 +tv_resnet50,36.177,63.823,52.803,47.197,25.56,224,0.875,bilinear,-55.963,-45.617,-3 +legacy_seresnet34,36.143,63.857,52.553,47.447,21.96,224,0.875,bilinear,-55.337,-45.647,+15 +coat_tiny,36.123,63.877,51.063,48.937,5.50,224,0.900,bicubic,-57.387,-47.627,-88 +tv_resnet34,36.087,63.913,53.533,46.467,21.80,224,0.875,bilinear,-54.203,-44.447,+43 +deit_tiny_distilled_patch16_224,36.023,63.977,54.240,45.760,5.91,224,0.900,bicubic,-55.077,-44.030,+28 +mobilenetv2_140,36.000,64.000,53.943,46.057,6.11,224,0.875,bicubic,-56.030,-44.307,-5 +tf_efficientnet_lite0,35.930,64.070,53.480,46.520,4.65,224,0.875,bicubic,-55.370,-44.610,+16 +selecsls42b,35.813,64.187,52.487,47.513,32.46,224,0.875,bicubic,-56.667,-46.193,-25 +gluon_resnet34_v1b,35.760,64.240,52.187,47.813,21.80,224,0.875,bicubic,-55.340,-45.993,+25 +dla34,35.643,64.357,52.783,47.217,15.74,224,0.875,bilinear,-55.597,-45.397,+16 +mixnet_m,35.640,64.360,52.430,47.570,5.01,224,0.875,bicubic,-56.630,-45.920,-19 +efficientnet_lite0,35.620,64.380,53.657,46.343,4.65,224,0.875,bicubic,-55.640,-44.593,+13 +ssl_resnet18,35.597,64.403,53.740,46.260,11.69,224,0.875,bilinear,-55.103,-44.280,+27 +mobilenetv3_rw,35.547,64.453,53.713,46.287,5.48,224,0.875,bicubic,-56.003,-44.557,+1 +efficientnet_es_pruned,35.390,64.610,52.850,47.150,5.44,224,0.875,bicubic,-56.310,-45.570,-6 +mobilenetv2_110d,35.293,64.707,52.830,47.170,4.52,224,0.875,bicubic,-56.057,-45.360,+6 +tf_mixnet_m,35.180,64.820,50.987,49.013,5.01,224,0.875,bicubic,-57.020,-47.433,-21 +hrnet_w18_small_v2,35.173,64.827,52.440,47.560,15.60,224,0.875,bilinear,-55.997,-45.900,+12 +resnet18d,35.127,64.873,52.890,47.110,11.71,224,0.875,bicubic,-54.863,-44.940,+30 +convit_tiny,35.047,64.953,51.787,48.213,5.71,224,0.875,bicubic,-55.483,-46.423,+23 +ese_vovnet19b_dw,34.840,65.160,52.030,47.970,6.54,224,0.875,bicubic,-57.170,-46.480,-19 +regnety_008,34.807,65.193,51.743,48.257,6.26,224,0.875,bicubic,-57.093,-46.677,-16 +pit_ti_224,34.670,65.330,52.170,47.830,4.85,224,0.900,bicubic,-55.750,-45.840,+22 +mobilenetv3_large_100,34.603,65.397,52.860,47.140,5.48,224,0.875,bicubic,-56.877,-45.460,-6 +seresnext26d_32x4d,34.543,65.457,51.543,48.457,16.81,224,0.875,bicubic,-57.897,-46.997,-39 +seresnext26t_32x4d,34.540,65.460,51.377,48.623,16.81,224,0.875,bicubic,-58.280,-47.183,-60 +mixer_b16_224,34.423,65.577,48.093,51.907,59.88,224,0.875,bicubic,-56.717,-49.307,+4 +resnet26d,34.273,65.727,51.687,48.313,16.01,224,0.875,bicubic,-57.957,-46.763,-33 +tf_efficientnet_es,34.263,65.737,51.350,48.650,5.44,224,0.875,bicubic,-57.837,-47.090,-29 +fbnetc_100,34.253,65.747,51.180,48.820,5.57,224,0.875,bilinear,-57.017,-46.650,-6 +regnety_006,34.150,65.850,51.277,48.723,6.06,224,0.875,bicubic,-57.420,-47.153,-17 +tf_mobilenetv3_large_100,33.950,66.050,51.490,48.510,5.48,224,0.875,bilinear,-57.470,-46.770,-12 +regnetx_008,33.770,66.230,50.547,49.453,7.26,224,0.875,bicubic,-57.410,-47.833,-4 +mnasnet_100,33.763,66.237,51.170,48.830,4.38,224,0.875,bicubic,-57.437,-46.880,-7 +vit_tiny_r_s16_p8_384,33.650,66.350,50.683,49.317,6.36,384,1.000,bicubic,-58.080,-47.747,-27 +vit_tiny_patch16_384,33.550,66.450,51.077,48.923,5.79,384,1.000,bicubic,-59.870,-47.753,-111 +semnasnet_100,33.520,66.480,50.787,49.213,3.89,224,0.875,bicubic,-58.140,-47.483,-25 +resnet26,33.500,66.500,50.927,49.073,16.00,224,0.875,bicubic,-57.940,-47.353,-19 +mixnet_s,33.480,66.520,50.997,49.003,4.13,224,0.875,bicubic,-58.300,-47.303,-32 +spnasnet_100,33.477,66.523,51.267,48.733,4.42,224,0.875,bilinear,-57.133,-46.683,+1 +vgg19_bn,33.230,66.770,50.803,49.197,143.68,224,0.875,bilinear,-57.770,-47.307,-5 +ghostnet_100,33.207,66.793,51.163,48.837,5.18,224,0.875,bilinear,-57.233,-46.667,+2 +regnetx_006,33.157,66.843,50.250,49.750,6.20,224,0.875,bicubic,-57.603,-47.850,-4 +resnet18,33.067,66.933,51.170,48.830,11.69,224,0.875,bilinear,-55.083,-45.950,+19 +legacy_seresnext26_32x4d,32.757,67.243,49.237,50.763,16.79,224,0.875,bicubic,-59.813,-49.183,-66 +deit_tiny_patch16_224,32.667,67.333,50.273,49.727,5.72,224,0.900,bicubic,-56.953,-47.687,+7 +hrnet_w18_small,32.667,67.333,50.587,49.413,13.19,224,0.875,bilinear,-57.213,-47.313,+3 +legacy_seresnet18,32.600,67.400,50.340,49.660,11.78,224,0.875,bicubic,-56.670,-47.340,+9 +mobilenetv2_100,32.523,67.477,50.800,49.200,3.50,224,0.875,bicubic,-57.307,-47.030,+2 +regnetx_004,32.517,67.483,49.343,50.657,5.16,224,0.875,bicubic,-56.943,-48.427,+4 +gluon_resnet18_v1b,32.407,67.593,49.727,50.273,11.69,224,0.875,bicubic,-56.253,-47.373,+9 +regnety_004,32.333,67.667,49.453,50.547,4.34,224,0.875,bicubic,-58.447,-48.627,-14 +tf_mixnet_s,32.183,67.817,48.493,51.507,4.13,224,0.875,bicubic,-59.497,-49.957,-43 +vit_tiny_patch16_224,32.023,67.977,49.017,50.983,5.72,224,0.900,bicubic,-59.907,-49.323,-49 +tf_mobilenetv3_large_075,31.867,68.133,49.110,50.890,3.99,224,0.875,bilinear,-58.453,-48.760,-9 +tf_mobilenetv3_large_minimal_100,31.597,68.403,49.337,50.663,3.92,224,0.875,bilinear,-57.583,-47.983,+3 +vit_tiny_r_s16_p8_224,30.807,69.193,47.657,52.343,6.34,224,0.900,bicubic,-58.533,-50.043,-1 +vgg16_bn,30.357,69.643,47.260,52.740,138.37,224,0.875,bilinear,-60.183,-50.730,-16 +regnety_002,29.687,70.313,46.787,53.213,3.16,224,0.875,bicubic,-58.513,-50.643,+3 +vgg13_bn,28.883,71.117,46.737,53.263,133.05,224,0.875,bilinear,-60.317,-50.793,-2 +regnetx_002,28.860,71.140,45.420,54.580,2.68,224,0.875,bicubic,-58.520,-51.570,+4 +vgg19,28.580,71.420,45.170,54.830,143.67,224,0.875,bilinear,-61.100,-52.380,-10 +dla60x_c,28.447,71.553,46.193,53.807,1.32,224,0.875,bilinear,-58.663,-50.947,+4 +vgg11_bn,28.423,71.577,46.453,53.547,132.87,224,0.875,bilinear,-59.967,-50.817,-3 +vgg16,27.877,72.123,44.673,55.327,138.36,224,0.875,bilinear,-61.483,-52.847,-10 +tf_mobilenetv3_small_100,27.297,72.703,44.420,55.580,2.54,224,0.875,bilinear,-58.663,-51.980,+3 +mixer_l16_224,26.853,73.147,37.923,62.077,208.20,224,0.875,bicubic,-60.117,-56.137,+1 +vgg11,26.533,73.467,43.460,56.540,132.86,224,0.875,bilinear,-60.807,-53.650,-2 +vgg13,26.267,73.733,43.370,56.630,133.05,224,0.875,bilinear,-61.303,-53.750,-5 +dla46x_c,26.217,73.783,43.780,56.220,1.07,224,0.875,bilinear,-59.263,-52.660,0 +tf_mobilenetv3_small_075,26.200,73.800,43.637,56.363,2.04,224,0.875,bilinear,-58.330,-52.253,+1 +dla46_c,25.490,74.510,43.800,56.200,1.30,224,0.875,bilinear,-59.170,-52.400,-1 +tf_mobilenetv3_small_minimal_100,25.087,74.913,42.930,57.070,2.04,224,0.875,bilinear,-57.583,-52.070,0 diff --git a/testbed/huggingface__pytorch-image-models/results/results-imagenet-real.csv b/testbed/huggingface__pytorch-image-models/results/results-imagenet-real.csv new file mode 100644 index 0000000000000000000000000000000000000000..4433bd1070c287ca8b7ae974ee86dd4d2d7cd3a2 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/results/results-imagenet-real.csv @@ -0,0 +1,421 @@ +model,top1,top1_err,top5,top5_err,param_count,img_size,cropt_pct,interpolation,top1_diff,top5_diff,rank_diff +tf_efficientnet_l2_ns,90.563,9.437,98.779,1.221,480.31,800,0.960,bicubic,+2.211,+0.129,0 +tf_efficientnet_l2_ns_475,90.537,9.463,98.710,1.290,480.31,475,0.936,bicubic,+2.303,+0.164,0 +cait_m48_448,90.196,9.804,98.484,1.516,356.46,448,1.000,bicubic,+3.712,+0.730,+3 +vit_large_patch16_384,90.196,9.804,98.661,1.339,304.72,384,1.000,bicubic,+3.116,+0.361,0 +tf_efficientnet_b7_ns,90.100,9.900,98.614,1.386,66.35,600,0.949,bicubic,+3.260,+0.520,0 +cait_m36_384,90.046,9.954,98.493,1.507,271.22,384,1.000,bicubic,+3.992,+0.763,+8 +dm_nfnet_f6,90.046,9.954,98.546,1.454,438.36,576,0.956,bicubic,+3.902,+0.816,+5 +swin_large_patch4_window12_384,90.027,9.973,98.657,1.343,196.74,384,1.000,bicubic,+2.879,+0.423,-5 +tf_efficientnetv2_l_in21ft1k,90.008,9.992,98.619,1.381,118.52,480,1.000,bicubic,+3.704,+0.641,+1 +swin_base_patch4_window12_384,89.995,10.005,98.695,1.304,87.90,384,1.000,bicubic,+3.563,+0.637,-2 +vit_base_patch16_384,89.989,10.011,98.678,1.322,86.86,384,1.000,bicubic,+3.983,+0.678,+4 +cait_s36_384,89.844,10.156,98.427,1.573,68.37,384,1.000,bicubic,+4.384,+0.947,+10 +swin_large_patch4_window7_224,89.796,10.204,98.640,1.360,196.53,224,0.900,bicubic,+3.477,+0.744,-4 +vit_large_r50_s32_384,89.794,10.206,98.514,1.486,329.09,384,1.000,bicubic,+3.610,+0.596,-3 +tf_efficientnet_b6_ns,89.782,10.218,98.510,1.490,43.04,528,0.942,bicubic,+3.330,+0.628,-8 +tf_efficientnetv2_m_in21ft1k,89.775,10.225,98.503,1.497,54.14,480,1.000,bicubic,+4.187,+0.751,+3 +tf_efficientnet_b5_ns,89.651,10.349,98.482,1.518,30.39,456,0.934,bicubic,+3.563,+0.730,-4 +tf_efficientnet_b8_ap,89.581,10.419,98.305,1.695,87.41,672,0.954,bicubic,+4.211,+1.011,+8 +dm_nfnet_f4,89.557,10.443,98.303,1.697,316.07,512,0.951,bicubic,+3.843,+0.783,-1 +cait_s24_384,89.502,10.498,98.362,1.638,47.06,384,1.000,bicubic,+4.456,+1.016,+12 +dm_nfnet_f3,89.485,10.515,98.399,1.601,254.92,416,0.940,bicubic,+3.963,+0.937,-1 +dm_nfnet_f5,89.461,10.539,98.324,1.676,377.21,544,0.954,bicubic,+3.647,+0.836,-5 +deit_base_distilled_patch16_384,89.429,10.571,98.441,1.559,87.63,384,1.000,bicubic,+4.007,+1.109,+1 +tf_efficientnet_b7_ap,89.429,10.571,98.347,1.653,66.35,600,0.949,bicubic,+4.309,+1.096,+5 +tf_efficientnetv2_l,89.367,10.633,98.275,1.725,118.52,480,1.000,bicubic,+3.877,+0.903,-4 +tf_efficientnet_b8,89.355,10.645,98.303,1.697,87.41,672,0.954,bicubic,+3.985,+0.913,-1 +tf_efficientnet_b6_ap,89.342,10.658,98.281,1.719,43.04,528,0.942,bicubic,+4.554,+1.143,+13 +vit_large_patch16_224,89.314,10.686,98.392,1.608,304.33,224,0.900,bicubic,+3.472,+0.568,-12 +tf_efficientnet_b4_ns,89.305,10.694,98.347,1.653,19.34,380,0.922,bicubic,+4.143,+0.877,-1 +tf_efficientnetv2_m,89.284,10.716,98.236,1.764,54.14,480,1.000,bicubic,+4.240,+0.958,+3 +swin_base_patch4_window7_224,89.145,10.855,98.429,1.571,87.77,224,0.900,bicubic,+3.893,+0.867,-4 +eca_nfnet_l2,89.141,10.859,98.315,1.685,56.72,384,1.000,bicubic,+4.443,+1.051,+10 +cait_xs24_384,89.139,10.861,98.290,1.710,26.67,384,1.000,bicubic,+5.077,+1.402,+27 +ig_resnext101_32x48d,89.120,10.880,98.130,1.870,828.41,224,0.875,bilinear,+3.692,+0.558,-11 +ig_resnext101_32x32d,89.111,10.889,98.181,1.819,468.53,224,0.875,bilinear,+4.017,+0.743,-5 +tf_efficientnet_b7,89.086,10.914,98.183,1.817,66.35,600,0.949,bicubic,+4.150,+0.979,+1 +ecaresnet269d,89.069,10.931,98.234,1.766,102.09,352,1.000,bicubic,+4.093,+1.008,-2 +resmlp_big_24_224_in22ft1k,89.011,10.989,98.215,1.785,129.14,224,0.875,bicubic,+4.617,+1.095,+12 +dm_nfnet_f2,89.009,10.991,98.189,1.810,193.78,352,0.920,bicubic,+3.945,+0.950,-8 +efficientnetv2_rw_m,88.987,11.013,98.213,1.787,53.24,416,1.000,bicubic,+4.179,+1.065,-1 +tf_efficientnet_b5_ap,88.938,11.062,98.164,1.836,30.39,456,0.934,bicubic,+4.686,+1.190,+14 +dm_nfnet_f1,88.925,11.075,98.115,1.885,132.63,320,0.910,bicubic,+4.299,+1.015,+1 +tf_efficientnetv2_s_in21ft1k,88.904,11.096,98.277,1.723,21.46,384,1.000,bicubic,+4.602,+1.025,+9 +vit_base_patch16_224,88.866,11.134,98.230,1.770,86.57,224,0.900,bicubic,+4.334,+0.936,0 +resnetrs420,88.840,11.160,98.034,1.966,191.89,416,1.000,bicubic,+3.832,+0.910,-11 +ig_resnext101_32x16d,88.834,11.166,98.049,1.951,194.03,224,0.875,bilinear,+4.664,+0.853,+10 +resnetrs270,88.834,11.166,98.136,1.864,129.86,352,1.000,bicubic,+4.400,+1.166,+1 +vit_small_r26_s32_384,88.819,11.181,98.337,1.663,36.47,384,1.000,bicubic,+4.773,+1.009,+14 +vit_base_r50_s16_384,88.808,11.192,98.232,1.768,98.95,384,1.000,bicubic,+3.836,+0.944,-13 +seresnet152d,88.795,11.205,98.172,1.828,66.84,320,1.000,bicubic,+4.433,+1.132,+1 +swsl_resnext101_32x8d,88.770,11.230,98.147,1.853,88.79,224,0.875,bilinear,+4.486,+0.971,+2 +tf_efficientnet_b6,88.761,11.239,98.064,1.937,43.04,528,0.942,bicubic,+4.651,+1.178,+6 +resnetrs350,88.759,11.241,98.029,1.971,163.96,384,1.000,bicubic,+4.039,+1.041,-12 +vit_base_patch16_224_miil,88.737,11.262,98.027,1.973,86.54,224,0.875,bilinear,+4.469,+1.225,0 +resnetv2_152x2_bitm,88.725,11.275,98.307,1.693,236.34,448,1.000,bilinear,+4.215,+0.875,-9 +regnety_160,88.697,11.303,98.068,1.932,83.59,288,1.000,bicubic,+5.011,+1.292,+17 +pit_b_distilled_224,88.676,11.324,98.093,1.907,74.79,224,0.900,bicubic,+4.532,+1.237,0 +vit_small_patch16_384,88.652,11.348,98.232,1.768,22.20,384,1.000,bicubic,+4.850,+1.130,+13 +eca_nfnet_l1,88.624,11.376,98.132,1.868,41.41,320,1.000,bicubic,+4.614,+1.104,+5 +resnetrs200,88.605,11.395,98.034,1.966,93.21,320,1.000,bicubic,+4.539,+1.160,-1 +resnetv2_152x4_bitm,88.545,11.455,98.192,1.808,936.53,480,1.000,bilinear,+3.629,+0.750,-23 +resnet200d,88.543,11.457,97.959,2.041,64.69,320,1.000,bicubic,+4.581,+1.135,+3 +resnest269e,88.522,11.478,98.027,1.973,110.93,416,0.928,bicubic,+4.004,+1.041,-18 +efficientnetv2_rw_s,88.473,11.527,97.974,2.026,23.94,384,1.000,bicubic,+4.665,+1.250,+6 +resnetv2_101x3_bitm,88.464,11.536,98.157,1.843,387.93,448,1.000,bilinear,+4.024,+0.775,-18 +cait_s24_224,88.447,11.553,97.957,2.043,46.92,224,1.000,bicubic,+4.995,+1.393,+11 +resnetv2_50x3_bitm,88.443,11.557,98.200,1.800,217.32,448,1.000,bilinear,+4.429,+1.076,-5 +resmlp_big_24_distilled_224,88.443,11.557,97.940,2.060,129.14,224,0.875,bicubic,+4.853,+1.292,+9 +resnest200e,88.432,11.568,98.042,1.958,70.20,320,0.909,bicubic,+4.600,+1.148,-1 +tf_efficientnet_b3_ns,88.426,11.574,98.029,1.971,12.23,300,0.904,bicubic,+4.378,+1.119,-9 +vit_large_r50_s32_224,88.426,11.574,98.085,1.915,328.99,224,0.900,bicubic,+3.992,+0.921,-22 +tf_efficientnetv2_s,88.402,11.598,97.927,2.073,21.46,384,1.000,bicubic,+4.508,+1.229,-6 +efficientnet_b4,88.372,11.628,97.961,2.039,19.34,384,1.000,bicubic,+4.944,+1.365,+5 +resnet152d,88.355,11.645,97.935,2.065,60.21,320,1.000,bicubic,+4.675,+1.197,0 +tf_efficientnet_b4_ap,88.349,11.651,97.893,2.107,19.34,380,0.922,bicubic,+5.101,+1.501,+8 +tf_efficientnet_b5,88.321,11.679,97.912,2.088,30.39,456,0.934,bicubic,+4.509,+1.164,-7 +resnetrs152,88.251,11.749,97.737,2.263,86.62,320,1.000,bicubic,+4.539,+1.123,-5 +deit_base_distilled_patch16_224,88.214,11.786,97.914,2.086,87.34,224,0.900,bicubic,+4.826,+1.426,+1 +resnetv2_152x2_bit_teacher_384,88.150,11.850,98.051,1.949,236.34,384,1.000,bicubic,+4.306,+0.933,-12 +ig_resnext101_32x8d,88.146,11.854,97.856,2.144,88.79,224,0.875,bilinear,+5.458,+1.220,+20 +cait_xxs36_384,88.140,11.860,97.908,2.092,17.37,384,1.000,bicubic,+5.946,+1.760,+34 +dm_nfnet_f0,88.125,11.875,97.854,2.146,71.49,256,0.900,bicubic,+4.739,+1.282,-2 +swsl_resnext101_32x4d,88.099,11.901,97.967,2.033,44.18,224,0.875,bilinear,+4.869,+1.207,+1 +eca_nfnet_l0,87.980,12.020,97.871,2.129,24.14,288,1.000,bicubic,+5.400,+1.381,+19 +nfnet_l0,87.967,12.033,97.867,2.133,35.07,288,1.000,bicubic,+5.217,+1.351,+12 +tf_efficientnet_b4,87.963,12.037,97.739,2.261,19.34,380,0.922,bicubic,+4.941,+1.439,+6 +resnet101d,87.941,12.059,97.908,2.092,44.57,320,1.000,bicubic,+4.919,+1.462,+4 +regnety_032,87.937,12.063,97.891,2.109,19.44,288,1.000,bicubic,+5.213,+1.467,+10 +vit_base_patch32_384,87.909,12.091,98.012,1.988,88.30,384,1.000,bicubic,+4.559,+1.176,-8 +twins_svt_large,87.901,12.099,97.581,2.419,99.27,224,0.900,bicubic,+4.223,+0.987,-15 +twins_pcpvt_large,87.877,12.123,97.856,2.144,60.99,224,0.900,bicubic,+4.737,+1.258,-5 +deit_base_patch16_384,87.845,12.155,97.510,2.490,86.86,384,1.000,bicubic,+4.739,+1.138,-4 +tresnet_xl_448,87.796,12.204,97.459,2.541,78.44,448,0.875,bilinear,+4.746,+1.285,-3 +resnetv2_50x1_bit_distilled,87.787,12.213,97.899,2.101,25.55,224,0.875,bicubic,+4.969,+1.377,+1 +tresnet_m,87.736,12.264,97.523,2.477,31.39,224,0.875,bilinear,+4.656,+1.405,-6 +twins_pcpvt_base,87.736,12.264,97.726,2.274,43.83,224,0.900,bicubic,+5.028,+1.380,+3 +resnetv2_101x1_bitm,87.681,12.319,97.940,2.060,44.54,448,1.000,bilinear,+5.349,+1.422,+11 +swin_small_patch4_window7_224,87.664,12.336,97.566,2.434,49.61,224,0.900,bicubic,+4.452,+1.244,-13 +twins_svt_base,87.638,12.362,97.523,2.477,56.07,224,0.900,bicubic,+4.502,+1.105,-12 +pnasnet5large,87.636,12.364,97.485,2.515,86.06,331,0.911,bicubic,+4.854,+1.445,-4 +swsl_resnext101_32x16d,87.615,12.386,97.820,2.180,194.03,224,0.875,bilinear,+4.269,+0.974,-19 +swsl_resnext50_32x4d,87.600,12.400,97.651,2.349,25.03,224,0.875,bilinear,+5.418,+1.421,+14 +tf_efficientnet_b2_ns,87.557,12.443,97.628,2.372,9.11,260,0.890,bicubic,+5.177,+1.380,+2 +levit_384,87.553,12.447,97.545,2.455,39.13,224,0.900,bicubic,+4.967,+1.529,-2 +ecaresnet50t,87.538,12.462,97.643,2.357,25.57,320,0.950,bicubic,+5.192,+1.505,+2 +resnetv2_152x2_bit_teacher,87.493,12.507,97.812,2.188,236.34,224,0.875,bicubic,+4.631,+1.244,-12 +efficientnet_b3,87.435,12.565,97.681,2.319,12.23,320,1.000,bicubic,+5.193,+1.567,+7 +cait_xxs24_384,87.416,12.584,97.619,2.381,12.03,384,1.000,bicubic,+6.450,+1.973,+49 +resnet51q,87.395,12.605,97.587,2.413,35.70,288,1.000,bilinear,+5.035,+1.407,-3 +coat_lite_small,87.380,12.620,97.365,2.635,19.84,224,0.900,bicubic,+5.072,+1.515,-1 +tresnet_l_448,87.377,12.623,97.485,2.515,55.99,448,0.875,bilinear,+5.109,+1.509,+2 +nasnetalarge,87.350,12.650,97.417,2.583,88.75,331,0.911,bicubic,+4.730,+1.371,-11 +ecaresnet101d,87.288,12.712,97.562,2.438,44.57,224,0.875,bicubic,+5.116,+1.516,+4 +resnest101e,87.284,12.716,97.560,2.440,48.28,256,0.875,bilinear,+4.394,+1.240,-21 +pit_s_distilled_224,87.277,12.723,97.500,2.500,24.04,224,0.900,bicubic,+5.281,+1.702,+6 +resnetrs101,87.247,12.753,97.457,2.543,63.62,288,0.940,bicubic,+4.959,+1.449,-4 +mixer_b16_224_miil,87.226,12.774,97.410,2.590,59.88,224,0.875,bilinear,+4.918,+1.694,-7 +tresnet_xl,87.224,12.776,97.400,2.600,78.44,224,0.875,bilinear,+5.170,+1.463,+1 +convit_base,87.200,12.800,97.286,2.714,86.54,224,0.875,bicubic,+4.910,+1.348,-8 +tf_efficientnet_b3_ap,87.192,12.808,97.380,2.620,12.23,300,0.904,bicubic,+5.370,+1.756,+5 +visformer_small,87.181,12.819,97.323,2.677,40.22,224,0.900,bicubic,+5.075,+1.451,-3 +convit_small,87.053,12.947,97.350,2.650,27.78,224,0.875,bicubic,+5.627,+1.606,+15 +tf_efficientnetv2_b3,87.032,12.968,97.303,2.697,14.36,300,0.904,bicubic,+5.062,+1.521,-1 +deit_small_distilled_patch16_224,86.993,13.007,97.316,2.684,22.44,224,0.900,bicubic,+5.793,+1.938,+23 +resmlp_36_distilled_224,86.993,13.007,97.278,2.722,44.69,224,0.875,bicubic,+5.833,+1.790,+24 +tnt_s_patch16_224,86.903,13.097,97.368,2.632,23.76,224,0.900,bicubic,+5.385,+1.620,+6 +vit_small_patch16_224,86.869,13.131,97.613,2.387,22.05,224,0.900,bicubic,+5.467,+1.479,+11 +vit_small_r26_s32_224,86.863,13.137,97.528,2.472,36.43,224,0.900,bicubic,+5.005,+1.506,-5 +ssl_resnext101_32x16d,86.856,13.143,97.517,2.483,194.03,224,0.875,bilinear,+5.013,+1.421,-5 +rexnet_200,86.846,13.154,97.276,2.724,16.37,224,0.875,bicubic,+5.214,+1.608,-1 +tf_efficientnet_b3,86.835,13.165,97.297,2.703,12.23,300,0.904,bicubic,+5.199,+1.579,-3 +deit_base_patch16_224,86.829,13.171,97.049,2.951,86.57,224,0.900,bicubic,+4.831,+1.315,-12 +tresnet_m_448,86.820,13.180,97.212,2.788,31.39,448,0.875,bilinear,+5.106,+1.640,-7 +ssl_resnext101_32x8d,86.807,13.193,97.466,2.534,88.79,224,0.875,bilinear,+5.191,+1.428,-4 +swsl_resnet50,86.807,13.193,97.498,2.502,25.56,224,0.875,bilinear,+5.641,+1.526,+13 +tf_efficientnet_lite4,86.803,13.197,97.263,2.737,13.01,380,0.920,bilinear,+5.267,+1.595,-5 +coat_mini,86.793,13.207,97.162,2.837,10.34,224,0.900,bicubic,+5.525,+1.770,+7 +tresnet_l,86.767,13.233,97.271,2.729,55.99,224,0.875,bilinear,+5.277,+1.647,-3 +twins_svt_small,86.756,13.244,97.175,2.825,24.06,224,0.900,bicubic,+5.074,+1.505,-12 +levit_256,86.728,13.272,97.259,2.741,18.89,224,0.900,bicubic,+5.218,+1.769,-7 +seresnext50_32x4d,86.699,13.301,97.214,2.786,27.56,224,0.875,bicubic,+5.433,+1.594,+4 +pit_b_224,86.686,13.314,96.898,3.102,73.76,224,0.900,bicubic,+4.240,+1.188,-38 +tf_efficientnet_b1_ns,86.669,13.331,97.378,2.622,7.79,240,0.882,bicubic,+5.281,+1.640,-4 +swin_tiny_patch4_window7_224,86.664,13.336,97.197,2.803,28.29,224,0.900,bicubic,+5.286,+1.657,-4 +gernet_l,86.654,13.346,97.186,2.814,31.08,256,0.875,bilinear,+5.300,+1.650,-4 +wide_resnet50_2,86.647,13.353,97.214,2.786,68.88,224,0.875,bicubic,+5.191,+1.682,-10 +efficientnet_el,86.635,13.366,97.175,2.825,10.59,300,0.904,bicubic,+5.319,+1.649,-5 +resmlp_24_distilled_224,86.622,13.378,97.135,2.865,30.02,224,0.875,bicubic,+5.856,+1.917,+16 +twins_pcpvt_small,86.620,13.380,97.340,2.660,24.11,224,0.900,bicubic,+5.532,+1.698,+3 +nf_resnet50,86.609,13.391,97.293,2.707,25.56,288,0.940,bicubic,+5.949,+1.957,+17 +resnest50d_4s2x40d,86.592,13.408,97.269,2.731,30.42,224,0.875,bicubic,+5.484,+1.711,-1 +efficientnet_b3_pruned,86.581,13.419,97.190,2.810,9.86,300,0.904,bicubic,+5.723,+1.948,+9 +repvgg_b3,86.566,13.434,97.139,2.861,123.09,224,0.875,bilinear,+6.074,+1.879,+19 +ssl_resnext101_32x4d,86.479,13.521,97.468,2.532,44.18,224,0.875,bilinear,+5.555,+1.740,+4 +ecaresnet50d,86.470,13.530,97.186,2.814,25.58,224,0.875,bicubic,+5.878,+1.866,+15 +gluon_resnet152_v1s,86.468,13.532,97.109,2.891,60.32,224,0.875,bicubic,+5.452,+1.697,-2 +resnest50d_1s4x24d,86.447,13.553,97.148,2.852,25.68,224,0.875,bicubic,+5.459,+1.826,-2 +resnetv2_50x1_bitm,86.436,13.564,97.602,2.398,25.55,448,1.000,bilinear,+6.094,+1.918,+22 +repvgg_b3g4,86.361,13.639,97.054,2.946,83.83,224,0.875,bilinear,+6.149,+1.944,+32 +legacy_senet154,86.342,13.658,96.928,3.072,115.09,224,0.875,bilinear,+5.032,+1.432,-17 +cait_xxs36_224,86.340,13.660,97.111,2.889,17.30,224,1.000,bicubic,+6.590,+2.245,+54 +gernet_m,86.319,13.681,97.096,2.904,21.14,224,0.875,bilinear,+5.587,+1.912,+3 +pit_s_224,86.316,13.684,97.045,2.955,23.46,224,0.900,bicubic,+5.222,+1.713,-12 +vit_small_patch32_384,86.312,13.688,97.417,2.583,22.92,384,1.000,bicubic,+5.832,+1.819,+9 +efficientnet_b2,86.304,13.696,96.990,3.010,9.11,288,1.000,bicubic,+5.692,+1.672,+3 +gluon_senet154,86.278,13.722,96.949,3.051,115.09,224,0.875,bicubic,+5.044,+1.601,-20 +resnest50d,86.240,13.761,97.073,2.927,27.48,224,0.875,bilinear,+5.266,+1.695,-11 +ecaresnet101d_pruned,86.210,13.790,97.335,2.665,24.88,224,0.875,bicubic,+5.392,+1.707,-6 +efficientnet_el_pruned,86.192,13.807,97.026,2.974,10.59,300,0.904,bicubic,+5.892,+1.998,+17 +cspdarknet53,86.182,13.818,97.013,2.987,27.64,256,0.887,bilinear,+6.124,+1.929,+27 +inception_v4,86.169,13.831,96.919,3.081,42.68,299,0.875,bicubic,+6.001,+1.951,+22 +rexnet_150,86.154,13.846,97.058,2.942,9.73,224,0.875,bicubic,+5.844,+1.892,+11 +inception_resnet_v2,86.133,13.867,97.043,2.957,55.84,299,0.897,bicubic,+5.675,+1.737,+4 +ssl_resnext50_32x4d,86.086,13.914,97.212,2.788,25.03,224,0.875,bilinear,+5.768,+1.806,+8 +tf_efficientnet_el,86.084,13.916,96.964,3.036,10.59,300,0.904,bicubic,+5.834,+1.836,+13 +gluon_resnet101_v1s,86.054,13.946,97.022,2.978,44.67,224,0.875,bicubic,+5.752,+1.862,+8 +ecaresnetlight,86.052,13.948,97.069,2.931,30.16,224,0.875,bicubic,+5.590,+1.819,-1 +gluon_seresnext101_32x4d,86.032,13.968,96.977,3.023,48.96,224,0.875,bicubic,+5.128,+1.683,-19 +resnet50d,86.009,13.991,96.979,3.021,25.58,224,0.875,bicubic,+5.479,+1.819,-8 +ecaresnet26t,85.983,14.017,97.041,2.959,16.01,320,0.950,bicubic,+6.129,+1.957,+29 +tf_efficientnet_b2_ap,85.975,14.025,96.810,3.190,9.11,260,0.890,bicubic,+5.675,+1.592,+4 +gluon_seresnext101_64x4d,85.960,14.040,96.979,3.021,88.23,224,0.875,bicubic,+5.066,+1.671,-22 +vit_base_patch32_224,85.956,14.044,97.130,2.869,88.22,224,0.900,bicubic,+5.231,+1.562,-17 +gluon_resnet152_v1d,85.917,14.083,96.812,3.188,60.21,224,0.875,bicubic,+5.443,+1.606,-9 +vit_large_patch32_384,85.909,14.091,97.368,2.632,306.63,384,1.000,bicubic,+4.403,+1.276,-51 +tf_efficientnet_b2,85.902,14.098,96.862,3.139,9.11,260,0.890,bicubic,+5.816,+1.954,+10 +tf_efficientnetv2_b2,85.900,14.100,96.889,3.111,10.10,260,0.890,bicubic,+5.692,+1.847,+5 +seresnet50,85.857,14.143,97.004,2.995,28.09,224,0.875,bicubic,+5.583,+1.934,-1 +repvgg_b2g4,85.855,14.145,96.812,3.188,61.76,224,0.875,bilinear,+6.489,+2.124,+42 +gluon_resnet101_v1d,85.849,14.151,96.663,3.337,44.57,224,0.875,bicubic,+5.435,+1.649,-12 +resnet50,85.804,14.196,96.712,3.288,25.56,224,0.875,bicubic,+6.766,+2.322,+63 +mixnet_xl,85.798,14.202,96.712,3.288,11.90,224,0.875,bicubic,+5.322,+1.776,-18 +ens_adv_inception_resnet_v2,85.781,14.220,96.759,3.241,55.84,299,0.897,bicubic,+5.799,+1.821,+7 +tf_efficientnet_lite3,85.755,14.245,96.887,3.113,8.20,300,0.904,bilinear,+5.935,+1.973,+18 +ese_vovnet39b,85.751,14.249,96.891,3.109,24.57,224,0.875,bicubic,+6.431,+2.179,+38 +gluon_resnext101_32x4d,85.746,14.254,96.635,3.365,44.18,224,0.875,bicubic,+5.412,+1.709,-15 +legacy_seresnext101_32x4d,85.746,14.254,96.757,3.243,48.96,224,0.875,bilinear,+5.518,+1.739,-7 +cspresnext50,85.740,14.260,96.840,3.160,20.57,224,0.875,bilinear,+5.700,+1.896,0 +regnety_320,85.727,14.273,96.725,3.275,145.05,224,0.875,bicubic,+4.915,+1.481,-36 +cspresnet50,85.721,14.279,96.795,3.205,21.62,256,0.887,bilinear,+6.147,+2.083,+23 +xception71,85.697,14.303,96.776,3.224,42.34,299,0.903,bicubic,+5.823,+1.854,+5 +resmlp_big_24_224,85.695,14.305,96.426,3.574,129.14,224,0.875,bicubic,+4.667,+1.404,-49 +gluon_resnext101_64x4d,85.693,14.307,96.644,3.356,83.46,224,0.875,bicubic,+5.089,+1.656,-34 +efficientnet_em,85.684,14.316,96.938,3.062,6.90,240,0.882,bicubic,+6.432,+2.144,+38 +deit_small_patch16_224,85.678,14.322,96.906,3.094,22.05,224,0.900,bicubic,+5.822,+1.854,+2 +pit_xs_distilled_224,85.657,14.343,96.667,3.333,11.00,224,0.900,bicubic,+6.351,+2.303,+31 +efficientnet_b2_pruned,85.642,14.358,96.746,3.254,8.31,260,0.890,bicubic,+5.726,+1.890,-5 +dpn107,85.640,14.360,96.729,3.271,86.92,224,0.875,bicubic,+5.484,+1.819,-14 +resmlp_36_224,85.620,14.380,96.795,3.205,44.69,224,0.875,bicubic,+5.850,+1.909,+4 +levit_192,85.580,14.420,96.740,3.260,10.95,224,0.900,bicubic,+5.738,+1.954,-2 +gluon_resnet152_v1c,85.580,14.420,96.646,3.354,60.21,224,0.875,bicubic,+5.670,+1.806,-8 +ecaresnet50d_pruned,85.580,14.420,96.936,3.064,19.94,224,0.875,bicubic,+5.864,+2.056,+7 +resnext50d_32x4d,85.569,14.431,96.748,3.252,25.05,224,0.875,bicubic,+5.893,+1.882,+7 +tf_efficientnetv2_b1,85.561,14.439,96.727,3.273,8.14,240,0.882,bicubic,+6.099,+2.005,+14 +regnety_120,85.543,14.457,96.785,3.215,51.82,224,0.875,bicubic,+5.177,+1.659,-36 +regnetx_320,85.524,14.476,96.669,3.331,107.81,224,0.875,bicubic,+5.278,+1.643,-27 +nf_regnet_b1,85.505,14.495,96.791,3.209,10.22,288,0.900,bicubic,+6.213,+2.043,+22 +dpn92,85.494,14.506,96.635,3.365,37.67,224,0.875,bicubic,+5.486,+1.799,-19 +gluon_resnet152_v1b,85.475,14.525,96.550,3.450,60.19,224,0.875,bicubic,+5.789,+1.814,0 +rexnet_130,85.473,14.527,96.684,3.316,7.56,224,0.875,bicubic,+5.973,+2.002,+6 +resnetrs50,85.462,14.538,96.736,3.264,35.69,224,0.910,bicubic,+5.570,+1.767,-17 +dpn131,85.398,14.602,96.639,3.361,79.25,224,0.875,bicubic,+5.576,+1.929,-11 +regnetx_160,85.390,14.610,96.637,3.363,54.28,224,0.875,bicubic,+5.534,+1.807,-15 +dla102x2,85.366,14.634,96.629,3.371,41.28,224,0.875,bilinear,+5.918,+1.989,+5 +gluon_seresnext50_32x4d,85.336,14.664,96.667,3.333,27.56,224,0.875,bicubic,+5.418,+1.845,-24 +xception65,85.315,14.685,96.637,3.363,39.92,299,0.903,bicubic,+5.763,+1.983,-2 +skresnext50_32x4d,85.313,14.687,96.390,3.610,27.48,224,0.875,bicubic,+5.157,+1.748,-32 +dpn98,85.311,14.689,96.469,3.531,61.57,224,0.875,bicubic,+5.669,+1.871,-7 +gluon_resnet101_v1c,85.304,14.696,96.405,3.595,44.57,224,0.875,bicubic,+5.770,+1.827,-4 +dpn68b,85.291,14.709,96.464,3.536,12.61,224,0.875,bicubic,+6.076,+2.050,+15 +regnety_064,85.283,14.717,96.639,3.361,30.58,224,0.875,bicubic,+5.561,+1.871,-15 +resnetblur50,85.283,14.717,96.531,3.470,25.56,224,0.875,bicubic,+5.997,+1.892,+8 +resmlp_24_224,85.268,14.732,96.492,3.508,30.02,224,0.875,bicubic,+5.894,+1.946,-3 +coat_lite_mini,85.251,14.749,96.680,3.320,11.01,224,0.900,bicubic,+6.163,+2.076,+15 +regnety_080,85.245,14.755,96.633,3.367,39.18,224,0.875,bicubic,+5.369,+1.803,-30 +cait_xxs24_224,85.228,14.773,96.712,3.288,11.96,224,1.000,bicubic,+6.842,+2.402,+44 +resnext50_32x4d,85.221,14.779,96.526,3.474,25.03,224,0.875,bicubic,+5.453,+1.928,-23 +resnext101_32x8d,85.187,14.813,96.445,3.555,88.79,224,0.875,bilinear,+5.879,+1.927,-4 +gluon_inception_v3,85.183,14.817,96.526,3.474,23.83,299,0.875,bicubic,+6.377,+2.156,+21 +hrnet_w48,85.151,14.849,96.492,3.508,77.47,224,0.875,bilinear,+5.851,+1.980,-2 +gluon_xception65,85.148,14.851,96.597,3.403,39.92,299,0.903,bicubic,+5.433,+1.737,-23 +gluon_resnet101_v1b,85.142,14.858,96.366,3.634,44.55,224,0.875,bicubic,+5.836,+1.842,-6 +regnetx_120,85.131,14.869,96.477,3.523,46.11,224,0.875,bicubic,+5.535,+1.739,-21 +xception,85.129,14.871,96.471,3.529,22.86,299,0.897,bicubic,+6.077,+2.079,+9 +tf_efficientnet_b1_ap,85.127,14.873,96.405,3.595,7.79,240,0.882,bicubic,+5.847,+2.099,-4 +hrnet_w64,85.119,14.881,96.744,3.256,128.06,224,0.875,bilinear,+5.645,+2.092,-19 +ssl_resnet50,85.097,14.903,96.866,3.134,25.56,224,0.875,bilinear,+5.875,+2.034,-4 +res2net101_26w_4s,85.093,14.907,96.381,3.619,45.21,224,0.875,bilinear,+5.895,+1.949,-1 +tf_efficientnet_cc_b1_8e,85.063,14.937,96.422,3.578,39.72,240,0.882,bicubic,+5.755,+2.052,-14 +res2net50_26w_8s,85.029,14.971,96.419,3.580,48.40,224,0.875,bilinear,+5.831,+2.052,-4 +resnest26d,85.008,14.992,96.637,3.363,17.07,224,0.875,bilinear,+6.530,+2.339,+22 +gluon_resnext50_32x4d,84.995,15.005,96.426,3.574,25.03,224,0.875,bicubic,+5.641,+2.000,-20 +tf_efficientnet_b0_ns,84.984,15.016,96.503,3.497,5.29,224,0.875,bicubic,+6.326,+2.127,+14 +coat_tiny,84.976,15.024,96.409,3.591,5.50,224,0.900,bicubic,+6.542,+2.371,+23 +regnety_040,84.948,15.052,96.612,3.388,20.65,224,0.875,bicubic,+5.728,+1.956,-11 +dla169,84.920,15.080,96.535,3.465,53.39,224,0.875,bilinear,+6.232,+2.199,+9 +tf_efficientnet_b1,84.918,15.082,96.364,3.636,7.79,240,0.882,bicubic,+6.092,+2.166,+2 +legacy_seresnext50_32x4d,84.901,15.099,96.434,3.566,27.56,224,0.875,bilinear,+5.823,+1.998,-8 +hrnet_w44,84.884,15.116,96.434,3.566,67.06,224,0.875,bilinear,+5.988,+2.066,-2 +gluon_resnet50_v1s,84.862,15.138,96.443,3.557,25.68,224,0.875,bicubic,+6.150,+2.205,+4 +regnetx_080,84.862,15.138,96.434,3.566,39.57,224,0.875,bicubic,+5.668,+1.874,-13 +levit_128,84.843,15.157,96.360,3.640,9.21,224,0.900,bicubic,+6.357,+2.350,+9 +gluon_resnet50_v1d,84.832,15.168,96.398,3.602,25.58,224,0.875,bicubic,+5.758,+1.928,-12 +dla60_res2next,84.830,15.170,96.411,3.589,17.03,224,0.875,bilinear,+6.390,+2.259,+12 +vit_tiny_patch16_384,84.828,15.172,96.708,3.292,5.79,384,1.000,bicubic,+6.398,+2.166,+13 +mixnet_l,84.822,15.178,96.328,3.672,7.33,224,0.875,bicubic,+5.846,+2.146,-11 +tv_resnet152,84.815,15.185,96.225,3.775,60.19,224,0.875,bilinear,+6.503,+2.187,+17 +dla60_res2net,84.813,15.187,96.481,3.519,20.85,224,0.875,bilinear,+6.349,+2.275,+6 +dla102x,84.813,15.187,96.552,3.448,26.31,224,0.875,bilinear,+6.303,+2.324,+1 +pit_xs_224,84.792,15.208,96.492,3.508,10.62,224,0.900,bicubic,+6.610,+2.324,+19 +xception41,84.792,15.208,96.413,3.587,26.97,299,0.903,bicubic,+6.276,+2.135,-2 +regnetx_064,84.781,15.219,96.490,3.510,26.21,224,0.875,bicubic,+5.709,+2.032,-20 +hrnet_w40,84.743,15.257,96.554,3.446,57.56,224,0.875,bilinear,+5.823,+2.084,-17 +res2net50_26w_6s,84.726,15.274,96.281,3.719,37.05,224,0.875,bilinear,+6.156,+2.157,-6 +repvgg_b2,84.724,15.276,96.469,3.531,89.02,224,0.875,bilinear,+5.932,+2.055,-13 +resmlp_12_distilled_224,84.713,15.287,96.225,3.775,15.35,224,0.875,bicubic,+6.769,+2.667,+25 +legacy_seresnet152,84.704,15.296,96.417,3.583,66.82,224,0.875,bilinear,+6.044,+2.047,-11 +selecsls60b,84.657,15.343,96.300,3.700,32.77,224,0.875,bicubic,+6.245,+2.126,+1 +hrnet_w32,84.651,15.349,96.407,3.593,41.23,224,0.875,bilinear,+6.201,+2.221,-4 +tf_efficientnetv2_b0,84.625,15.375,96.274,3.726,7.14,224,0.875,bicubic,+6.269,+2.250,+3 +efficientnet_b1,84.608,15.392,96.332,3.668,7.79,256,1.000,bicubic,+5.814,+1.990,-20 +regnetx_040,84.600,15.400,96.383,3.617,22.12,224,0.875,bicubic,+6.118,+2.139,-10 +efficientnet_es,84.591,15.409,96.311,3.689,5.44,224,0.875,bicubic,+6.525,+2.385,+11 +hrnet_w30,84.572,15.428,96.388,3.612,37.71,224,0.875,bilinear,+6.366,+2.166,+4 +tf_mixnet_l,84.564,15.437,96.244,3.756,7.33,224,0.875,bicubic,+5.790,+2.246,-22 +wide_resnet101_2,84.557,15.443,96.349,3.651,126.89,224,0.875,bilinear,+5.701,+2.067,-28 +dla60x,84.523,15.477,96.285,3.715,17.35,224,0.875,bilinear,+6.277,+2.267,-2 +legacy_seresnet101,84.504,15.496,96.330,3.670,49.33,224,0.875,bilinear,+6.122,+2.066,-7 +tf_efficientnet_em,84.450,15.550,96.180,3.820,6.90,240,0.882,bicubic,+6.320,+2.136,+2 +coat_lite_tiny,84.450,15.550,96.368,3.632,5.72,224,0.900,bicubic,+6.938,+2.452,+27 +repvgg_b1,84.416,15.584,96.221,3.779,57.42,224,0.875,bilinear,+6.050,+2.123,-9 +efficientnet_b1_pruned,84.393,15.607,96.140,3.860,6.33,240,0.882,bicubic,+6.157,+2.306,-5 +res2net50_26w_4s,84.365,15.635,96.082,3.918,25.70,224,0.875,bilinear,+6.401,+2.228,+7 +hardcorenas_f,84.326,15.674,96.025,3.975,8.20,224,0.875,bilinear,+6.222,+2.222,-1 +res2net50_14w_8s,84.309,15.691,96.072,3.929,25.06,224,0.875,bilinear,+6.159,+2.224,-4 +selecsls60,84.288,15.712,96.095,3.905,30.67,224,0.875,bicubic,+6.306,+2.267,+3 +regnetx_032,84.237,15.763,96.247,3.753,15.30,224,0.875,bicubic,+6.065,+2.159,-7 +res2next50,84.226,15.774,95.997,4.003,24.67,224,0.875,bilinear,+5.980,+2.105,-12 +gluon_resnet50_v1c,84.207,15.793,96.161,3.839,25.58,224,0.875,bicubic,+6.195,+2.173,-2 +dla102,84.190,15.810,96.206,3.794,33.27,224,0.875,bilinear,+6.158,+2.260,-4 +rexnet_100,84.162,15.838,96.255,3.745,4.80,224,0.875,bicubic,+6.304,+2.385,+5 +tf_inception_v3,84.132,15.868,95.920,4.080,23.83,299,0.875,bicubic,+6.270,+2.280,+3 +res2net50_48w_2s,84.126,15.874,95.965,4.035,25.29,224,0.875,bilinear,+6.604,+2.411,+12 +resnet34d,84.098,15.902,95.978,4.022,21.82,224,0.875,bicubic,+6.982,+2.596,+23 +tf_efficientnet_lite2,84.094,15.906,96.069,3.931,6.09,260,0.890,bicubic,+6.626,+2.315,+12 +efficientnet_b0,84.038,15.962,95.956,4.044,5.29,224,0.875,bicubic,+6.340,+2.424,+2 +gmixer_24_224,83.968,16.032,95.849,4.151,24.72,224,0.875,bicubic,+5.932,+2.185,-12 +hardcorenas_e,83.968,16.032,95.898,4.101,8.07,224,0.875,bilinear,+6.174,+2.204,-1 +tf_efficientnet_cc_b0_8e,83.966,16.034,96.065,3.935,24.01,224,0.875,bicubic,+6.058,+2.411,-6 +tv_resnext50_32x4d,83.959,16.041,95.960,4.040,25.03,224,0.875,bilinear,+6.339,+2.264,0 +regnety_016,83.955,16.045,96.005,3.995,11.20,224,0.875,bicubic,+6.093,+2.285,-7 +gluon_resnet50_v1b,83.940,16.060,96.012,3.988,25.56,224,0.875,bicubic,+6.360,+2.296,+2 +densenet161,83.906,16.094,96.010,3.990,28.68,224,0.875,bicubic,+6.548,+2.372,+8 +adv_inception_v3,83.902,16.098,95.935,4.065,23.83,299,0.875,bicubic,+6.320,+2.199,-1 +mobilenetv2_120d,83.893,16.107,95.909,4.091,5.83,224,0.875,bicubic,+6.609,+2.417,+9 +seresnext26t_32x4d,83.878,16.122,95.931,4.069,16.81,224,0.875,bicubic,+5.892,+2.185,-18 +tv_resnet101,83.848,16.152,95.892,4.108,44.55,224,0.875,bilinear,+6.474,+2.352,+3 +inception_v3,83.761,16.239,95.879,4.121,23.83,299,0.875,bicubic,+6.323,+2.405,0 +hardcorenas_d,83.759,16.241,95.734,4.266,7.50,224,0.875,bilinear,+6.327,+2.250,0 +seresnext26d_32x4d,83.754,16.246,95.849,4.151,16.81,224,0.875,bicubic,+6.152,+2.241,-9 +dla60,83.729,16.271,95.933,4.067,22.04,224,0.875,bilinear,+6.697,+2.615,+10 +repvgg_b1g4,83.699,16.301,96.020,3.980,39.97,224,0.875,bilinear,+6.105,+2.194,-10 +legacy_seresnet50,83.662,16.337,95.973,4.027,28.09,224,0.875,bilinear,+6.032,+2.225,-14 +tf_efficientnet_b0_ap,83.650,16.350,95.779,4.221,5.29,224,0.875,bicubic,+6.564,+2.523,+5 +skresnet34,83.641,16.359,95.933,4.067,22.28,224,0.875,bicubic,+6.729,+2.611,+10 +tf_efficientnet_cc_b0_4e,83.639,16.361,95.740,4.260,13.31,224,0.875,bicubic,+6.333,+2.406,-4 +resmlp_12_224,83.571,16.429,95.760,4.240,15.35,224,0.875,bicubic,+6.917,+2.580,+13 +densenet201,83.556,16.444,95.811,4.189,20.01,224,0.875,bicubic,+6.270,+2.333,-5 +mobilenetv3_large_100_miil,83.556,16.444,95.452,4.548,5.48,224,0.875,bilinear,+5.640,+2.542,-27 +gernet_s,83.522,16.478,95.794,4.206,8.17,224,0.875,bilinear,+6.606,+2.662,+4 +legacy_seresnext26_32x4d,83.517,16.483,95.719,4.281,16.79,224,0.875,bicubic,+6.413,+2.403,-3 +mixnet_m,83.515,16.485,95.689,4.311,5.01,224,0.875,bicubic,+6.255,+2.265,-7 +tf_efficientnet_b0,83.515,16.485,95.719,4.281,5.29,224,0.875,bicubic,+6.667,+2.491,+3 +hrnet_w18,83.500,16.500,95.907,4.093,21.30,224,0.875,bilinear,+6.742,+2.463,+4 +densenetblur121d,83.472,16.527,95.822,4.178,8.00,224,0.875,bicubic,+6.885,+2.630,+9 +selecsls42b,83.457,16.543,95.745,4.255,32.46,224,0.875,bicubic,+6.283,+2.355,-10 +tf_efficientnet_lite1,83.344,16.656,95.642,4.358,5.42,240,0.882,bicubic,+6.702,+2.416,+4 +hardcorenas_c,83.342,16.658,95.706,4.294,5.52,224,0.875,bilinear,+6.288,+2.548,-8 +regnetx_016,83.195,16.805,95.740,4.260,9.19,224,0.875,bicubic,+6.245,+2.320,-7 +mobilenetv2_140,83.182,16.818,95.689,4.311,6.11,224,0.875,bicubic,+6.666,+2.693,+7 +dpn68,83.178,16.822,95.597,4.402,12.61,224,0.875,bicubic,+6.860,+2.620,+8 +tf_efficientnet_es,83.178,16.822,95.585,4.415,5.44,224,0.875,bicubic,+6.584,+2.383,+1 +tf_mixnet_m,83.176,16.824,95.461,4.539,5.01,224,0.875,bicubic,+6.234,+2.309,-10 +ese_vovnet19b_dw,83.109,16.890,95.779,4.221,6.54,224,0.875,bicubic,+6.311,+2.511,-7 +levit_128s,83.069,16.931,95.531,4.469,7.78,224,0.900,bicubic,+6.539,+2.665,+1 +resnet26d,83.050,16.950,95.604,4.396,16.01,224,0.875,bicubic,+6.354,+2.454,-7 +repvgg_a2,83.001,16.999,95.589,4.411,28.21,224,0.875,bilinear,+6.541,+2.585,+1 +tv_resnet50,82.958,17.042,95.467,4.533,25.56,224,0.875,bilinear,+6.820,+2.603,+3 +hardcorenas_b,82.873,17.128,95.392,4.607,5.18,224,0.875,bilinear,+6.335,+2.638,-4 +densenet121,82.823,17.177,95.585,4.415,7.98,224,0.875,bicubic,+7.245,+2.933,+10 +vit_tiny_r_s16_p8_384,82.691,17.309,95.845,4.155,6.36,384,1.000,bicubic,+6.739,+2.585,+3 +densenet169,82.683,17.317,95.600,4.400,14.15,224,0.875,bicubic,+6.776,+2.574,+4 +mixnet_s,82.525,17.476,95.356,4.644,4.13,224,0.875,bicubic,+6.532,+2.560,-1 +vit_small_patch32_224,82.514,17.486,95.670,4.330,22.88,224,0.900,bicubic,+6.524,+2.398,-1 +regnety_008,82.493,17.508,95.487,4.513,6.26,224,0.875,bicubic,+6.177,+2.421,-5 +efficientnet_lite0,82.382,17.619,95.279,4.721,4.65,224,0.875,bicubic,+6.898,+2.769,+7 +resnest14d,82.349,17.651,95.339,4.661,10.61,224,0.875,bilinear,+6.843,+2.821,+5 +hardcorenas_a,82.313,17.687,95.294,4.706,5.26,224,0.875,bilinear,+6.397,+2.780,-3 +efficientnet_es_pruned,82.296,17.704,95.303,4.697,5.44,224,0.875,bicubic,+7.296,+2.855,+15 +mobilenetv3_rw,82.275,17.725,95.234,4.766,5.48,224,0.875,bicubic,+6.641,+2.526,-1 +semnasnet_100,82.251,17.749,95.230,4.770,3.89,224,0.875,bicubic,+6.803,+2.626,+4 +mobilenetv3_large_100,82.177,17.823,95.196,4.804,5.48,224,0.875,bicubic,+6.410,+2.654,-5 +resnet34,82.138,17.862,95.130,4.870,21.80,224,0.875,bilinear,+7.028,+2.846,+8 +mobilenetv2_110d,82.070,17.930,95.076,4.923,4.52,224,0.875,bicubic,+7.034,+2.890,+9 +vit_tiny_patch16_224,82.066,17.934,95.489,4.511,5.72,224,0.900,bicubic,+6.612,+2.641,-1 +tf_mixnet_s,82.038,17.962,95.121,4.879,4.13,224,0.875,bicubic,+6.388,+2.493,-8 +repvgg_b0,82.001,17.999,95.100,4.900,15.82,224,0.875,bilinear,+6.849,+2.682,+1 +deit_tiny_distilled_patch16_224,81.997,18.003,95.141,4.859,5.91,224,0.900,bicubic,+7.487,+3.251,+14 +mixer_b16_224,81.978,18.022,94.449,5.551,59.88,224,0.875,bicubic,+5.376,+2.221,-27 +pit_ti_distilled_224,81.967,18.033,95.145,4.855,5.10,224,0.900,bicubic,+7.437,+3.049,+11 +hrnet_w18_small_v2,81.961,18.039,95.164,4.836,15.60,224,0.875,bilinear,+6.847,+2.748,-1 +tf_efficientnet_lite0,81.952,18.048,95.168,4.832,4.65,224,0.875,bicubic,+7.122,+2.992,+3 +resnet26,81.944,18.056,95.241,4.759,16.00,224,0.875,bicubic,+6.652,+2.671,-7 +tf_mobilenetv3_large_100,81.848,18.152,95.070,4.930,5.48,224,0.875,bilinear,+6.330,+2.464,-13 +tv_densenet121,81.726,18.274,95.034,4.966,7.98,224,0.875,bicubic,+6.988,+2.884,+2 +regnety_006,81.700,18.300,95.115,4.885,6.06,224,0.875,bicubic,+6.454,+2.583,-9 +dla34,81.658,18.342,94.878,5.122,15.74,224,0.875,bilinear,+7.028,+2.800,+2 +fbnetc_100,81.559,18.441,94.970,5.030,5.57,224,0.875,bilinear,+6.436,+2.584,-9 +legacy_seresnet34,81.534,18.466,94.899,5.101,21.96,224,0.875,bilinear,+6.726,+2.775,-3 +gluon_resnet34_v1b,81.500,18.500,94.810,5.190,21.80,224,0.875,bicubic,+6.912,+2.820,0 +regnetx_008,81.485,18.515,95.059,4.941,7.26,224,0.875,bicubic,+6.447,+2.724,-9 +mnasnet_100,81.459,18.541,94.899,5.101,4.38,224,0.875,bicubic,+6.801,+2.785,-4 +vgg19_bn,81.444,18.556,94.763,5.237,143.68,224,0.875,bilinear,+7.230,+2.921,0 +convit_tiny,81.126,18.874,95.044,4.955,5.71,224,0.875,bicubic,+8.010,+3.331,+8 +spnasnet_100,80.878,19.122,94.526,5.474,4.42,224,0.875,bilinear,+6.794,+2.708,-1 +ghostnet_100,80.699,19.301,94.291,5.709,5.18,224,0.875,bilinear,+6.721,+2.835,0 +regnety_004,80.659,19.341,94.686,5.314,4.34,224,0.875,bicubic,+6.624,+2.934,-2 +skresnet18,80.637,19.363,94.378,5.622,11.96,224,0.875,bicubic,+7.599,+3.210,+5 +regnetx_006,80.629,19.371,94.524,5.476,6.20,224,0.875,bicubic,+6.777,+2.852,-2 +pit_ti_224,80.605,19.395,94.618,5.383,4.85,224,0.900,bicubic,+7.693,+3.216,+5 +swsl_resnet18,80.575,19.425,94.743,5.256,11.69,224,0.875,bilinear,+7.299,+3.010,0 +vgg16_bn,80.556,19.444,94.592,5.408,138.37,224,0.875,bilinear,+7.206,+3.086,-3 +tv_resnet34,80.389,19.611,94.436,5.564,21.80,224,0.875,bilinear,+7.077,+3.010,-3 +resnet18d,80.387,19.613,94.252,5.748,11.71,224,0.875,bicubic,+8.127,+3.556,+6 +mobilenetv2_100,80.257,19.743,94.195,5.805,3.50,224,0.875,bicubic,+7.287,+3.179,-1 +ssl_resnet18,80.101,19.899,94.590,5.410,11.69,224,0.875,bilinear,+7.491,+3.174,0 +tf_mobilenetv3_large_075,80.093,19.907,94.184,5.816,3.99,224,0.875,bilinear,+6.655,+2.834,-9 +deit_tiny_patch16_224,80.018,19.982,94.449,5.551,5.72,224,0.900,bicubic,+7.850,+3.331,+4 +hrnet_w18_small,79.557,20.443,93.898,6.102,13.19,224,0.875,bilinear,+7.215,+3.220,0 +vgg19,79.480,20.520,93.870,6.130,143.67,224,0.875,bilinear,+7.112,+2.998,-2 +regnetx_004,79.435,20.565,93.853,6.147,5.16,224,0.875,bicubic,+7.039,+3.023,-4 +tf_mobilenetv3_large_minimal_100,79.222,20.778,93.706,6.294,3.92,224,0.875,bilinear,+6.974,+3.076,-1 +legacy_seresnet18,79.153,20.847,93.783,6.217,11.78,224,0.875,bicubic,+7.411,+3.449,+2 +vgg16,79.038,20.962,93.646,6.354,138.36,224,0.875,bilinear,+7.444,+3.264,+3 +vgg13_bn,79.006,20.994,93.655,6.345,133.05,224,0.875,bilinear,+7.412,+3.279,+1 +vit_tiny_r_s16_p8_224,78.991,21.009,93.902,6.098,6.34,224,0.900,bicubic,+7.203,+3.074,-2 +gluon_resnet18_v1b,78.372,21.628,93.138,6.862,11.69,224,0.875,bicubic,+7.536,+3.376,+1 +vgg11_bn,77.926,22.074,93.230,6.770,132.87,224,0.875,bilinear,+7.566,+3.428,+1 +regnety_002,77.405,22.595,92.914,7.086,3.16,224,0.875,bicubic,+7.153,+3.374,+1 +mixer_l16_224,77.285,22.715,90.582,9.418,208.20,224,0.875,bicubic,+5.227,+2.914,-7 +resnet18,77.276,22.724,92.756,7.244,11.69,224,0.875,bilinear,+7.528,+3.678,+1 +vgg13,77.230,22.770,92.689,7.311,133.05,224,0.875,bilinear,+7.303,+3.444,-1 +vgg11,76.384,23.616,92.154,7.846,132.86,224,0.875,bilinear,+7.360,+3.526,0 +regnetx_002,76.124,23.876,92.211,7.789,2.68,224,0.875,bicubic,+7.362,+3.655,0 +dla60x_c,75.637,24.363,92.177,7.823,1.32,224,0.875,bilinear,+7.745,+3.751,+1 +tf_mobilenetv3_small_100,74.717,25.283,91.257,8.743,2.54,224,0.875,bilinear,+6.795,+3.593,-1 +dla46x_c,73.647,26.353,91.095,8.905,1.07,224,0.875,bilinear,+7.677,+4.115,0 +tf_mobilenetv3_small_075,72.812,27.188,90.036,9.964,2.04,224,0.875,bilinear,+7.096,+3.906,0 +dla46_c,72.601,27.399,90.499,9.501,1.30,224,0.875,bilinear,+7.735,+4.207,0 +tf_mobilenetv3_small_minimal_100,70.111,29.889,88.505,11.495,2.04,224,0.875,bilinear,+7.205,+4.275,0 diff --git a/testbed/huggingface__pytorch-image-models/results/results-imagenet.csv b/testbed/huggingface__pytorch-image-models/results/results-imagenet.csv new file mode 100644 index 0000000000000000000000000000000000000000..a5081bf0b09ff0f3a725589ee10e4a65fabe3ca7 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/results/results-imagenet.csv @@ -0,0 +1,421 @@ +model,top1,top1_err,top5,top5_err,param_count,img_size,cropt_pct,interpolation +tf_efficientnet_l2_ns,88.352,11.648,98.650,1.350,480.31,800,0.960,bicubic +tf_efficientnet_l2_ns_475,88.234,11.766,98.546,1.454,480.31,475,0.936,bicubic +swin_large_patch4_window12_384,87.148,12.852,98.234,1.766,196.74,384,1.000,bicubic +vit_large_patch16_384,87.080,12.920,98.300,1.700,304.72,384,1.000,bicubic +tf_efficientnet_b7_ns,86.840,13.160,98.094,1.906,66.35,600,0.949,bicubic +cait_m48_448,86.484,13.516,97.754,2.246,356.46,448,1.000,bicubic +tf_efficientnet_b6_ns,86.452,13.548,97.882,2.118,43.04,528,0.942,bicubic +swin_base_patch4_window12_384,86.432,13.568,98.058,1.942,87.90,384,1.000,bicubic +swin_large_patch4_window7_224,86.320,13.680,97.896,2.104,196.53,224,0.900,bicubic +tf_efficientnetv2_l_in21ft1k,86.304,13.696,97.978,2.022,118.52,480,1.000,bicubic +vit_large_r50_s32_384,86.184,13.816,97.918,2.082,329.09,384,1.000,bicubic +dm_nfnet_f6,86.144,13.856,97.730,2.270,438.36,576,0.956,bicubic +tf_efficientnet_b5_ns,86.088,13.912,97.752,2.248,30.39,456,0.934,bicubic +cait_m36_384,86.054,13.946,97.730,2.270,271.22,384,1.000,bicubic +vit_base_patch16_384,86.006,13.994,98.000,2.000,86.86,384,1.000,bicubic +vit_large_patch16_224,85.842,14.158,97.824,2.176,304.33,224,0.900,bicubic +dm_nfnet_f5,85.814,14.186,97.488,2.512,377.21,544,0.954,bicubic +dm_nfnet_f4,85.714,14.286,97.520,2.480,316.07,512,0.951,bicubic +tf_efficientnetv2_m_in21ft1k,85.588,14.412,97.752,2.248,54.14,480,1.000,bicubic +dm_nfnet_f3,85.522,14.478,97.462,2.538,254.92,416,0.940,bicubic +tf_efficientnetv2_l,85.490,14.510,97.372,2.628,118.52,480,1.000,bicubic +cait_s36_384,85.460,14.540,97.480,2.520,68.37,384,1.000,bicubic +ig_resnext101_32x48d,85.428,14.572,97.572,2.428,828.41,224,0.875,bilinear +deit_base_distilled_patch16_384,85.422,14.578,97.332,2.668,87.63,384,1.000,bicubic +tf_efficientnet_b8,85.370,14.630,97.390,2.610,87.41,672,0.954,bicubic +tf_efficientnet_b8_ap,85.370,14.630,97.294,2.706,87.41,672,0.954,bicubic +swin_base_patch4_window7_224,85.252,14.748,97.562,2.438,87.77,224,0.900,bicubic +tf_efficientnet_b4_ns,85.162,14.838,97.470,2.530,19.34,380,0.922,bicubic +tf_efficientnet_b7_ap,85.120,14.880,97.252,2.748,66.35,600,0.949,bicubic +ig_resnext101_32x32d,85.094,14.906,97.438,2.562,468.53,224,0.875,bilinear +dm_nfnet_f2,85.064,14.936,97.240,2.760,193.78,352,0.920,bicubic +cait_s24_384,85.046,14.954,97.346,2.654,47.06,384,1.000,bicubic +tf_efficientnetv2_m,85.044,14.956,97.278,2.722,54.14,480,1.000,bicubic +resnetrs420,85.008,14.992,97.124,2.876,191.89,416,1.000,bicubic +ecaresnet269d,84.976,15.024,97.226,2.774,102.09,352,1.000,bicubic +vit_base_r50_s16_384,84.972,15.028,97.288,2.712,98.95,384,1.000,bicubic +tf_efficientnet_b7,84.936,15.064,97.204,2.796,66.35,600,0.949,bicubic +resnetv2_152x4_bitm,84.916,15.084,97.442,2.558,936.53,480,1.000,bilinear +efficientnetv2_rw_m,84.808,15.192,97.148,2.852,53.24,416,1.000,bicubic +tf_efficientnet_b6_ap,84.788,15.212,97.138,2.862,43.04,528,0.942,bicubic +resnetrs350,84.720,15.280,96.988,3.012,163.96,384,1.000,bicubic +eca_nfnet_l2,84.698,15.302,97.264,2.736,56.72,384,1.000,bicubic +dm_nfnet_f1,84.626,15.374,97.100,2.900,132.63,320,0.910,bicubic +vit_base_patch16_224,84.532,15.468,97.294,2.706,86.57,224,0.900,bicubic +resnest269e,84.518,15.482,96.986,3.014,110.93,416,0.928,bicubic +resnetv2_152x2_bitm,84.510,15.490,97.432,2.568,236.34,448,1.000,bilinear +resnetv2_101x3_bitm,84.440,15.560,97.382,2.618,387.93,448,1.000,bilinear +resnetrs270,84.434,15.566,96.970,3.030,129.86,352,1.000,bicubic +vit_large_r50_s32_224,84.434,15.566,97.164,2.836,328.99,224,0.900,bicubic +resmlp_big_24_224_in22ft1k,84.394,15.606,97.120,2.880,129.14,224,0.875,bicubic +seresnet152d,84.362,15.638,97.040,2.960,66.84,320,1.000,bicubic +tf_efficientnetv2_s_in21ft1k,84.302,15.698,97.252,2.748,21.46,384,1.000,bicubic +swsl_resnext101_32x8d,84.284,15.716,97.176,2.824,88.79,224,0.875,bilinear +vit_base_patch16_224_miil,84.268,15.732,96.802,3.198,86.54,224,0.875,bilinear +tf_efficientnet_b5_ap,84.252,15.748,96.974,3.026,30.39,456,0.934,bicubic +ig_resnext101_32x16d,84.170,15.830,97.196,2.804,194.03,224,0.875,bilinear +pit_b_distilled_224,84.144,15.856,96.856,3.144,74.79,224,0.900,bicubic +tf_efficientnet_b6,84.110,15.890,96.886,3.114,43.04,528,0.942,bicubic +resnetrs200,84.066,15.934,96.874,3.126,93.21,320,1.000,bicubic +cait_xs24_384,84.062,15.938,96.888,3.112,26.67,384,1.000,bicubic +tf_efficientnet_b3_ns,84.048,15.952,96.910,3.090,12.23,300,0.904,bicubic +vit_small_r26_s32_384,84.046,15.954,97.328,2.672,36.47,384,1.000,bicubic +resnetv2_50x3_bitm,84.014,15.986,97.124,2.876,217.32,448,1.000,bilinear +eca_nfnet_l1,84.010,15.990,97.028,2.972,41.41,320,1.000,bicubic +resnet200d,83.962,16.038,96.824,3.176,64.69,320,1.000,bicubic +tf_efficientnetv2_s,83.894,16.106,96.698,3.302,21.46,384,1.000,bicubic +resnetv2_152x2_bit_teacher_384,83.844,16.156,97.118,2.882,236.34,384,1.000,bicubic +resnest200e,83.832,16.168,96.894,3.106,70.20,320,0.909,bicubic +tf_efficientnet_b5,83.812,16.188,96.748,3.252,30.39,456,0.934,bicubic +efficientnetv2_rw_s,83.808,16.192,96.724,3.276,23.94,384,1.000,bicubic +vit_small_patch16_384,83.802,16.198,97.102,2.898,22.20,384,1.000,bicubic +resnetrs152,83.712,16.288,96.614,3.386,86.62,320,1.000,bicubic +regnety_160,83.686,16.314,96.776,3.224,83.59,288,1.000,bicubic +resnet152d,83.680,16.320,96.738,3.262,60.21,320,1.000,bicubic +twins_svt_large,83.678,16.322,96.594,3.406,99.27,224,0.900,bicubic +resmlp_big_24_distilled_224,83.590,16.410,96.648,3.352,129.14,224,0.875,bicubic +cait_s24_224,83.452,16.548,96.564,3.436,46.92,224,1.000,bicubic +efficientnet_b4,83.428,16.572,96.596,3.404,19.34,384,1.000,bicubic +deit_base_distilled_patch16_224,83.388,16.612,96.488,3.512,87.34,224,0.900,bicubic +dm_nfnet_f0,83.386,16.614,96.572,3.428,71.49,256,0.900,bicubic +vit_base_patch32_384,83.350,16.650,96.836,3.164,88.30,384,1.000,bicubic +swsl_resnext101_32x16d,83.346,16.654,96.846,3.154,194.03,224,0.875,bilinear +tf_efficientnet_b4_ap,83.248,16.752,96.392,3.608,19.34,380,0.922,bicubic +swsl_resnext101_32x4d,83.230,16.770,96.760,3.240,44.18,224,0.875,bilinear +swin_small_patch4_window7_224,83.212,16.788,96.322,3.678,49.61,224,0.900,bicubic +twins_pcpvt_large,83.140,16.860,96.598,3.402,60.99,224,0.900,bicubic +twins_svt_base,83.136,16.864,96.418,3.582,56.07,224,0.900,bicubic +deit_base_patch16_384,83.106,16.894,96.372,3.628,86.86,384,1.000,bicubic +tresnet_m,83.080,16.920,96.118,3.882,31.39,224,0.875,bilinear +tresnet_xl_448,83.050,16.950,96.174,3.826,78.44,448,0.875,bilinear +resnet101d,83.022,16.978,96.446,3.554,44.57,320,1.000,bicubic +tf_efficientnet_b4,83.022,16.978,96.300,3.700,19.34,380,0.922,bicubic +resnest101e,82.890,17.110,96.320,3.680,48.28,256,0.875,bilinear +resnetv2_152x2_bit_teacher,82.862,17.138,96.568,3.432,236.34,224,0.875,bicubic +resnetv2_50x1_bit_distilled,82.818,17.182,96.522,3.478,25.55,224,0.875,bicubic +pnasnet5large,82.782,17.218,96.040,3.960,86.06,331,0.911,bicubic +nfnet_l0,82.750,17.250,96.516,3.484,35.07,288,1.000,bicubic +regnety_032,82.724,17.276,96.424,3.576,19.44,288,1.000,bicubic +twins_pcpvt_base,82.708,17.292,96.346,3.654,43.83,224,0.900,bicubic +ig_resnext101_32x8d,82.688,17.312,96.636,3.364,88.79,224,0.875,bilinear +nasnetalarge,82.620,17.380,96.046,3.954,88.75,331,0.911,bicubic +levit_384,82.586,17.414,96.016,3.984,39.13,224,0.900,bicubic +eca_nfnet_l0,82.580,17.420,96.490,3.510,24.14,288,1.000,bicubic +pit_b_224,82.446,17.554,95.710,4.290,73.76,224,0.900,bicubic +tf_efficientnet_b2_ns,82.380,17.620,96.248,3.752,9.11,260,0.890,bicubic +resnet51q,82.360,17.640,96.180,3.820,35.70,288,1.000,bilinear +ecaresnet50t,82.346,17.654,96.138,3.862,25.57,320,0.950,bicubic +resnetv2_101x1_bitm,82.332,17.668,96.518,3.482,44.54,448,1.000,bilinear +coat_lite_small,82.308,17.692,95.850,4.150,19.84,224,0.900,bicubic +mixer_b16_224_miil,82.308,17.692,95.716,4.284,59.88,224,0.875,bilinear +convit_base,82.290,17.710,95.938,4.062,86.54,224,0.875,bicubic +resnetrs101,82.288,17.712,96.008,3.992,63.62,288,0.940,bicubic +tresnet_l_448,82.268,17.732,95.976,4.024,55.99,448,0.875,bilinear +efficientnet_b3,82.242,17.758,96.114,3.886,12.23,320,1.000,bicubic +cait_xxs36_384,82.194,17.806,96.148,3.852,17.37,384,1.000,bicubic +swsl_resnext50_32x4d,82.182,17.818,96.230,3.770,25.03,224,0.875,bilinear +ecaresnet101d,82.172,17.828,96.046,3.954,44.57,224,0.875,bicubic +visformer_small,82.106,17.894,95.872,4.128,40.22,224,0.900,bicubic +tresnet_xl,82.054,17.946,95.936,4.064,78.44,224,0.875,bilinear +deit_base_patch16_224,81.998,18.002,95.734,4.266,86.57,224,0.900,bicubic +pit_s_distilled_224,81.996,18.004,95.798,4.202,24.04,224,0.900,bicubic +tf_efficientnetv2_b3,81.970,18.030,95.782,4.218,14.36,300,0.904,bicubic +vit_small_r26_s32_224,81.858,18.142,96.022,3.978,36.43,224,0.900,bicubic +ssl_resnext101_32x16d,81.844,18.156,96.096,3.904,194.03,224,0.875,bilinear +tf_efficientnet_b3_ap,81.822,18.178,95.624,4.376,12.23,300,0.904,bicubic +tresnet_m_448,81.714,18.286,95.572,4.428,31.39,448,0.875,bilinear +twins_svt_small,81.682,18.318,95.670,4.330,24.06,224,0.900,bicubic +tf_efficientnet_b3,81.636,18.364,95.718,4.282,12.23,300,0.904,bicubic +rexnet_200,81.632,18.368,95.668,4.332,16.37,224,0.875,bicubic +ssl_resnext101_32x8d,81.616,18.384,96.038,3.962,88.79,224,0.875,bilinear +tf_efficientnet_lite4,81.536,18.464,95.668,4.332,13.01,380,0.920,bilinear +tnt_s_patch16_224,81.518,18.482,95.748,4.252,23.76,224,0.900,bicubic +levit_256,81.510,18.490,95.490,4.510,18.89,224,0.900,bicubic +vit_large_patch32_384,81.506,18.494,96.092,3.908,306.63,384,1.000,bicubic +tresnet_l,81.490,18.510,95.624,4.376,55.99,224,0.875,bilinear +wide_resnet50_2,81.456,18.544,95.532,4.468,68.88,224,0.875,bicubic +convit_small,81.426,18.574,95.744,4.256,27.78,224,0.875,bicubic +vit_small_patch16_224,81.402,18.598,96.134,3.866,22.05,224,0.900,bicubic +tf_efficientnet_b1_ns,81.388,18.612,95.738,4.262,7.79,240,0.882,bicubic +swin_tiny_patch4_window7_224,81.378,18.622,95.540,4.460,28.29,224,0.900,bicubic +gernet_l,81.354,18.646,95.536,4.464,31.08,256,0.875,bilinear +efficientnet_el,81.316,18.684,95.526,4.474,10.59,300,0.904,bicubic +legacy_senet154,81.310,18.690,95.496,4.504,115.09,224,0.875,bilinear +coat_mini,81.268,18.732,95.392,4.608,10.34,224,0.900,bicubic +seresnext50_32x4d,81.266,18.734,95.620,4.380,27.56,224,0.875,bicubic +gluon_senet154,81.234,18.766,95.348,4.652,115.09,224,0.875,bicubic +deit_small_distilled_patch16_224,81.200,18.800,95.378,4.622,22.44,224,0.900,bicubic +swsl_resnet50,81.166,18.834,95.972,4.028,25.56,224,0.875,bilinear +resmlp_36_distilled_224,81.160,18.840,95.488,4.512,44.69,224,0.875,bicubic +resnest50d_4s2x40d,81.108,18.892,95.558,4.442,30.42,224,0.875,bicubic +pit_s_224,81.094,18.906,95.332,4.668,23.46,224,0.900,bicubic +twins_pcpvt_small,81.088,18.912,95.642,4.358,24.11,224,0.900,bicubic +resmlp_big_24_224,81.028,18.972,95.022,4.978,129.14,224,0.875,bicubic +gluon_resnet152_v1s,81.016,18.984,95.412,4.588,60.32,224,0.875,bicubic +resnest50d_1s4x24d,80.988,19.012,95.322,4.678,25.68,224,0.875,bicubic +resnest50d,80.974,19.026,95.378,4.622,27.48,224,0.875,bilinear +cait_xxs24_384,80.966,19.034,95.646,4.354,12.03,384,1.000,bicubic +ssl_resnext101_32x4d,80.924,19.076,95.728,4.272,44.18,224,0.875,bilinear +gluon_seresnext101_32x4d,80.904,19.096,95.294,4.706,48.96,224,0.875,bicubic +gluon_seresnext101_64x4d,80.894,19.106,95.308,4.692,88.23,224,0.875,bicubic +efficientnet_b3_pruned,80.858,19.142,95.242,4.758,9.86,300,0.904,bicubic +ecaresnet101d_pruned,80.818,19.182,95.628,4.372,24.88,224,0.875,bicubic +regnety_320,80.812,19.188,95.244,4.756,145.05,224,0.875,bicubic +resmlp_24_distilled_224,80.766,19.234,95.218,4.782,30.02,224,0.875,bicubic +gernet_m,80.732,19.268,95.184,4.816,21.14,224,0.875,bilinear +vit_base_patch32_224,80.724,19.276,95.568,4.432,88.22,224,0.900,bicubic +nf_resnet50,80.660,19.340,95.336,4.664,25.56,288,0.940,bicubic +efficientnet_b2,80.612,19.388,95.318,4.682,9.11,288,1.000,bicubic +gluon_resnext101_64x4d,80.604,19.396,94.988,5.012,83.46,224,0.875,bicubic +ecaresnet50d,80.592,19.408,95.320,4.680,25.58,224,0.875,bicubic +resnet50d,80.530,19.470,95.160,4.840,25.58,224,0.875,bicubic +repvgg_b3,80.492,19.508,95.260,4.740,123.09,224,0.875,bilinear +vit_small_patch32_384,80.480,19.520,95.598,4.402,22.92,384,1.000,bicubic +mixnet_xl,80.476,19.524,94.936,5.064,11.90,224,0.875,bicubic +gluon_resnet152_v1d,80.474,19.526,95.206,4.794,60.21,224,0.875,bicubic +ecaresnetlight,80.462,19.538,95.250,4.750,30.16,224,0.875,bicubic +inception_resnet_v2,80.458,19.542,95.306,4.694,55.84,299,0.897,bicubic +gluon_resnet101_v1d,80.414,19.586,95.014,4.986,44.57,224,0.875,bicubic +regnety_120,80.366,19.634,95.126,4.874,51.82,224,0.875,bicubic +resnetv2_50x1_bitm,80.342,19.658,95.684,4.316,25.55,448,1.000,bilinear +gluon_resnext101_32x4d,80.334,19.666,94.926,5.074,44.18,224,0.875,bicubic +ssl_resnext50_32x4d,80.318,19.682,95.406,4.594,25.03,224,0.875,bilinear +rexnet_150,80.310,19.690,95.166,4.834,9.73,224,0.875,bicubic +gluon_resnet101_v1s,80.302,19.698,95.160,4.840,44.67,224,0.875,bicubic +tf_efficientnet_b2_ap,80.300,19.700,95.028,4.972,9.11,260,0.890,bicubic +efficientnet_el_pruned,80.300,19.700,95.218,4.782,10.59,300,0.904,bicubic +seresnet50,80.274,19.726,95.070,4.930,28.09,224,0.875,bicubic +tf_efficientnet_el,80.250,19.750,95.128,4.872,10.59,300,0.904,bicubic +regnetx_320,80.246,19.754,95.026,4.974,107.81,224,0.875,bicubic +legacy_seresnext101_32x4d,80.228,19.772,95.018,4.982,48.96,224,0.875,bilinear +repvgg_b3g4,80.212,19.788,95.110,4.890,83.83,224,0.875,bilinear +tf_efficientnetv2_b2,80.208,19.792,95.042,4.958,10.10,260,0.890,bicubic +inception_v4,80.168,19.832,94.968,5.032,42.68,299,0.875,bicubic +dpn107,80.156,19.844,94.910,5.090,86.92,224,0.875,bicubic +skresnext50_32x4d,80.156,19.844,94.642,5.358,27.48,224,0.875,bicubic +tf_efficientnet_b2,80.086,19.914,94.908,5.092,9.11,260,0.890,bicubic +cspdarknet53,80.058,19.942,95.084,4.916,27.64,256,0.887,bilinear +cspresnext50,80.040,19.960,94.944,5.056,20.57,224,0.875,bilinear +dpn92,80.008,19.992,94.836,5.164,37.67,224,0.875,bicubic +ens_adv_inception_resnet_v2,79.982,20.018,94.938,5.062,55.84,299,0.897,bicubic +gluon_seresnext50_32x4d,79.918,20.082,94.822,5.178,27.56,224,0.875,bicubic +efficientnet_b2_pruned,79.916,20.084,94.856,5.144,8.31,260,0.890,bicubic +gluon_resnet152_v1c,79.910,20.090,94.840,5.160,60.21,224,0.875,bicubic +resnetrs50,79.892,20.108,94.968,5.032,35.69,224,0.910,bicubic +regnety_080,79.876,20.124,94.830,5.170,39.18,224,0.875,bicubic +xception71,79.874,20.126,94.922,5.078,42.34,299,0.903,bicubic +deit_small_patch16_224,79.856,20.144,95.052,4.948,22.05,224,0.900,bicubic +regnetx_160,79.856,20.144,94.830,5.170,54.28,224,0.875,bicubic +ecaresnet26t,79.854,20.146,95.084,4.916,16.01,320,0.950,bicubic +levit_192,79.842,20.158,94.786,5.214,10.95,224,0.900,bicubic +dpn131,79.822,20.178,94.710,5.290,79.25,224,0.875,bicubic +tf_efficientnet_lite3,79.820,20.180,94.914,5.086,8.20,300,0.904,bilinear +resmlp_36_224,79.770,20.230,94.886,5.114,44.69,224,0.875,bicubic +resnext50_32x4d,79.768,20.232,94.598,5.402,25.03,224,0.875,bicubic +cait_xxs36_224,79.750,20.250,94.866,5.134,17.30,224,1.000,bicubic +regnety_064,79.722,20.278,94.768,5.232,30.58,224,0.875,bicubic +ecaresnet50d_pruned,79.716,20.284,94.880,5.120,19.94,224,0.875,bicubic +gluon_xception65,79.716,20.284,94.860,5.140,39.92,299,0.903,bicubic +gluon_resnet152_v1b,79.686,20.314,94.736,5.264,60.19,224,0.875,bicubic +resnext50d_32x4d,79.676,20.324,94.866,5.134,25.05,224,0.875,bicubic +dpn98,79.642,20.358,94.598,5.402,61.57,224,0.875,bicubic +regnetx_120,79.596,20.404,94.738,5.262,46.11,224,0.875,bicubic +cspresnet50,79.574,20.426,94.712,5.288,21.62,256,0.887,bilinear +xception65,79.552,20.448,94.654,5.346,39.92,299,0.903,bicubic +gluon_resnet101_v1c,79.534,20.466,94.578,5.422,44.57,224,0.875,bicubic +rexnet_130,79.500,20.500,94.682,5.318,7.56,224,0.875,bicubic +hrnet_w64,79.474,20.526,94.652,5.348,128.06,224,0.875,bilinear +tf_efficientnetv2_b1,79.462,20.538,94.722,5.278,8.14,240,0.882,bicubic +dla102x2,79.448,20.552,94.640,5.360,41.28,224,0.875,bilinear +resmlp_24_224,79.374,20.626,94.546,5.454,30.02,224,0.875,bicubic +repvgg_b2g4,79.366,20.634,94.688,5.312,61.76,224,0.875,bilinear +gluon_resnext50_32x4d,79.354,20.646,94.426,5.574,25.03,224,0.875,bicubic +ese_vovnet39b,79.320,20.680,94.712,5.288,24.57,224,0.875,bicubic +resnext101_32x8d,79.308,20.692,94.518,5.482,88.79,224,0.875,bilinear +tf_efficientnet_cc_b1_8e,79.308,20.692,94.370,5.630,39.72,240,0.882,bicubic +gluon_resnet101_v1b,79.306,20.694,94.524,5.476,44.55,224,0.875,bicubic +pit_xs_distilled_224,79.306,20.694,94.364,5.636,11.00,224,0.900,bicubic +hrnet_w48,79.300,20.700,94.512,5.488,77.47,224,0.875,bilinear +nf_regnet_b1,79.292,20.708,94.748,5.252,10.22,288,0.900,bicubic +resnetblur50,79.286,20.714,94.638,5.362,25.56,224,0.875,bicubic +tf_efficientnet_b1_ap,79.280,20.720,94.306,5.694,7.79,240,0.882,bicubic +efficientnet_em,79.252,20.748,94.794,5.206,6.90,240,0.882,bicubic +ssl_resnet50,79.222,20.778,94.832,5.168,25.56,224,0.875,bilinear +regnety_040,79.220,20.780,94.656,5.344,20.65,224,0.875,bicubic +dpn68b,79.216,20.784,94.414,5.586,12.61,224,0.875,bicubic +res2net50_26w_8s,79.198,20.802,94.368,5.632,48.40,224,0.875,bilinear +res2net101_26w_4s,79.198,20.802,94.432,5.568,45.21,224,0.875,bilinear +regnetx_080,79.194,20.806,94.560,5.440,39.57,224,0.875,bicubic +coat_lite_mini,79.088,20.912,94.604,5.396,11.01,224,0.900,bicubic +legacy_seresnext50_32x4d,79.078,20.922,94.436,5.564,27.56,224,0.875,bilinear +gluon_resnet50_v1d,79.074,20.926,94.470,5.530,25.58,224,0.875,bicubic +regnetx_064,79.072,20.928,94.458,5.542,26.21,224,0.875,bicubic +xception,79.052,20.948,94.392,5.608,22.86,299,0.897,bicubic +resnet50,79.038,20.962,94.390,5.610,25.56,224,0.875,bicubic +mixnet_l,78.976,21.024,94.182,5.818,7.33,224,0.875,bicubic +hrnet_w40,78.920,21.080,94.470,5.530,57.56,224,0.875,bilinear +hrnet_w44,78.896,21.104,94.368,5.632,67.06,224,0.875,bilinear +wide_resnet101_2,78.856,21.144,94.282,5.718,126.89,224,0.875,bilinear +tf_efficientnet_b1,78.826,21.174,94.198,5.802,7.79,240,0.882,bicubic +gluon_inception_v3,78.806,21.194,94.370,5.630,23.83,299,0.875,bicubic +efficientnet_b1,78.794,21.206,94.342,5.658,7.79,256,1.000,bicubic +repvgg_b2,78.792,21.208,94.414,5.586,89.02,224,0.875,bilinear +tf_mixnet_l,78.774,21.226,93.998,6.002,7.33,224,0.875,bicubic +gluon_resnet50_v1s,78.712,21.288,94.238,5.762,25.68,224,0.875,bicubic +dla169,78.688,21.312,94.336,5.664,53.39,224,0.875,bilinear +legacy_seresnet152,78.660,21.340,94.370,5.630,66.82,224,0.875,bilinear +tf_efficientnet_b0_ns,78.658,21.342,94.376,5.624,5.29,224,0.875,bicubic +res2net50_26w_6s,78.570,21.430,94.124,5.876,37.05,224,0.875,bilinear +xception41,78.516,21.484,94.278,5.722,26.97,299,0.903,bicubic +dla102x,78.510,21.490,94.228,5.772,26.31,224,0.875,bilinear +levit_128,78.486,21.514,94.010,5.990,9.21,224,0.900,bicubic +regnetx_040,78.482,21.518,94.244,5.756,22.12,224,0.875,bicubic +resnest26d,78.478,21.522,94.298,5.702,17.07,224,0.875,bilinear +dla60_res2net,78.464,21.536,94.206,5.794,20.85,224,0.875,bilinear +hrnet_w32,78.450,21.550,94.186,5.814,41.23,224,0.875,bilinear +dla60_res2next,78.440,21.560,94.152,5.848,17.03,224,0.875,bilinear +coat_tiny,78.434,21.566,94.038,5.962,5.50,224,0.900,bicubic +vit_tiny_patch16_384,78.430,21.570,94.542,5.458,5.79,384,1.000,bicubic +selecsls60b,78.412,21.588,94.174,5.826,32.77,224,0.875,bicubic +cait_xxs24_224,78.386,21.614,94.310,5.690,11.96,224,1.000,bicubic +legacy_seresnet101,78.382,21.618,94.264,5.736,49.33,224,0.875,bilinear +repvgg_b1,78.366,21.634,94.098,5.902,57.42,224,0.875,bilinear +tf_efficientnetv2_b0,78.356,21.644,94.024,5.976,7.14,224,0.875,bicubic +tv_resnet152,78.312,21.688,94.038,5.962,60.19,224,0.875,bilinear +dla60x,78.246,21.754,94.018,5.982,17.35,224,0.875,bilinear +res2next50,78.246,21.754,93.892,6.108,24.67,224,0.875,bilinear +efficientnet_b1_pruned,78.236,21.764,93.834,6.166,6.33,240,0.882,bicubic +hrnet_w30,78.206,21.794,94.222,5.778,37.71,224,0.875,bilinear +pit_xs_224,78.182,21.818,94.168,5.832,10.62,224,0.900,bicubic +regnetx_032,78.172,21.828,94.088,5.912,15.30,224,0.875,bicubic +res2net50_14w_8s,78.150,21.850,93.848,6.152,25.06,224,0.875,bilinear +tf_efficientnet_em,78.130,21.870,94.044,5.956,6.90,240,0.882,bicubic +hardcorenas_f,78.104,21.896,93.802,6.198,8.20,224,0.875,bilinear +efficientnet_es,78.066,21.934,93.926,6.074,5.44,224,0.875,bicubic +gmixer_24_224,78.036,21.964,93.664,6.336,24.72,224,0.875,bicubic +dla102,78.032,21.968,93.946,6.054,33.27,224,0.875,bilinear +gluon_resnet50_v1c,78.012,21.988,93.988,6.012,25.58,224,0.875,bicubic +seresnext26t_32x4d,77.986,22.014,93.746,6.254,16.81,224,0.875,bicubic +selecsls60,77.982,22.018,93.828,6.172,30.67,224,0.875,bicubic +res2net50_26w_4s,77.964,22.036,93.854,6.146,25.70,224,0.875,bilinear +resmlp_12_distilled_224,77.944,22.056,93.558,6.442,15.35,224,0.875,bicubic +mobilenetv3_large_100_miil,77.916,22.084,92.910,7.090,5.48,224,0.875,bilinear +tf_efficientnet_cc_b0_8e,77.908,22.092,93.654,6.346,24.01,224,0.875,bicubic +regnety_016,77.862,22.138,93.720,6.280,11.20,224,0.875,bicubic +tf_inception_v3,77.862,22.138,93.640,6.360,23.83,299,0.875,bicubic +rexnet_100,77.858,22.142,93.870,6.130,4.80,224,0.875,bicubic +hardcorenas_e,77.794,22.206,93.694,6.306,8.07,224,0.875,bilinear +efficientnet_b0,77.698,22.302,93.532,6.468,5.29,224,0.875,bicubic +legacy_seresnet50,77.630,22.370,93.748,6.252,28.09,224,0.875,bilinear +tv_resnext50_32x4d,77.620,22.380,93.696,6.304,25.03,224,0.875,bilinear +seresnext26d_32x4d,77.602,22.398,93.608,6.392,16.81,224,0.875,bicubic +repvgg_b1g4,77.594,22.406,93.826,6.174,39.97,224,0.875,bilinear +adv_inception_v3,77.582,22.418,93.736,6.264,23.83,299,0.875,bicubic +gluon_resnet50_v1b,77.580,22.420,93.716,6.284,25.56,224,0.875,bicubic +res2net50_48w_2s,77.522,22.478,93.554,6.446,25.29,224,0.875,bilinear +coat_lite_tiny,77.512,22.488,93.916,6.084,5.72,224,0.900,bicubic +tf_efficientnet_lite2,77.468,22.532,93.754,6.246,6.09,260,0.890,bicubic +inception_v3,77.438,22.562,93.474,6.526,23.83,299,0.875,bicubic +hardcorenas_d,77.432,22.568,93.484,6.516,7.50,224,0.875,bilinear +tv_resnet101,77.374,22.626,93.540,6.460,44.55,224,0.875,bilinear +densenet161,77.358,22.642,93.638,6.362,28.68,224,0.875,bicubic +tf_efficientnet_cc_b0_4e,77.306,22.694,93.334,6.666,13.31,224,0.875,bicubic +densenet201,77.286,22.714,93.478,6.522,20.01,224,0.875,bicubic +mobilenetv2_120d,77.284,22.716,93.492,6.508,5.83,224,0.875,bicubic +mixnet_m,77.260,22.740,93.424,6.576,5.01,224,0.875,bicubic +selecsls42b,77.174,22.826,93.390,6.610,32.46,224,0.875,bicubic +resnet34d,77.116,22.884,93.382,6.618,21.82,224,0.875,bicubic +legacy_seresnext26_32x4d,77.104,22.896,93.316,6.684,16.79,224,0.875,bicubic +tf_efficientnet_b0_ap,77.086,22.914,93.256,6.744,5.29,224,0.875,bicubic +hardcorenas_c,77.054,22.946,93.158,6.842,5.52,224,0.875,bilinear +dla60,77.032,22.968,93.318,6.682,22.04,224,0.875,bilinear +regnetx_016,76.950,23.050,93.420,6.580,9.19,224,0.875,bicubic +tf_mixnet_m,76.942,23.058,93.152,6.848,5.01,224,0.875,bicubic +gernet_s,76.916,23.084,93.132,6.868,8.17,224,0.875,bilinear +skresnet34,76.912,23.088,93.322,6.678,22.28,224,0.875,bicubic +tf_efficientnet_b0,76.848,23.152,93.228,6.772,5.29,224,0.875,bicubic +ese_vovnet19b_dw,76.798,23.202,93.268,6.732,6.54,224,0.875,bicubic +hrnet_w18,76.758,23.242,93.444,6.556,21.30,224,0.875,bilinear +resnet26d,76.696,23.304,93.150,6.850,16.01,224,0.875,bicubic +resmlp_12_224,76.654,23.346,93.180,6.820,15.35,224,0.875,bicubic +tf_efficientnet_lite1,76.642,23.358,93.226,6.774,5.42,240,0.882,bicubic +mixer_b16_224,76.602,23.398,92.228,7.772,59.88,224,0.875,bicubic +tf_efficientnet_es,76.594,23.406,93.202,6.798,5.44,224,0.875,bicubic +densenetblur121d,76.588,23.412,93.192,6.808,8.00,224,0.875,bicubic +hardcorenas_b,76.538,23.462,92.754,7.246,5.18,224,0.875,bilinear +levit_128s,76.530,23.470,92.866,7.134,7.78,224,0.900,bicubic +mobilenetv2_140,76.516,23.484,92.996,7.004,6.11,224,0.875,bicubic +repvgg_a2,76.460,23.540,93.004,6.996,28.21,224,0.875,bilinear +dpn68,76.318,23.682,92.978,7.022,12.61,224,0.875,bicubic +regnety_008,76.316,23.684,93.066,6.934,6.26,224,0.875,bicubic +tv_resnet50,76.138,23.862,92.864,7.136,25.56,224,0.875,bilinear +mixnet_s,75.992,24.008,92.796,7.204,4.13,224,0.875,bicubic +vit_small_patch32_224,75.990,24.010,93.272,6.728,22.88,224,0.900,bicubic +vit_tiny_r_s16_p8_384,75.952,24.048,93.260,6.740,6.36,384,1.000,bicubic +hardcorenas_a,75.916,24.084,92.514,7.486,5.26,224,0.875,bilinear +densenet169,75.906,24.094,93.026,6.974,14.15,224,0.875,bicubic +mobilenetv3_large_100,75.766,24.234,92.542,7.458,5.48,224,0.875,bicubic +tf_mixnet_s,75.650,24.350,92.628,7.372,4.13,224,0.875,bicubic +mobilenetv3_rw,75.634,24.366,92.708,7.292,5.48,224,0.875,bicubic +densenet121,75.578,24.422,92.652,7.348,7.98,224,0.875,bicubic +tf_mobilenetv3_large_100,75.518,24.482,92.606,7.394,5.48,224,0.875,bilinear +resnest14d,75.506,24.494,92.518,7.482,10.61,224,0.875,bilinear +efficientnet_lite0,75.484,24.516,92.510,7.490,4.65,224,0.875,bicubic +vit_tiny_patch16_224,75.454,24.546,92.848,7.152,5.72,224,0.900,bicubic +semnasnet_100,75.448,24.552,92.604,7.396,3.89,224,0.875,bicubic +resnet26,75.292,24.708,92.570,7.430,16.00,224,0.875,bicubic +regnety_006,75.246,24.754,92.532,7.468,6.06,224,0.875,bicubic +repvgg_b0,75.152,24.848,92.418,7.582,15.82,224,0.875,bilinear +fbnetc_100,75.124,24.876,92.386,7.614,5.57,224,0.875,bilinear +hrnet_w18_small_v2,75.114,24.886,92.416,7.584,15.60,224,0.875,bilinear +resnet34,75.110,24.890,92.284,7.716,21.80,224,0.875,bilinear +regnetx_008,75.038,24.962,92.336,7.664,7.26,224,0.875,bicubic +mobilenetv2_110d,75.036,24.964,92.186,7.814,4.52,224,0.875,bicubic +efficientnet_es_pruned,75.000,25.000,92.448,7.552,5.44,224,0.875,bicubic +tf_efficientnet_lite0,74.830,25.170,92.176,7.824,4.65,224,0.875,bicubic +legacy_seresnet34,74.808,25.192,92.124,7.876,21.96,224,0.875,bilinear +tv_densenet121,74.738,25.262,92.150,7.850,7.98,224,0.875,bicubic +mnasnet_100,74.658,25.342,92.114,7.886,4.38,224,0.875,bicubic +dla34,74.630,25.370,92.078,7.922,15.74,224,0.875,bilinear +gluon_resnet34_v1b,74.588,25.412,91.990,8.010,21.80,224,0.875,bicubic +pit_ti_distilled_224,74.530,25.470,92.096,7.904,5.10,224,0.900,bicubic +deit_tiny_distilled_patch16_224,74.510,25.490,91.890,8.110,5.91,224,0.900,bicubic +vgg19_bn,74.214,25.786,91.842,8.158,143.68,224,0.875,bilinear +spnasnet_100,74.084,25.916,91.818,8.182,4.42,224,0.875,bilinear +regnety_004,74.034,25.966,91.752,8.248,4.34,224,0.875,bicubic +ghostnet_100,73.978,26.022,91.456,8.544,5.18,224,0.875,bilinear +regnetx_006,73.852,26.148,91.672,8.328,6.20,224,0.875,bicubic +tf_mobilenetv3_large_075,73.438,26.562,91.350,8.650,3.99,224,0.875,bilinear +vgg16_bn,73.350,26.650,91.506,8.494,138.37,224,0.875,bilinear +tv_resnet34,73.312,26.688,91.426,8.574,21.80,224,0.875,bilinear +swsl_resnet18,73.276,26.724,91.734,8.266,11.69,224,0.875,bilinear +convit_tiny,73.116,26.884,91.714,8.286,5.71,224,0.875,bicubic +skresnet18,73.038,26.962,91.168,8.832,11.96,224,0.875,bicubic +mobilenetv2_100,72.970,27.030,91.016,8.984,3.50,224,0.875,bicubic +pit_ti_224,72.912,27.088,91.402,8.598,4.85,224,0.900,bicubic +ssl_resnet18,72.610,27.390,91.416,8.584,11.69,224,0.875,bilinear +regnetx_004,72.396,27.604,90.830,9.170,5.16,224,0.875,bicubic +vgg19,72.368,27.632,90.872,9.128,143.67,224,0.875,bilinear +hrnet_w18_small,72.342,27.658,90.678,9.322,13.19,224,0.875,bilinear +resnet18d,72.260,27.740,90.696,9.304,11.71,224,0.875,bicubic +tf_mobilenetv3_large_minimal_100,72.248,27.752,90.630,9.370,3.92,224,0.875,bilinear +deit_tiny_patch16_224,72.168,27.832,91.118,8.882,5.72,224,0.900,bicubic +mixer_l16_224,72.058,27.942,87.668,12.332,208.20,224,0.875,bicubic +vit_tiny_r_s16_p8_224,71.788,28.212,90.828,9.172,6.34,224,0.900,bicubic +legacy_seresnet18,71.742,28.258,90.334,9.666,11.78,224,0.875,bicubic +vgg13_bn,71.594,28.406,90.376,9.624,133.05,224,0.875,bilinear +vgg16,71.594,28.406,90.382,9.618,138.36,224,0.875,bilinear +gluon_resnet18_v1b,70.836,29.164,89.762,10.238,11.69,224,0.875,bicubic +vgg11_bn,70.360,29.640,89.802,10.198,132.87,224,0.875,bilinear +regnety_002,70.252,29.748,89.540,10.460,3.16,224,0.875,bicubic +vgg13,69.926,30.074,89.246,10.754,133.05,224,0.875,bilinear +resnet18,69.748,30.252,89.078,10.922,11.69,224,0.875,bilinear +vgg11,69.024,30.976,88.628,11.372,132.86,224,0.875,bilinear +regnetx_002,68.762,31.238,88.556,11.444,2.68,224,0.875,bicubic +tf_mobilenetv3_small_100,67.922,32.078,87.664,12.336,2.54,224,0.875,bilinear +dla60x_c,67.892,32.108,88.426,11.574,1.32,224,0.875,bilinear +dla46x_c,65.970,34.030,86.980,13.020,1.07,224,0.875,bilinear +tf_mobilenetv3_small_075,65.716,34.284,86.130,13.870,2.04,224,0.875,bilinear +dla46_c,64.866,35.134,86.292,13.708,1.30,224,0.875,bilinear +tf_mobilenetv3_small_minimal_100,62.906,37.094,84.230,15.770,2.04,224,0.875,bilinear diff --git a/testbed/huggingface__pytorch-image-models/results/results-sketch.csv b/testbed/huggingface__pytorch-image-models/results/results-sketch.csv new file mode 100644 index 0000000000000000000000000000000000000000..de2d85d0c7c068697f441832d0c7a24c42122adf --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/results/results-sketch.csv @@ -0,0 +1,421 @@ +model,top1,top1_err,top5,top5_err,param_count,img_size,cropt_pct,interpolation,top1_diff,top5_diff,rank_diff +ig_resnext101_32x48d,58.810,41.190,81.076,18.924,828.41,224,0.875,bilinear,-26.618,-16.496,+22 +ig_resnext101_32x32d,58.386,41.614,80.381,19.619,468.53,224,0.875,bilinear,-26.708,-17.057,+28 +ig_resnext101_32x16d,57.690,42.310,79.905,20.095,194.03,224,0.875,bilinear,-26.480,-17.291,+53 +swsl_resnext101_32x16d,57.458,42.542,80.385,19.615,194.03,224,0.875,bilinear,-25.888,-16.461,+78 +swsl_resnext101_32x8d,56.438,43.562,78.944,21.056,88.79,224,0.875,bilinear,-27.846,-18.232,+48 +ig_resnext101_32x8d,54.918,45.082,77.534,22.466,88.79,224,0.875,bilinear,-27.770,-19.102,+94 +swsl_resnext101_32x4d,53.603,46.397,76.347,23.653,44.18,224,0.875,bilinear,-29.627,-20.413,+77 +vit_large_patch16_384,52.754,47.246,74.696,25.304,304.72,384,1.000,bicubic,-34.326,-23.604,-4 +vit_large_r50_s32_384,52.039,47.961,73.558,26.442,329.09,384,1.000,bicubic,-34.145,-24.360,+2 +vit_large_patch16_224,51.832,48.168,73.694,26.306,304.33,224,0.900,bicubic,-34.010,-24.130,+6 +tf_efficientnet_l2_ns_475,51.494,48.506,73.928,26.072,480.31,475,0.936,bicubic,-36.740,-24.618,-9 +swsl_resnext50_32x4d,50.437,49.563,73.368,26.633,25.03,224,0.875,bilinear,-31.745,-22.862,+104 +swin_large_patch4_window12_384,50.404,49.596,72.564,27.436,196.74,384,1.000,bicubic,-36.744,-25.670,-10 +swsl_resnet50,49.541,50.459,72.334,27.666,25.56,224,0.875,bilinear,-31.625,-23.638,+134 +swin_large_patch4_window7_224,48.991,51.009,71.391,28.609,196.53,224,0.900,bicubic,-37.329,-26.505,-6 +swin_base_patch4_window12_384,48.553,51.447,71.813,28.187,87.90,384,1.000,bicubic,-37.879,-26.245,-8 +vit_large_r50_s32_224,48.203,51.797,70.868,29.132,328.99,224,0.900,bicubic,-36.231,-26.296,+32 +tf_efficientnet_b7_ns,47.800,52.200,69.640,30.360,66.35,600,0.949,bicubic,-39.040,-28.454,-13 +tf_efficientnet_b6_ns,47.761,52.239,69.968,30.032,43.04,528,0.942,bicubic,-38.691,-27.914,-12 +tf_efficientnet_l2_ns,47.570,52.430,70.019,29.981,480.31,800,0.960,bicubic,-40.782,-28.631,-19 +tf_efficientnetv2_l_in21ft1k,46.939,53.061,70.310,29.690,118.52,480,1.000,bicubic,-39.365,-27.668,-11 +vit_base_patch16_384,45.894,54.106,68.557,31.443,86.86,384,1.000,bicubic,-40.112,-29.443,-7 +tf_efficientnet_b8_ap,45.774,54.226,67.911,32.089,87.41,672,0.954,bicubic,-39.596,-29.383,+3 +tf_efficientnet_b5_ns,45.615,54.385,67.842,32.158,30.39,456,0.934,bicubic,-40.473,-29.910,-11 +tf_efficientnetv2_m_in21ft1k,45.582,54.418,69.150,30.849,54.14,480,1.000,bicubic,-40.006,-28.602,-6 +swin_base_patch4_window7_224,45.560,54.440,68.512,31.488,87.77,224,0.900,bicubic,-39.692,-29.050,+1 +cait_m48_448,44.245,55.755,64.653,35.347,356.46,448,1.000,bicubic,-42.239,-33.102,-21 +vit_base_r50_s16_384,43.512,56.488,66.785,33.215,98.95,384,1.000,bicubic,-41.460,-30.503,+8 +tf_efficientnet_b4_ns,43.450,56.550,65.519,34.481,19.34,380,0.922,bicubic,-41.713,-31.951,-1 +vit_base_patch16_224,43.220,56.780,65.708,34.292,86.57,224,0.900,bicubic,-41.312,-31.586,+14 +tf_efficientnet_b8,42.508,57.492,64.857,35.143,87.41,672,0.954,bicubic,-42.862,-32.533,-6 +cait_m36_384,42.398,57.602,63.324,36.676,271.22,384,1.000,bicubic,-43.656,-34.406,-18 +tf_efficientnet_b7,41.431,58.569,63.017,36.983,66.35,600,0.949,bicubic,-43.505,-34.186,+4 +tf_efficientnet_b7_ap,41.429,58.571,62.874,37.126,66.35,600,0.949,bicubic,-43.691,-34.378,-5 +tf_efficientnet_b5_ap,41.418,58.582,62.084,37.916,30.39,456,0.934,bicubic,-42.834,-34.890,+20 +resnetv2_152x4_bitm,41.302,58.698,64.307,35.693,936.53,480,1.000,bilinear,-43.614,-33.135,+2 +tf_efficientnet_b6_ap,41.099,58.901,62.355,37.645,43.04,528,0.942,bicubic,-43.689,-34.783,+3 +tf_efficientnetv2_s_in21ft1k,40.950,59.050,63.849,36.151,21.46,384,1.000,bicubic,-43.352,-33.403,+14 +tf_efficientnet_b4_ap,40.484,59.516,61.723,38.277,19.34,380,0.922,bicubic,-42.764,-34.669,+44 +vit_small_r26_s32_384,40.476,59.524,62.736,37.264,36.47,384,1.000,bicubic,-43.570,-34.592,+22 +vit_base_patch16_224_miil,40.168,59.832,60.887,39.113,86.54,224,0.875,bilinear,-44.100,-35.915,+13 +tf_efficientnetv2_l,39.830,60.170,60.801,39.199,118.52,480,1.000,bicubic,-45.660,-36.571,-21 +dm_nfnet_f3,39.818,60.182,60.610,39.390,254.92,416,0.940,bicubic,-45.704,-36.852,-23 +cait_s36_384,39.765,60.235,60.475,39.525,68.37,384,1.000,bicubic,-45.695,-37.005,-22 +efficientnetv2_rw_m,39.667,60.333,59.687,40.313,53.24,416,1.000,bicubic,-45.141,-37.461,-6 +ecaresnet269d,39.594,60.406,60.343,39.657,102.09,352,1.000,bicubic,-45.382,-36.883,-11 +tf_efficientnet_b3_ns,39.584,60.416,61.453,38.547,12.23,300,0.904,bicubic,-44.464,-35.457,+14 +dm_nfnet_f6,39.578,60.422,60.911,39.089,438.36,576,0.956,bicubic,-46.566,-36.819,-36 +dm_nfnet_f5,39.508,60.492,60.227,39.773,377.21,544,0.954,bicubic,-46.306,-37.261,-32 +efficientnet_b4,39.079,60.921,59.608,40.392,19.34,384,1.000,bicubic,-44.349,-36.988,+28 +resnetv2_152x2_bit_teacher_384,38.979,61.021,62.440,37.560,236.34,384,1.000,bicubic,-44.865,-34.678,+16 +vit_base_patch32_384,38.794,61.206,60.329,39.671,88.30,384,1.000,bicubic,-44.556,-36.507,+29 +eca_nfnet_l2,38.664,61.336,59.445,40.555,56.72,384,1.000,bicubic,-46.033,-37.819,-11 +tf_efficientnet_b5,38.356,61.644,59.913,40.087,30.39,456,0.934,bicubic,-45.456,-36.835,+15 +deit_base_distilled_patch16_384,38.260,61.740,57.783,42.217,87.63,384,1.000,bicubic,-47.162,-39.549,-31 +dm_nfnet_f4,38.224,61.776,58.626,41.374,316.07,512,0.951,bicubic,-47.490,-38.894,-38 +resnetv2_152x2_bitm,37.985,62.015,61.135,38.865,236.34,448,1.000,bilinear,-46.525,-36.297,-11 +cait_s24_384,37.873,62.127,58.079,41.921,47.06,384,1.000,bicubic,-47.173,-39.267,-26 +resnet152d,37.857,62.143,58.356,41.644,60.21,320,1.000,bicubic,-45.823,-38.382,+15 +tf_efficientnetv2_m,37.824,62.176,58.710,41.290,54.14,480,1.000,bicubic,-47.220,-38.568,-27 +resnetrs420,37.747,62.253,58.215,41.785,191.89,416,1.000,bicubic,-47.261,-38.909,-27 +resnetrs350,37.676,62.324,58.083,41.917,163.96,384,1.000,bicubic,-47.044,-38.905,-21 +pit_b_distilled_224,37.590,62.410,57.238,42.762,74.79,224,0.900,bicubic,-46.554,-39.618,-6 +resnet200d,37.505,62.495,58.297,41.703,64.69,320,1.000,bicubic,-46.457,-38.526,+1 +resnetv2_152x2_bit_teacher,37.324,62.676,59.390,40.610,236.34,224,0.875,bicubic,-45.538,-37.178,+29 +resnest269e,37.315,62.685,57.468,42.532,110.93,416,0.928,bicubic,-47.203,-39.518,-21 +resmlp_big_24_224_in22ft1k,37.244,62.756,58.184,41.816,129.14,224,0.875,bicubic,-47.150,-38.937,-17 +vit_small_r26_s32_224,37.234,62.766,59.060,40.940,36.43,224,0.900,bicubic,-44.624,-36.962,+55 +cait_s24_224,37.153,62.847,56.724,43.276,46.92,224,1.000,bicubic,-46.299,-39.840,+8 +vit_base_patch32_224,37.077,62.923,59.294,40.706,88.22,224,0.900,bicubic,-43.647,-36.274,+96 +tf_efficientnet_b3_ap,37.055,62.945,57.240,42.760,12.23,300,0.904,bicubic,-44.767,-38.384,+54 +efficientnetv2_rw_s,37.049,62.951,56.814,43.186,23.94,384,1.000,bicubic,-46.759,-39.910,-2 +seresnet152d,36.790,63.210,56.718,43.282,66.84,320,1.000,bicubic,-47.572,-40.322,-22 +resnetrs200,36.639,63.361,56.828,43.172,93.21,320,1.000,bicubic,-47.427,-40.046,-15 +efficientnet_b3,36.420,63.580,56.845,43.155,12.23,320,1.000,bicubic,-45.822,-39.269,+39 +cait_xs24_384,36.416,63.584,56.944,43.056,26.67,384,1.000,bicubic,-47.645,-39.945,-16 +deit_base_distilled_patch16_224,36.397,63.603,56.617,43.383,87.34,224,0.900,bicubic,-46.991,-39.871,+2 +resnetv2_101x3_bitm,36.381,63.619,59.070,40.930,387.93,448,1.000,bilinear,-48.059,-38.312,-31 +resnetrs270,36.320,63.680,56.562,43.438,129.86,352,1.000,bicubic,-48.114,-40.408,-31 +tresnet_m,36.285,63.715,55.796,44.204,31.39,224,0.875,bilinear,-46.795,-40.322,+9 +mixer_b16_224_miil,36.269,63.731,55.965,44.035,59.88,224,0.875,bilinear,-46.039,-39.751,+29 +tf_efficientnet_b2_ns,36.183,63.817,57.551,42.449,9.11,260,0.890,bicubic,-46.197,-38.697,+23 +dm_nfnet_f2,36.004,63.996,55.456,44.544,193.78,352,0.920,bicubic,-49.060,-41.784,-52 +ecaresnet101d,36.004,63.996,56.165,43.835,44.57,224,0.875,bicubic,-46.168,-39.881,+33 +resnest200e,35.931,64.069,55.849,44.151,70.20,320,0.909,bicubic,-47.901,-41.045,-17 +swsl_resnet18,35.858,64.142,58.455,41.545,11.69,224,0.875,bilinear,-37.418,-33.279,+305 +eca_nfnet_l1,35.823,64.177,55.957,44.043,41.41,320,1.000,bicubic,-48.187,-41.071,-23 +vit_small_patch16_384,35.479,64.521,57.549,42.451,22.20,384,1.000,bicubic,-48.323,-39.553,-17 +resnest101e,35.373,64.627,55.780,44.220,48.28,256,0.875,bilinear,-47.517,-40.540,+4 +convit_base,35.314,64.686,54.927,45.073,86.54,224,0.875,bicubic,-46.976,-41.011,+21 +twins_svt_large,35.086,64.914,54.721,45.279,99.27,224,0.900,bicubic,-48.592,-41.873,-16 +repvgg_b3g4,35.043,64.957,54.772,45.228,83.83,224,0.875,bilinear,-45.169,-40.338,+98 +repvgg_b3,35.043,64.957,54.542,45.458,123.09,224,0.875,bilinear,-45.449,-40.718,+80 +dm_nfnet_f1,34.990,65.010,54.108,45.892,132.63,320,0.910,bicubic,-49.636,-42.992,-51 +resnet101d,34.872,65.128,54.202,45.798,44.57,320,1.000,bicubic,-48.150,-42.244,-4 +resmlp_big_24_distilled_224,34.788,65.213,54.637,45.363,129.14,224,0.875,bicubic,-48.803,-42.011,-20 +vit_large_patch32_384,34.673,65.326,55.729,44.271,306.63,384,1.000,bicubic,-46.833,-40.363,+37 +dm_nfnet_f0,34.618,65.382,54.672,45.328,71.49,256,0.900,bicubic,-48.767,-41.900,-18 +ssl_resnext101_32x16d,34.605,65.395,55.931,44.069,194.03,224,0.875,bilinear,-47.239,-40.165,+25 +repvgg_b2g4,34.587,65.413,54.782,45.218,61.76,224,0.875,bilinear,-44.779,-39.906,+131 +resnest50d_4s2x40d,34.355,65.645,54.725,45.275,30.42,224,0.875,bicubic,-46.753,-40.833,+49 +resnetrs152,34.355,65.645,53.562,46.438,86.62,320,1.000,bicubic,-49.357,-43.052,-30 +tf_efficientnet_b1_ns,34.157,65.843,55.489,44.511,7.79,240,0.882,bicubic,-47.231,-40.249,+36 +twins_pcpvt_large,34.111,65.888,54.128,45.872,60.99,224,0.900,bicubic,-49.029,-42.470,-18 +tf_efficientnet_b4,34.064,65.936,54.198,45.802,19.34,380,0.922,bicubic,-48.958,-42.102,-13 +ssl_resnext101_32x8d,34.017,65.983,55.601,44.399,88.79,224,0.875,bilinear,-47.599,-40.437,+24 +nfnet_l0,34.002,65.999,54.365,45.635,35.07,288,1.000,bicubic,-48.748,-42.151,-10 +tf_efficientnet_b6,33.998,66.002,54.544,45.456,43.04,528,0.942,bicubic,-50.112,-42.342,-50 +efficientnet_b3_pruned,33.996,66.004,54.108,45.892,9.86,300,0.904,bicubic,-46.862,-41.134,+52 +regnety_160,33.976,66.024,53.546,46.454,83.59,288,1.000,bicubic,-49.710,-43.230,-37 +pit_s_distilled_224,33.939,66.061,53.265,46.735,24.04,224,0.900,bicubic,-48.057,-42.533,+10 +resnetv2_50x3_bitm,33.658,66.342,55.882,44.118,217.32,448,1.000,bilinear,-50.356,-41.242,-49 +resnet51q,33.563,66.437,53.021,46.979,35.70,288,1.000,bilinear,-48.797,-43.159,-7 +regnety_032,33.412,66.588,52.754,47.246,19.44,288,1.000,bicubic,-49.312,-43.670,-16 +gernet_l,33.357,66.643,51.901,48.099,31.08,256,0.875,bilinear,-47.997,-43.635,+26 +tresnet_xl,33.257,66.743,52.294,47.706,78.44,224,0.875,bilinear,-48.797,-43.642,+3 +resnest50d_1s4x24d,33.147,66.853,52.839,47.161,25.68,224,0.875,bicubic,-47.841,-42.483,+38 +twins_pcpvt_base,33.021,66.979,52.485,47.515,43.83,224,0.900,bicubic,-49.687,-43.861,-19 +rexnet_200,32.987,67.013,52.939,47.061,16.37,224,0.875,bicubic,-48.645,-42.729,+10 +resnest50d,32.972,67.028,52.713,47.287,27.48,224,0.875,bilinear,-48.002,-42.665,+36 +tf_efficientnetv2_s,32.915,67.085,51.726,48.274,21.46,384,1.000,bicubic,-50.979,-44.972,-55 +convit_small,32.913,67.087,52.123,47.877,27.78,224,0.875,bicubic,-48.513,-43.621,+15 +vit_small_patch16_224,32.885,67.115,53.923,46.077,22.05,224,0.900,bicubic,-48.517,-42.211,+15 +tf_efficientnet_b3,32.860,67.140,52.950,47.050,12.23,300,0.904,bicubic,-48.776,-42.768,+4 +pnasnet5large,32.848,67.152,50.500,49.500,86.06,331,0.911,bicubic,-49.934,-45.540,-29 +twins_svt_base,32.836,67.164,51.559,48.441,56.07,224,0.900,bicubic,-50.300,-44.859,-39 +nasnetalarge,32.775,67.225,50.141,49.859,88.75,331,0.911,bicubic,-49.845,-45.906,-26 +gernet_m,32.740,67.260,51.913,48.087,21.14,224,0.875,bilinear,-47.992,-43.271,+37 +inception_resnet_v2,32.738,67.262,50.648,49.352,55.84,299,0.897,bicubic,-47.720,-44.658,+48 +gluon_resnet152_v1d,32.734,67.266,51.088,48.912,60.21,224,0.875,bicubic,-47.740,-44.118,+45 +pit_b_224,32.718,67.282,49.852,50.148,73.76,224,0.900,bicubic,-49.728,-45.858,-27 +tf_efficientnet_b2_ap,32.681,67.319,52.239,47.761,9.11,260,0.890,bicubic,-47.619,-42.979,+53 +tresnet_l,32.559,67.441,51.139,48.861,55.99,224,0.875,bilinear,-48.931,-44.485,+2 +cait_xxs36_384,32.549,67.451,52.233,47.767,17.37,384,1.000,bicubic,-49.645,-43.915,-19 +wide_resnet50_2,32.439,67.561,51.459,48.541,68.88,224,0.875,bicubic,-49.017,-44.073,+1 +ens_adv_inception_resnet_v2,32.370,67.629,50.427,49.573,55.84,299,0.897,bicubic,-47.611,-44.511,+64 +deit_base_patch16_224,32.363,67.637,51.011,48.989,86.57,224,0.900,bicubic,-49.635,-44.723,-17 +swin_small_patch4_window7_224,32.341,67.659,50.905,49.095,49.61,224,0.900,bicubic,-50.871,-45.417,-53 +gluon_resnet152_v1s,32.331,67.669,50.526,49.474,60.32,224,0.875,bicubic,-48.685,-44.886,+15 +deit_small_distilled_patch16_224,32.284,67.716,52.102,47.898,22.44,224,0.900,bicubic,-48.916,-43.276,+7 +gluon_seresnext101_64x4d,32.205,67.795,50.319,49.681,88.23,224,0.875,bicubic,-48.689,-44.989,+19 +coat_lite_small,32.127,67.873,49.934,50.066,19.84,224,0.900,bicubic,-50.181,-45.916,-33 +gluon_seresnext101_32x4d,32.107,67.893,51.237,48.763,48.96,224,0.875,bicubic,-48.797,-44.057,+16 +deit_base_patch16_384,31.989,68.011,50.547,49.453,86.86,384,1.000,bicubic,-51.117,-45.825,-56 +seresnext50_32x4d,31.985,68.015,51.231,48.769,27.56,224,0.875,bicubic,-49.281,-44.389,0 +levit_384,31.877,68.123,50.598,49.402,39.13,224,0.900,bicubic,-50.709,-45.418,-44 +resnetrs101,31.858,68.142,51.017,48.983,63.62,288,0.940,bicubic,-50.430,-44.991,-35 +cspresnext50,31.822,68.178,51.602,48.398,20.57,224,0.875,bilinear,-48.218,-43.342,+50 +tnt_s_patch16_224,31.643,68.357,51.143,48.857,23.76,224,0.900,bicubic,-49.875,-44.605,-17 +eca_nfnet_l0,31.612,68.388,51.612,48.388,24.14,288,1.000,bicubic,-50.968,-44.878,-47 +resnetv2_50x1_bit_distilled,31.584,68.416,51.263,48.737,25.55,224,0.875,bicubic,-51.234,-45.259,-56 +resnet50,31.547,68.453,50.170,49.830,25.56,224,0.875,bicubic,-47.491,-44.220,+102 +ssl_resnext101_32x4d,31.423,68.577,52.121,47.879,44.18,224,0.875,bilinear,-49.501,-43.607,+5 +inception_v4,31.378,68.622,49.244,50.756,42.68,299,0.875,bicubic,-48.790,-45.724,+39 +rexnet_150,31.366,68.634,51.288,48.712,9.73,224,0.875,bicubic,-48.944,-43.878,+28 +pit_s_224,31.333,68.667,49.661,50.339,23.46,224,0.900,bicubic,-49.761,-45.671,-5 +cait_xxs36_224,31.278,68.722,50.616,49.384,17.30,224,1.000,bicubic,-48.472,-44.250,+58 +cspresnet50,31.270,68.730,51.223,48.777,21.62,256,0.887,bilinear,-48.304,-43.489,+65 +coat_mini,31.203,68.797,49.773,50.227,10.34,224,0.900,bicubic,-50.065,-45.619,-15 +ecaresnetlight,31.121,68.879,50.243,49.757,30.16,224,0.875,bicubic,-49.341,-45.007,+16 +gluon_resnet101_v1s,31.115,68.885,49.793,50.207,44.67,224,0.875,bicubic,-49.187,-45.367,+23 +tf_efficientnet_cc_b0_8e,31.087,68.913,50.761,49.239,24.01,224,0.875,bicubic,-46.821,-42.892,+141 +resmlp_36_distilled_224,31.070,68.930,49.683,50.317,44.69,224,0.875,bicubic,-50.090,-45.805,-14 +ecaresnet50d,31.058,68.942,50.848,49.152,25.58,224,0.875,bicubic,-49.534,-44.472,+6 +ecaresnet50t,31.058,68.942,50.577,49.423,25.57,320,0.950,bicubic,-51.288,-45.561,-58 +resnet50d,31.020,68.980,49.808,50.192,25.58,224,0.875,bicubic,-49.510,-45.352,+5 +cspdarknet53,31.018,68.981,50.390,49.610,27.64,256,0.887,bilinear,-49.040,-44.694,+30 +gluon_resnet152_v1c,30.991,69.009,48.924,51.076,60.21,224,0.875,bicubic,-48.919,-45.916,+35 +gluon_resnext101_64x4d,30.987,69.013,48.549,51.451,83.46,224,0.875,bicubic,-49.617,-46.439,0 +twins_svt_small,30.985,69.015,49.223,50.777,24.06,224,0.900,bicubic,-50.697,-46.447,-43 +resmlp_24_distilled_224,30.901,69.099,50.178,49.822,30.02,224,0.875,bicubic,-49.865,-45.040,-7 +tf_efficientnet_cc_b1_8e,30.899,69.101,50.080,49.920,39.72,240,0.882,bicubic,-48.409,-44.290,+63 +ecaresnet101d_pruned,30.897,69.103,50.013,49.987,24.88,224,0.875,bicubic,-49.921,-45.615,-11 +gluon_resnext101_32x4d,30.877,69.123,48.537,51.463,44.18,224,0.875,bicubic,-49.457,-46.389,+7 +tf_efficientnetv2_b3,30.861,69.139,49.814,50.186,14.36,300,0.904,bicubic,-51.109,-45.968,-53 +tf_efficientnet_lite4,30.830,69.170,50.386,49.614,13.01,380,0.920,bilinear,-50.706,-45.282,-45 +nf_resnet50,30.702,69.298,49.958,50.042,25.56,288,0.940,bicubic,-49.958,-45.378,-10 +dpn107,30.678,69.322,48.810,51.190,86.92,224,0.875,bicubic,-49.478,-46.100,+16 +ese_vovnet39b,30.657,69.343,49.875,50.125,24.57,224,0.875,bicubic,-48.663,-44.837,+54 +gluon_resnet152_v1b,30.623,69.376,48.521,51.479,60.19,224,0.875,bicubic,-49.063,-46.215,+39 +tresnet_xl_448,30.614,69.386,49.069,50.931,78.44,448,0.875,bilinear,-52.436,-47.105,-91 +ssl_resnext50_32x4d,30.594,69.406,50.657,49.343,25.03,224,0.875,bilinear,-49.724,-44.749,0 +gluon_resnet101_v1d,30.523,69.477,47.950,52.050,44.57,224,0.875,bicubic,-49.891,-47.064,-5 +dpn68b,30.517,69.483,49.158,50.842,12.61,224,0.875,bicubic,-48.699,-45.256,+61 +resnest26d,30.490,69.510,50.677,49.323,17.07,224,0.875,bilinear,-47.988,-43.621,+88 +efficientnet_b2,30.435,69.565,49.698,50.302,9.11,288,1.000,bicubic,-50.177,-45.620,-18 +tf_efficientnet_b1_ap,30.421,69.579,49.553,50.447,7.79,240,0.882,bicubic,-48.859,-44.753,+54 +twins_pcpvt_small,30.382,69.618,49.386,50.614,24.11,224,0.900,bicubic,-50.706,-46.256,-36 +visformer_small,30.329,69.671,48.285,51.715,40.22,224,0.900,bicubic,-51.777,-47.587,-71 +pit_xs_distilled_224,30.278,69.722,49.836,50.164,11.00,224,0.900,bicubic,-49.028,-44.528,+47 +seresnet50,30.077,69.923,49.292,50.708,28.09,224,0.875,bicubic,-50.197,-45.778,-4 +dpn98,30.067,69.933,48.244,51.756,61.57,224,0.875,bicubic,-49.575,-46.354,+29 +tf_efficientnet_b2,30.026,69.974,49.581,50.419,9.11,260,0.890,bicubic,-50.060,-45.328,+3 +dpn131,30.024,69.976,48.146,51.854,79.25,224,0.875,bicubic,-49.798,-46.564,+17 +efficientnet_el,30.018,69.982,48.834,51.166,10.59,300,0.904,bicubic,-51.298,-46.692,-53 +legacy_senet154,30.001,69.999,48.034,51.966,115.09,224,0.875,bilinear,-51.309,-47.462,-53 +dpn92,29.953,70.047,49.162,50.838,37.67,224,0.875,bicubic,-50.055,-45.674,+2 +resnetv2_101x1_bitm,29.898,70.102,51.121,48.879,44.54,448,1.000,bilinear,-52.434,-45.397,-90 +gluon_senet154,29.877,70.123,47.894,52.106,115.09,224,0.875,bicubic,-51.357,-47.454,-53 +xception,29.865,70.135,48.686,51.314,22.86,299,0.897,bicubic,-49.187,-45.706,+53 +adv_inception_v3,29.816,70.184,47.847,52.153,23.83,299,0.875,bicubic,-47.766,-45.889,+112 +gluon_xception65,29.784,70.216,47.755,52.245,39.92,299,0.903,bicubic,-49.932,-47.105,+16 +resmlp_36_224,29.692,70.308,48.969,51.031,44.69,224,0.875,bicubic,-50.078,-45.917,+10 +resnetblur50,29.625,70.375,48.248,51.752,25.56,224,0.875,bicubic,-49.661,-46.390,+36 +efficientnet_em,29.486,70.514,48.946,51.054,6.90,240,0.882,bicubic,-49.766,-45.848,+37 +resnext101_32x8d,29.439,70.561,48.486,51.514,88.79,224,0.875,bilinear,-49.869,-46.032,+28 +coat_lite_mini,29.433,70.567,47.724,52.276,11.01,224,0.900,bicubic,-49.655,-46.880,+42 +ssl_resnet50,29.423,70.577,49.781,50.219,25.56,224,0.875,bilinear,-49.799,-45.051,+35 +deit_small_patch16_224,29.421,70.579,48.256,51.744,22.05,224,0.900,bicubic,-50.435,-46.796,-2 +nf_regnet_b1,29.390,70.611,49.425,50.575,10.22,288,0.900,bicubic,-49.903,-45.323,+29 +cait_xxs24_384,29.387,70.612,48.753,51.247,12.03,384,1.000,bicubic,-51.578,-46.893,-54 +swin_tiny_patch4_window7_224,29.334,70.666,47.602,52.398,28.29,224,0.900,bicubic,-52.044,-47.938,-72 +resnext50_32x4d,29.331,70.669,47.397,52.603,25.03,224,0.875,bicubic,-50.438,-47.201,+1 +resnet34d,29.328,70.671,48.409,51.591,21.82,224,0.875,bicubic,-47.788,-44.973,+113 +cait_xxs24_224,29.303,70.697,48.535,51.465,11.96,224,1.000,bicubic,-49.083,-45.775,+65 +ecaresnet50d_pruned,29.215,70.785,48.453,51.547,19.94,224,0.875,bicubic,-50.501,-46.427,+1 +tresnet_l_448,29.165,70.835,47.232,52.768,55.99,448,0.875,bilinear,-53.103,-48.744,-104 +gluon_inception_v3,29.122,70.878,46.957,53.043,23.83,299,0.875,bicubic,-49.684,-47.413,+42 +xception71,29.047,70.953,47.405,52.595,42.34,299,0.903,bicubic,-50.826,-47.517,-13 +hrnet_w64,28.989,71.011,47.142,52.858,128.06,224,0.875,bilinear,-50.485,-47.510,+7 +tf_efficientnet_b0_ns,28.902,71.098,49.011,50.989,5.29,224,0.875,bicubic,-49.756,-45.365,+46 +xception65,28.896,71.104,47.167,52.833,39.92,299,0.903,bicubic,-50.656,-47.487,+2 +tf_efficientnet_b1,28.886,71.114,47.503,52.497,7.79,240,0.882,bicubic,-49.940,-46.695,+36 +gluon_resnet101_v1b,28.878,71.121,46.389,53.611,44.55,224,0.875,bicubic,-50.427,-48.135,+12 +vit_small_patch32_384,28.871,71.129,48.887,51.113,22.92,384,1.000,bicubic,-51.609,-46.711,-52 +skresnext50_32x4d,28.818,71.182,46.497,53.503,27.48,224,0.875,bicubic,-51.338,-48.145,-31 +levit_256,28.745,71.255,46.723,53.277,18.89,224,0.900,bicubic,-52.765,-48.767,-94 +tf_efficientnet_lite3,28.660,71.340,47.354,52.646,8.20,300,0.904,bilinear,-51.160,-47.560,-16 +gluon_seresnext50_32x4d,28.651,71.349,46.436,53.564,27.56,224,0.875,bicubic,-51.267,-48.386,-28 +skresnet34,28.645,71.355,47.953,52.047,22.28,224,0.875,bicubic,-48.267,-45.369,+105 +hrnet_w40,28.641,71.359,47.454,52.546,57.56,224,0.875,bilinear,-50.279,-47.016,+25 +tf_efficientnetv2_b0,28.566,71.434,47.079,52.921,7.14,224,0.875,bicubic,-49.790,-46.945,+51 +tv_resnet152,28.533,71.467,47.118,52.882,60.19,224,0.875,bilinear,-49.779,-46.920,+51 +repvgg_b2,28.427,71.573,47.038,52.962,89.02,224,0.875,bilinear,-50.365,-47.376,+28 +hrnet_w48,28.413,71.587,47.586,52.414,77.47,224,0.875,bilinear,-50.887,-46.926,+3 +gluon_resnext50_32x4d,28.375,71.624,45.328,54.672,25.03,224,0.875,bicubic,-50.978,-49.098,-4 +efficientnet_b2_pruned,28.362,71.638,47.051,52.949,8.31,260,0.890,bicubic,-51.554,-47.805,-35 +tf_efficientnet_b0_ap,28.346,71.654,47.531,52.469,5.29,224,0.875,bicubic,-48.740,-45.725,+91 +tf_efficientnet_cc_b0_4e,28.315,71.685,47.364,52.636,13.31,224,0.875,bicubic,-48.991,-45.970,+83 +dla102x2,28.313,71.687,46.761,53.239,41.28,224,0.875,bilinear,-51.135,-47.879,-11 +dla169,28.313,71.687,47.391,52.609,53.39,224,0.875,bilinear,-50.375,-46.945,+24 +mixnet_xl,28.287,71.713,46.702,53.298,11.90,224,0.875,bicubic,-52.189,-48.234,-68 +gluon_resnet50_v1d,28.246,71.754,45.878,54.122,25.58,224,0.875,bicubic,-50.828,-48.592,+8 +wide_resnet101_2,28.108,71.892,46.401,53.599,126.89,224,0.875,bilinear,-50.748,-47.881,+14 +gluon_resnet101_v1c,28.104,71.896,45.961,54.039,44.57,224,0.875,bicubic,-51.430,-48.617,-20 +regnetx_320,28.093,71.907,45.126,54.874,107.81,224,0.875,bicubic,-52.153,-49.900,-57 +densenet161,28.081,71.919,46.641,53.359,28.68,224,0.875,bicubic,-49.277,-46.997,+74 +regnety_320,28.059,71.941,45.444,54.556,145.05,224,0.875,bicubic,-52.753,-49.800,-85 +gernet_s,28.022,71.978,46.723,53.277,8.17,224,0.875,bilinear,-48.894,-46.409,+85 +levit_192,28.016,71.984,45.880,54.120,10.95,224,0.900,bicubic,-51.826,-48.906,-41 +efficientnet_el_pruned,28.016,71.984,46.790,53.210,10.59,300,0.904,bicubic,-52.284,-48.238,-64 +xception41,27.888,72.112,45.890,54.110,26.97,299,0.903,bicubic,-50.628,-48.388,+17 +regnetx_160,27.817,72.183,45.617,54.383,54.28,224,0.875,bicubic,-52.039,-49.213,-45 +tf_inception_v3,27.780,72.220,45.721,54.279,23.83,299,0.875,bicubic,-50.082,-47.919,+51 +res2net101_26w_4s,27.768,72.232,45.179,54.821,45.21,224,0.875,bilinear,-51.430,-49.253,-8 +tf_efficientnetv2_b1,27.760,72.240,46.578,53.422,8.14,240,0.882,bicubic,-51.702,-48.144,-28 +repvgg_b1,27.656,72.344,46.531,53.469,57.42,224,0.875,bilinear,-50.710,-47.567,+25 +hrnet_w44,27.621,72.379,45.837,54.163,67.06,224,0.875,bilinear,-51.275,-48.531,-1 +inception_v3,27.556,72.444,45.263,54.737,23.83,299,0.875,bicubic,-49.882,-48.211,+59 +resmlp_24_224,27.521,72.479,45.696,54.304,30.02,224,0.875,bicubic,-51.853,-48.851,-30 +pit_xs_224,27.491,72.509,45.900,54.100,10.62,224,0.900,bicubic,-50.691,-48.268,+28 +regnetx_080,27.405,72.595,45.002,54.998,39.57,224,0.875,bicubic,-51.789,-49.558,-14 +hrnet_w30,27.381,72.619,46.554,53.446,37.71,224,0.875,bilinear,-50.825,-47.668,+25 +hrnet_w32,27.369,72.631,45.994,54.006,41.23,224,0.875,bilinear,-51.081,-48.192,+11 +gluon_resnet50_v1s,27.326,72.674,45.222,54.778,25.68,224,0.875,bicubic,-51.386,-49.016,-1 +densenet201,27.265,72.735,46.222,53.778,20.01,224,0.875,bicubic,-50.021,-47.256,+57 +densenetblur121d,27.228,72.772,46.299,53.701,8.00,224,0.875,bicubic,-49.360,-46.893,+77 +regnety_064,27.220,72.780,44.847,55.153,30.58,224,0.875,bicubic,-52.502,-49.921,-52 +efficientnet_b1_pruned,27.181,72.819,45.872,54.128,6.33,240,0.882,bicubic,-51.055,-47.962,+18 +tf_efficientnetv2_b2,27.163,72.837,44.570,55.430,10.10,260,0.890,bicubic,-53.045,-50.472,-78 +resnetrs50,27.110,72.890,45.029,54.971,35.69,224,0.910,bicubic,-52.782,-49.939,-67 +rexnet_130,27.094,72.906,45.933,54.067,7.56,224,0.875,bicubic,-52.406,-48.749,-46 +res2net50_26w_8s,27.078,72.921,44.428,55.572,48.40,224,0.875,bilinear,-52.119,-49.940,-27 +dla102x,27.061,72.939,45.475,54.525,26.31,224,0.875,bilinear,-51.449,-48.753,-4 +gmixer_24_224,27.027,72.972,44.361,55.639,24.72,224,0.875,bicubic,-51.008,-49.303,+20 +tv_resnet101,26.963,73.037,45.234,54.766,44.55,224,0.875,bilinear,-50.411,-48.306,+44 +resnext50d_32x4d,26.876,73.124,44.436,55.564,25.05,224,0.875,bicubic,-52.800,-50.430,-57 +regnetx_120,26.868,73.132,44.682,55.318,46.11,224,0.875,bicubic,-52.728,-50.056,-56 +rexnet_100,26.831,73.169,45.369,54.631,4.80,224,0.875,bicubic,-51.027,-48.501,+27 +densenet169,26.829,73.171,45.373,54.627,14.15,224,0.875,bicubic,-49.077,-47.653,+76 +legacy_seresnext101_32x4d,26.811,73.189,43.497,56.503,48.96,224,0.875,bilinear,-53.417,-51.521,-91 +regnety_120,26.788,73.212,44.454,55.546,51.82,224,0.875,bicubic,-53.578,-50.672,-103 +regnetx_064,26.784,73.216,44.927,55.073,26.21,224,0.875,bicubic,-52.288,-49.531,-31 +regnetx_032,26.703,73.297,45.236,54.764,15.30,224,0.875,bicubic,-51.469,-48.852,+6 +legacy_seresnet152,26.676,73.324,43.947,56.053,66.82,224,0.875,bilinear,-51.984,-50.423,-19 +densenet121,26.664,73.336,45.900,54.100,7.98,224,0.875,bicubic,-48.914,-46.752,+74 +efficientnet_es,26.621,73.379,45.112,54.888,5.44,224,0.875,bicubic,-51.445,-48.814,+7 +res2net50_26w_6s,26.595,73.405,43.990,56.010,37.05,224,0.875,bilinear,-51.975,-50.134,-20 +repvgg_b1g4,26.579,73.421,45.084,54.916,39.97,224,0.875,bilinear,-51.015,-48.742,+23 +dla60x,26.552,73.448,45.023,54.977,17.35,224,0.875,bilinear,-51.694,-48.995,-5 +regnety_080,26.524,73.476,44.359,55.641,39.18,224,0.875,bicubic,-53.352,-50.471,-86 +coat_lite_tiny,26.507,73.493,44.644,55.356,5.72,224,0.900,bicubic,-51.005,-49.272,+24 +tf_efficientnet_b0,26.485,73.515,45.646,54.354,5.29,224,0.875,bicubic,-50.363,-47.582,+43 +res2net50_14w_8s,26.483,73.517,44.371,55.629,25.06,224,0.875,bilinear,-51.667,-49.477,-3 +mobilenetv3_large_100_miil,26.481,73.519,44.473,55.527,5.48,224,0.875,bilinear,-51.435,-48.437,+7 +gluon_resnet50_v1b,26.436,73.564,44.035,55.965,25.56,224,0.875,bicubic,-51.144,-49.681,+18 +tf_efficientnet_el,26.357,73.643,44.175,55.825,10.59,300,0.904,bicubic,-53.893,-50.953,-109 +levit_128,26.332,73.668,44.096,55.904,9.21,224,0.900,bicubic,-52.154,-49.914,-27 +resmlp_big_24_224,26.318,73.682,43.556,56.444,129.14,224,0.875,bicubic,-54.710,-51.466,-146 +resmlp_12_distilled_224,26.314,73.686,44.874,55.126,15.35,224,0.875,bicubic,-51.630,-48.684,+1 +regnetx_040,26.243,73.757,44.438,55.562,22.12,224,0.875,bicubic,-52.239,-49.806,-29 +vit_small_patch32_224,26.151,73.849,45.104,54.896,22.88,224,0.900,bicubic,-49.839,-48.168,+51 +dpn68,26.129,73.871,44.228,55.772,12.61,224,0.875,bicubic,-50.189,-48.750,+46 +efficientnet_b1,26.061,73.939,44.080,55.920,7.79,256,1.000,bicubic,-52.733,-50.262,-43 +hrnet_w18,25.986,74.014,44.813,55.187,21.30,224,0.875,bilinear,-50.772,-48.631,+33 +hardcorenas_f,25.951,74.049,44.220,55.780,8.20,224,0.875,bilinear,-52.153,-49.582,-13 +regnety_040,25.923,74.077,43.848,56.152,20.65,224,0.875,bicubic,-53.297,-50.808,-63 +resnet34,25.888,74.112,43.982,56.018,21.80,224,0.875,bilinear,-49.222,-48.302,+63 +res2net50_26w_4s,25.866,74.134,43.155,56.845,25.70,224,0.875,bilinear,-52.098,-50.699,-9 +tresnet_m_448,25.852,74.148,42.874,57.126,31.39,448,0.875,bilinear,-55.862,-52.698,-184 +coat_tiny,25.843,74.157,43.276,56.724,5.50,224,0.900,bicubic,-52.591,-50.761,-34 +hardcorenas_c,25.815,74.185,44.772,55.228,5.52,224,0.875,bilinear,-51.239,-48.386,+18 +gluon_resnet50_v1c,25.784,74.216,43.031,56.969,25.58,224,0.875,bicubic,-52.228,-50.957,-16 +selecsls60,25.729,74.272,44.065,55.935,30.67,224,0.875,bicubic,-52.254,-49.764,-15 +hardcorenas_e,25.662,74.338,43.412,56.588,8.07,224,0.875,bilinear,-52.132,-50.282,-8 +dla60_res2net,25.652,74.348,43.599,56.401,20.85,224,0.875,bilinear,-52.812,-50.607,-42 +dla60_res2next,25.640,74.360,43.670,56.330,17.03,224,0.875,bilinear,-52.800,-50.482,-41 +ecaresnet26t,25.538,74.462,43.660,56.340,16.01,320,0.950,bicubic,-54.316,-51.424,-109 +resmlp_12_224,25.518,74.482,44.324,55.676,15.35,224,0.875,bicubic,-51.136,-48.856,+21 +mixnet_l,25.512,74.488,43.455,56.545,7.33,224,0.875,bicubic,-53.464,-50.727,-65 +tf_efficientnet_lite1,25.499,74.501,43.585,56.415,5.42,240,0.882,bicubic,-51.143,-49.641,+20 +tv_resnext50_32x4d,25.455,74.545,42.787,57.213,25.03,224,0.875,bilinear,-52.165,-50.909,-12 +repvgg_a2,25.436,74.564,43.939,56.061,28.21,224,0.875,bilinear,-51.024,-49.065,+25 +tf_mixnet_l,25.422,74.578,42.534,57.466,7.33,224,0.875,bicubic,-53.352,-51.464,-61 +hardcorenas_b,25.402,74.598,44.190,55.810,5.18,224,0.875,bilinear,-51.136,-48.564,+20 +res2next50,25.389,74.611,42.508,57.492,24.67,224,0.875,bilinear,-52.857,-51.384,-40 +legacy_seresnet101,25.334,74.666,42.825,57.175,49.33,224,0.875,bilinear,-53.048,-51.439,-46 +selecsls60b,25.332,74.668,43.559,56.441,32.77,224,0.875,bicubic,-53.080,-50.615,-49 +resnetv2_50x1_bitm,25.324,74.676,45.359,54.641,25.55,448,1.000,bilinear,-55.018,-50.325,-149 +dla102,25.316,74.684,43.827,56.173,33.27,224,0.875,bilinear,-52.716,-50.119,-34 +hardcorenas_d,25.300,74.700,43.121,56.879,7.50,224,0.875,bilinear,-52.132,-50.363,-12 +resnest14d,25.284,74.716,44.114,55.886,10.61,224,0.875,bilinear,-50.222,-48.404,+30 +legacy_seresnext50_32x4d,25.210,74.790,41.936,58.064,27.56,224,0.875,bilinear,-53.868,-52.500,-83 +mixer_b16_224,25.121,74.879,41.227,58.773,59.88,224,0.875,bicubic,-51.481,-51.001,+8 +res2net50_48w_2s,25.027,74.973,42.208,57.792,25.29,224,0.875,bilinear,-52.495,-51.346,-20 +efficientnet_b0,25.015,74.985,42.787,57.213,5.29,224,0.875,bicubic,-52.683,-50.745,-28 +gluon_resnet34_v1b,24.939,75.061,42.243,57.757,21.80,224,0.875,bicubic,-49.649,-49.747,+43 +mobilenetv2_120d,24.937,75.063,43.058,56.942,5.83,224,0.875,bicubic,-52.347,-50.434,-14 +dla60,24.933,75.067,43.296,56.704,22.04,224,0.875,bilinear,-52.099,-50.022,-8 +regnety_016,24.811,75.189,42.616,57.384,11.20,224,0.875,bicubic,-53.051,-51.104,-36 +tf_efficientnet_lite2,24.530,75.470,42.280,57.720,6.09,260,0.890,bicubic,-52.938,-51.474,-24 +skresnet18,24.483,75.517,42.536,57.464,11.96,224,0.875,bicubic,-48.555,-48.632,+51 +regnetx_016,24.473,75.527,42.514,57.486,9.19,224,0.875,bicubic,-52.477,-50.906,-11 +pit_ti_distilled_224,24.406,75.594,42.730,57.270,5.10,224,0.900,bicubic,-50.124,-49.366,+37 +tf_efficientnet_lite0,24.373,75.627,42.487,57.513,4.65,224,0.875,bicubic,-50.457,-49.689,+30 +hardcorenas_a,24.369,75.631,43.284,56.716,5.26,224,0.875,bilinear,-51.547,-49.230,+9 +tv_resnet50,24.070,75.930,41.313,58.687,25.56,224,0.875,bilinear,-52.068,-51.551,+4 +levit_128s,24.058,75.942,41.007,58.993,7.78,224,0.900,bicubic,-52.472,-51.859,-2 +legacy_seresnet34,24.027,75.973,41.909,58.091,21.96,224,0.875,bilinear,-50.781,-50.215,+27 +resnet18d,23.929,76.071,42.300,57.700,11.71,224,0.875,bicubic,-48.331,-48.396,+50 +efficientnet_lite0,23.909,76.091,42.088,57.912,4.65,224,0.875,bicubic,-51.575,-50.422,+12 +tv_densenet121,23.844,76.156,41.925,58.075,7.98,224,0.875,bicubic,-50.894,-50.225,+25 +efficientnet_es_pruned,23.828,76.172,41.995,58.005,5.44,224,0.875,bicubic,-51.172,-50.453,+21 +mobilenetv2_140,23.712,76.288,41.477,58.523,6.11,224,0.875,bicubic,-52.804,-51.519,-7 +mixnet_m,23.710,76.290,41.141,58.859,5.01,224,0.875,bicubic,-53.550,-52.284,-30 +dla34,23.669,76.331,41.551,58.449,15.74,224,0.875,bilinear,-50.961,-50.527,+23 +legacy_seresnet50,23.651,76.349,40.091,59.909,28.09,224,0.875,bilinear,-53.978,-53.657,-48 +ese_vovnet19b_dw,23.535,76.465,41.288,58.712,6.54,224,0.875,bicubic,-53.263,-51.980,-21 +tf_mixnet_m,23.484,76.516,40.989,59.011,5.01,224,0.875,bicubic,-53.458,-52.163,-26 +tv_resnet34,23.473,76.527,41.367,58.633,21.80,224,0.875,bilinear,-49.839,-50.059,+30 +tf_efficientnet_em,23.359,76.641,40.404,59.596,6.90,240,0.882,bicubic,-54.771,-53.640,-69 +selecsls42b,23.357,76.643,40.677,59.323,32.46,224,0.875,bicubic,-53.817,-52.713,-36 +repvgg_b0,23.316,76.684,41.182,58.818,15.82,224,0.875,bilinear,-51.837,-51.236,+5 +mobilenetv2_110d,23.066,76.934,40.716,59.284,4.52,224,0.875,bicubic,-51.970,-51.470,+9 +deit_tiny_distilled_patch16_224,22.718,77.282,40.771,59.229,5.91,224,0.900,bicubic,-51.792,-51.119,+17 +mobilenetv3_large_100,22.655,77.345,40.781,59.219,5.48,224,0.875,bicubic,-53.111,-51.761,-9 +mobilenetv3_rw,22.630,77.370,40.374,59.626,5.48,224,0.875,bicubic,-53.004,-52.334,-8 +tf_mobilenetv3_large_100,22.569,77.431,39.767,60.233,5.48,224,0.875,bilinear,-52.949,-52.839,-7 +tf_efficientnet_es,22.413,77.587,39.095,60.905,5.44,224,0.875,bicubic,-54.180,-54.107,-26 +hrnet_w18_small_v2,22.337,77.663,39.861,60.139,15.60,224,0.875,bilinear,-52.777,-52.555,0 +convit_tiny,22.282,77.718,39.669,60.331,5.71,224,0.875,bicubic,-50.834,-52.045,+21 +regnety_008,22.119,77.881,38.900,61.100,6.26,224,0.875,bicubic,-54.197,-54.166,-22 +seresnext26t_32x4d,21.991,78.009,38.482,61.518,16.81,224,0.875,bicubic,-55.995,-55.264,-75 +regnety_006,21.971,78.029,38.955,61.045,6.06,224,0.875,bicubic,-53.275,-53.577,-7 +vit_tiny_r_s16_p8_384,21.954,78.046,39.405,60.595,6.36,384,1.000,bicubic,-53.998,-53.855,-21 +regnetx_008,21.940,78.060,38.928,61.072,7.26,224,0.875,bicubic,-53.098,-53.408,-4 +resnet26d,21.907,78.094,38.619,61.381,16.01,224,0.875,bicubic,-54.789,-54.531,-38 +semnasnet_100,21.903,78.097,38.600,61.400,3.89,224,0.875,bicubic,-53.545,-54.004,-13 +pit_ti_224,21.875,78.125,39.541,60.459,4.85,224,0.900,bicubic,-51.037,-51.861,+16 +regnetx_006,21.738,78.263,38.904,61.096,6.20,224,0.875,bicubic,-52.115,-52.768,+7 +vit_tiny_patch16_384,21.708,78.292,39.329,60.671,5.79,384,1.000,bicubic,-56.722,-55.213,-103 +vgg19_bn,21.628,78.373,39.283,60.717,143.68,224,0.875,bilinear,-52.587,-52.559,+1 +ghostnet_100,21.620,78.380,38.692,61.308,5.18,224,0.875,bilinear,-52.358,-52.764,+3 +gluon_resnet18_v1b,21.549,78.451,38.869,61.131,11.69,224,0.875,bicubic,-49.287,-50.893,+24 +fbnetc_100,21.484,78.516,38.161,61.839,5.57,224,0.875,bilinear,-53.640,-54.224,-16 +mnasnet_100,21.350,78.650,37.719,62.281,4.38,224,0.875,bicubic,-53.308,-54.395,-8 +resnet26,21.295,78.705,38.018,61.982,16.00,224,0.875,bicubic,-53.997,-54.552,-21 +ssl_resnet18,21.278,78.722,39.113,60.887,11.69,224,0.875,bilinear,-51.332,-52.303,+8 +mixnet_s,21.254,78.746,38.187,61.813,4.13,224,0.875,bicubic,-54.738,-54.609,-37 +seresnext26d_32x4d,21.252,78.748,37.311,62.689,16.81,224,0.875,bicubic,-56.350,-56.297,-79 +legacy_seresnext26_32x4d,21.093,78.907,37.633,62.367,16.79,224,0.875,bicubic,-56.011,-55.683,-63 +regnetx_004,20.898,79.102,37.566,62.434,5.16,224,0.875,bicubic,-51.498,-53.264,+5 +spnasnet_100,20.863,79.137,37.896,62.104,4.42,224,0.875,bilinear,-53.221,-53.922,-9 +legacy_seresnet18,20.837,79.162,37.619,62.381,11.78,224,0.875,bicubic,-50.905,-52.715,+11 +mobilenetv2_100,20.773,79.227,37.759,62.241,3.50,224,0.875,bicubic,-52.197,-53.257,-1 +tf_mixnet_s,20.470,79.530,36.607,63.393,4.13,224,0.875,bicubic,-55.180,-56.021,-38 +vit_tiny_patch16_224,20.458,79.542,37.597,62.403,5.72,224,0.900,bicubic,-54.996,-55.251,-33 +regnety_004,20.415,79.585,37.002,62.998,4.34,224,0.875,bicubic,-53.619,-54.750,-13 +hrnet_w18_small,20.368,79.632,37.093,62.907,13.19,224,0.875,bilinear,-51.974,-53.585,0 +tf_mobilenetv3_large_075,20.366,79.634,36.764,63.236,3.99,224,0.875,bilinear,-53.072,-54.586,-12 +resnet18,20.228,79.772,37.261,62.739,11.69,224,0.875,bilinear,-49.520,-51.817,+11 +mixer_l16_224,20.171,79.829,32.956,67.044,208.20,224,0.875,bicubic,-51.887,-54.712,+1 +deit_tiny_patch16_224,20.162,79.838,37.546,62.454,5.72,224,0.900,bicubic,-52.007,-53.572,-1 +tf_mobilenetv3_large_minimal_100,20.122,79.878,36.908,63.092,3.92,224,0.875,bilinear,-52.126,-53.722,-3 +vgg16_bn,19.959,80.041,36.301,63.699,138.37,224,0.875,bilinear,-53.391,-55.205,-16 +vit_tiny_r_s16_p8_224,19.334,80.666,36.047,63.953,6.34,224,0.900,bicubic,-52.454,-54.781,-2 +vgg19,17.929,82.071,33.054,66.946,143.67,224,0.875,bilinear,-54.439,-57.818,-9 +vgg13_bn,17.802,82.198,34.039,65.961,133.05,224,0.875,bilinear,-53.792,-56.337,-2 +vgg16,17.540,82.460,32.773,67.227,138.36,224,0.875,bilinear,-54.054,-57.609,-2 +regnety_002,17.450,82.550,32.431,67.569,3.16,224,0.875,bicubic,-52.802,-57.109,0 +vgg11_bn,17.403,82.597,33.011,66.989,132.87,224,0.875,bilinear,-52.957,-56.791,-2 +regnetx_002,16.962,83.038,32.225,67.775,2.68,224,0.875,bicubic,-51.800,-56.331,+2 +dla60x_c,16.310,83.690,31.761,68.239,1.32,224,0.875,bilinear,-51.582,-56.665,+3 +tf_mobilenetv3_small_100,16.227,83.772,31.223,68.777,2.54,224,0.875,bilinear,-51.694,-56.441,+1 +vgg13,16.100,83.900,30.985,69.015,133.05,224,0.875,bilinear,-53.826,-58.261,-4 +vgg11,15.728,84.272,30.453,69.547,132.86,224,0.875,bilinear,-53.296,-58.175,-3 +tf_mobilenetv3_small_075,14.944,85.056,29.572,70.428,2.04,224,0.875,bilinear,-50.772,-56.558,+1 +dla46_c,14.657,85.343,29.380,70.620,1.30,224,0.875,bilinear,-50.209,-56.912,+1 +dla46x_c,14.382,85.618,29.191,70.809,1.07,224,0.875,bilinear,-51.588,-57.789,-2 +tf_mobilenetv3_small_minimal_100,13.964,86.036,27.988,72.012,2.04,224,0.875,bilinear,-48.942,-56.242,0 diff --git a/testbed/huggingface__pytorch-image-models/setup.py b/testbed/huggingface__pytorch-image-models/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..882ed467a35a73aa99239d50eedea9c6cb7ce2fb --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/setup.py @@ -0,0 +1,48 @@ +""" Setup +""" +from setuptools import setup, find_packages +from codecs import open +from os import path + +here = path.abspath(path.dirname(__file__)) + +# Get the long description from the README file +with open(path.join(here, 'README.md'), encoding='utf-8') as f: + long_description = f.read() + +exec(open('timm/version.py').read()) +setup( + name='timm', + version=__version__, + description='(Unofficial) PyTorch Image Models', + long_description=long_description, + long_description_content_type='text/markdown', + url='https://github.com/rwightman/pytorch-image-models', + author='Ross Wightman', + author_email='hello@rwightman.com', + classifiers=[ + # How mature is this project? Common values are + # 3 - Alpha + # 4 - Beta + # 5 - Production/Stable + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Education', + 'Intended Audience :: Science/Research', + 'License :: OSI Approved :: Apache Software License', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Topic :: Scientific/Engineering', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + 'Topic :: Software Development', + 'Topic :: Software Development :: Libraries', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + + # Note that this is a string of words separated by whitespace, not a list. + keywords='pytorch pretrained models efficientnet mobilenetv3 mnasnet', + packages=find_packages(exclude=['convert', 'tests', 'results']), + include_package_data=True, + install_requires=['torch >= 1.4', 'torchvision'], + python_requires='>=3.6', +) diff --git a/testbed/huggingface__pytorch-image-models/tests/__init__.py b/testbed/huggingface__pytorch-image-models/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__pytorch-image-models/tests/test_layers.py b/testbed/huggingface__pytorch-image-models/tests/test_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..508a6aae674196723f970bbca6223378159d9df7 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/tests/test_layers.py @@ -0,0 +1,71 @@ +import pytest +import torch +import torch.nn as nn +import platform +import os + +from timm.models.layers import create_act_layer, get_act_layer, set_layer_config + + +class MLP(nn.Module): + def __init__(self, act_layer="relu", inplace=True): + super(MLP, self).__init__() + self.fc1 = nn.Linear(1000, 100) + self.act = create_act_layer(act_layer, inplace=inplace) + self.fc2 = nn.Linear(100, 10) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.fc2(x) + return x + + +def _run_act_layer_grad(act_type, inplace=True): + x = torch.rand(10, 1000) * 10 + m = MLP(act_layer=act_type, inplace=inplace) + + def _run(x, act_layer=''): + if act_layer: + # replace act layer if set + m.act = create_act_layer(act_layer, inplace=inplace) + out = m(x) + l = (out - 0).pow(2).sum() + return l + + out_me = _run(x) + + with set_layer_config(scriptable=True): + out_jit = _run(x, act_type) + + assert torch.isclose(out_jit, out_me) + + with set_layer_config(no_jit=True): + out_basic = _run(x, act_type) + + assert torch.isclose(out_basic, out_jit) + + +def test_swish_grad(): + for _ in range(100): + _run_act_layer_grad('swish') + + +def test_mish_grad(): + for _ in range(100): + _run_act_layer_grad('mish') + + +def test_hard_sigmoid_grad(): + for _ in range(100): + _run_act_layer_grad('hard_sigmoid', inplace=None) + + +def test_hard_swish_grad(): + for _ in range(100): + _run_act_layer_grad('hard_swish') + + +def test_hard_mish_grad(): + for _ in range(100): + _run_act_layer_grad('hard_mish') diff --git a/testbed/huggingface__pytorch-image-models/tests/test_models.py b/testbed/huggingface__pytorch-image-models/tests/test_models.py new file mode 100644 index 0000000000000000000000000000000000000000..c0d0e9013adb54a27c6300eda2f1c15e6ee922c5 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/tests/test_models.py @@ -0,0 +1,299 @@ +import pytest +import torch +import platform +import os +import fnmatch + +import timm +from timm import list_models, create_model, set_scriptable, has_model_default_key, is_model_default_key, \ + get_model_default_value + +if hasattr(torch._C, '_jit_set_profiling_executor'): + # legacy executor is too slow to compile large models for unit tests + # no need for the fusion performance here + torch._C._jit_set_profiling_executor(True) + torch._C._jit_set_profiling_mode(False) + +# transformer models don't support many of the spatial / feature based model functionalities +NON_STD_FILTERS = [ + 'vit_*', 'tnt_*', 'pit_*', 'swin_*', 'coat_*', 'cait_*', '*mixer_*', 'gmlp_*', 'resmlp_*', 'twins_*', + 'convit_*', 'levit*', 'visformer*', 'deit*', 'jx_nest_*', 'nest_*', 'xcit_*', 'crossvit_*', 'beit_*'] +NUM_NON_STD = len(NON_STD_FILTERS) + +# exclude models that cause specific test failures +if 'GITHUB_ACTIONS' in os.environ: # and 'Linux' in platform.system(): + # GitHub Linux runner is slower and hits memory limits sooner than MacOS, exclude bigger models + EXCLUDE_FILTERS = [ + '*efficientnet_l2*', '*resnext101_32x48d', '*in21k', '*152x4_bitm', '*101x3_bitm', '*50x3_bitm', + '*nfnet_f3*', '*nfnet_f4*', '*nfnet_f5*', '*nfnet_f6*', '*nfnet_f7*', '*efficientnetv2_xl*', + '*resnetrs350*', '*resnetrs420*', 'xcit_large_24_p8*'] +else: + EXCLUDE_FILTERS = [] + +TARGET_FWD_SIZE = MAX_FWD_SIZE = 384 +TARGET_BWD_SIZE = 128 +MAX_BWD_SIZE = 320 +MAX_FWD_OUT_SIZE = 448 +TARGET_JIT_SIZE = 128 +MAX_JIT_SIZE = 320 +TARGET_FFEAT_SIZE = 96 +MAX_FFEAT_SIZE = 256 + + +def _get_input_size(model=None, model_name='', target=None): + if model is None: + assert model_name, "One of model or model_name must be provided" + input_size = get_model_default_value(model_name, 'input_size') + fixed_input_size = get_model_default_value(model_name, 'fixed_input_size') + min_input_size = get_model_default_value(model_name, 'min_input_size') + else: + default_cfg = model.default_cfg + input_size = default_cfg['input_size'] + fixed_input_size = default_cfg.get('fixed_input_size', None) + min_input_size = default_cfg.get('min_input_size', None) + assert input_size is not None + + if fixed_input_size: + return input_size + + if min_input_size: + if target and max(input_size) > target: + input_size = min_input_size + else: + if target and max(input_size) > target: + input_size = tuple([min(x, target) for x in input_size]) + return input_size + + +@pytest.mark.timeout(120) +@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS)) +@pytest.mark.parametrize('batch_size', [1]) +def test_model_forward(model_name, batch_size): + """Run a single forward pass with each model""" + model = create_model(model_name, pretrained=False) + model.eval() + + input_size = _get_input_size(model=model, target=TARGET_FWD_SIZE) + if max(input_size) > MAX_FWD_SIZE: + pytest.skip("Fixed input size model > limit.") + inputs = torch.randn((batch_size, *input_size)) + outputs = model(inputs) + + assert outputs.shape[0] == batch_size + assert not torch.isnan(outputs).any(), 'Output included NaNs' + + +@pytest.mark.timeout(120) +@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS, name_matches_cfg=True)) +@pytest.mark.parametrize('batch_size', [2]) +def test_model_backward(model_name, batch_size): + """Run a single forward pass with each model""" + input_size = _get_input_size(model_name=model_name, target=TARGET_BWD_SIZE) + if max(input_size) > MAX_BWD_SIZE: + pytest.skip("Fixed input size model > limit.") + + model = create_model(model_name, pretrained=False, num_classes=42) + num_params = sum([x.numel() for x in model.parameters()]) + model.train() + + inputs = torch.randn((batch_size, *input_size)) + outputs = model(inputs) + if isinstance(outputs, tuple): + outputs = torch.cat(outputs) + outputs.mean().backward() + for n, x in model.named_parameters(): + assert x.grad is not None, f'No gradient for {n}' + num_grad = sum([x.grad.numel() for x in model.parameters() if x.grad is not None]) + + assert outputs.shape[-1] == 42 + assert num_params == num_grad, 'Some parameters are missing gradients' + assert not torch.isnan(outputs).any(), 'Output included NaNs' + + +@pytest.mark.timeout(300) +@pytest.mark.parametrize('model_name', list_models(exclude_filters=NON_STD_FILTERS)) +@pytest.mark.parametrize('batch_size', [1]) +def test_model_default_cfgs(model_name, batch_size): + """Run a single forward pass with each model""" + model = create_model(model_name, pretrained=False) + model.eval() + state_dict = model.state_dict() + cfg = model.default_cfg + + pool_size = cfg['pool_size'] + input_size = model.default_cfg['input_size'] + + if all([x <= MAX_FWD_OUT_SIZE for x in input_size]) and \ + not any([fnmatch.fnmatch(model_name, x) for x in EXCLUDE_FILTERS]): + # output sizes only checked if default res <= 448 * 448 to keep resource down + input_size = tuple([min(x, MAX_FWD_OUT_SIZE) for x in input_size]) + input_tensor = torch.randn((batch_size, *input_size)) + + # test forward_features (always unpooled) + outputs = model.forward_features(input_tensor) + assert outputs.shape[-1] == pool_size[-1] and outputs.shape[-2] == pool_size[-2] + + # test forward after deleting the classifier, output should be poooled, size(-1) == model.num_features + model.reset_classifier(0) + outputs = model.forward(input_tensor) + assert len(outputs.shape) == 2 + assert outputs.shape[-1] == model.num_features + + # test model forward without pooling and classifier + model.reset_classifier(0, '') # reset classifier and set global pooling to pass-through + outputs = model.forward(input_tensor) + assert len(outputs.shape) == 4 + if not isinstance(model, timm.models.MobileNetV3) and not isinstance(model, timm.models.GhostNet): + # FIXME mobilenetv3/ghostnet forward_features vs removed pooling differ + assert outputs.shape[-1] == pool_size[-1] and outputs.shape[-2] == pool_size[-2] + + if 'pruned' not in model_name: # FIXME better pruned model handling + # test classifier + global pool deletion via __init__ + model = create_model(model_name, pretrained=False, num_classes=0, global_pool='').eval() + outputs = model.forward(input_tensor) + assert len(outputs.shape) == 4 + if not isinstance(model, timm.models.MobileNetV3) and not isinstance(model, timm.models.GhostNet): + # FIXME mobilenetv3/ghostnet forward_features vs removed pooling differ + assert outputs.shape[-1] == pool_size[-1] and outputs.shape[-2] == pool_size[-2] + + # check classifier name matches default_cfg + classifier = cfg['classifier'] + if not isinstance(classifier, (tuple, list)): + classifier = classifier, + for c in classifier: + assert c + ".weight" in state_dict.keys(), f'{c} not in model params' + + # check first conv(s) names match default_cfg + first_conv = cfg['first_conv'] + if isinstance(first_conv, str): + first_conv = (first_conv,) + assert isinstance(first_conv, (tuple, list)) + for fc in first_conv: + assert fc + ".weight" in state_dict.keys(), f'{fc} not in model params' + + +@pytest.mark.timeout(300) +@pytest.mark.parametrize('model_name', list_models(filter=NON_STD_FILTERS)) +@pytest.mark.parametrize('batch_size', [1]) +def test_model_default_cfgs_non_std(model_name, batch_size): + """Run a single forward pass with each model""" + model = create_model(model_name, pretrained=False) + model.eval() + state_dict = model.state_dict() + cfg = model.default_cfg + + input_size = _get_input_size(model=model) + if max(input_size) > 320: # FIXME const + pytest.skip("Fixed input size model > limit.") + + input_tensor = torch.randn((batch_size, *input_size)) + + outputs = model.forward_features(input_tensor) + if isinstance(outputs, (tuple, list)): + outputs = outputs[0] + assert outputs.shape[1] == model.num_features + + # test forward after deleting the classifier, output should be poooled, size(-1) == model.num_features + model.reset_classifier(0) + outputs = model.forward(input_tensor) + if isinstance(outputs, (tuple, list)): + outputs = outputs[0] + assert len(outputs.shape) == 2 + assert outputs.shape[1] == model.num_features + + model = create_model(model_name, pretrained=False, num_classes=0).eval() + outputs = model.forward(input_tensor) + if isinstance(outputs, (tuple, list)): + outputs = outputs[0] + assert len(outputs.shape) == 2 + assert outputs.shape[1] == model.num_features + + # check classifier name matches default_cfg + classifier = cfg['classifier'] + if not isinstance(classifier, (tuple, list)): + classifier = classifier, + for c in classifier: + assert c + ".weight" in state_dict.keys(), f'{c} not in model params' + + # check first conv(s) names match default_cfg + first_conv = cfg['first_conv'] + if isinstance(first_conv, str): + first_conv = (first_conv,) + assert isinstance(first_conv, (tuple, list)) + for fc in first_conv: + assert fc + ".weight" in state_dict.keys(), f'{fc} not in model params' + + +if 'GITHUB_ACTIONS' not in os.environ: + @pytest.mark.timeout(120) + @pytest.mark.parametrize('model_name', list_models(pretrained=True)) + @pytest.mark.parametrize('batch_size', [1]) + def test_model_load_pretrained(model_name, batch_size): + """Create that pretrained weights load, verify support for in_chans != 3 while doing so.""" + in_chans = 3 if 'pruned' in model_name else 1 # pruning not currently supported with in_chans change + create_model(model_name, pretrained=True, in_chans=in_chans, num_classes=5) + create_model(model_name, pretrained=True, in_chans=in_chans, num_classes=0) + + @pytest.mark.timeout(120) + @pytest.mark.parametrize('model_name', list_models(pretrained=True, exclude_filters=NON_STD_FILTERS)) + @pytest.mark.parametrize('batch_size', [1]) + def test_model_features_pretrained(model_name, batch_size): + """Create that pretrained weights load when features_only==True.""" + create_model(model_name, pretrained=True, features_only=True) + +EXCLUDE_JIT_FILTERS = [ + '*iabn*', 'tresnet*', # models using inplace abn unlikely to ever be scriptable + 'dla*', 'hrnet*', 'ghostnet*', # hopefully fix at some point + 'vit_large_*', 'vit_huge_*', +] + + +@pytest.mark.timeout(120) +@pytest.mark.parametrize( + 'model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_JIT_FILTERS, name_matches_cfg=True)) +@pytest.mark.parametrize('batch_size', [1]) +def test_model_forward_torchscript(model_name, batch_size): + """Run a single forward pass with each model""" + input_size = _get_input_size(model_name=model_name, target=TARGET_JIT_SIZE) + if max(input_size) > MAX_JIT_SIZE: + pytest.skip("Fixed input size model > limit.") + + with set_scriptable(True): + model = create_model(model_name, pretrained=False) + model.eval() + + model = torch.jit.script(model) + outputs = model(torch.randn((batch_size, *input_size))) + + assert outputs.shape[0] == batch_size + assert not torch.isnan(outputs).any(), 'Output included NaNs' + + +EXCLUDE_FEAT_FILTERS = [ + '*pruned*', # hopefully fix at some point +] + NON_STD_FILTERS +if 'GITHUB_ACTIONS' in os.environ: # and 'Linux' in platform.system(): + # GitHub Linux runner is slower and hits memory limits sooner than MacOS, exclude bigger models + EXCLUDE_FEAT_FILTERS += ['*resnext101_32x32d', '*resnext101_32x16d'] + + +@pytest.mark.timeout(120) +@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FEAT_FILTERS)) +@pytest.mark.parametrize('batch_size', [1]) +def test_model_forward_features(model_name, batch_size): + """Run a single forward pass with each model in feature extraction mode""" + model = create_model(model_name, pretrained=False, features_only=True) + model.eval() + expected_channels = model.feature_info.channels() + assert len(expected_channels) >= 4 # all models here should have at least 4 feature levels by default, some 5 or 6 + + input_size = _get_input_size(model=model, target=TARGET_FFEAT_SIZE) + if max(input_size) > MAX_FFEAT_SIZE: + pytest.skip("Fixed input size model > limit.") + + outputs = model(torch.randn((batch_size, *input_size))) + assert len(expected_channels) == len(outputs) + for e, o in zip(expected_channels, outputs): + assert e == o.shape[1] + assert o.shape[0] == batch_size + assert not torch.isnan(o).any() diff --git a/testbed/huggingface__pytorch-image-models/tests/test_optim.py b/testbed/huggingface__pytorch-image-models/tests/test_optim.py new file mode 100644 index 0000000000000000000000000000000000000000..737674e5cf69876fc6fecc7bbccb5ea11daf2a66 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/tests/test_optim.py @@ -0,0 +1,733 @@ +""" Optimzier Tests + +These tests were adapted from PyTorch' optimizer tests. + +""" +import math +import pytest +import functools +from copy import deepcopy + +import torch +from torch.testing._internal.common_utils import TestCase +from torch.autograd import Variable +from timm.scheduler import PlateauLRScheduler + +from timm.optim import create_optimizer_v2 + + +# HACK relying on internal PyTorch test functionality for comparisons that I don't want to write +torch_tc = TestCase() + + +def _test_basic_cases_template(weight, bias, input, constructor, scheduler_constructors): + weight = Variable(weight, requires_grad=True) + bias = Variable(bias, requires_grad=True) + input = Variable(input) + optimizer = constructor(weight, bias) + schedulers = [] + for scheduler_constructor in scheduler_constructors: + schedulers.append(scheduler_constructor(optimizer)) + + # to check if the optimizer can be printed as a string + optimizer.__repr__() + + def fn(): + optimizer.zero_grad() + y = weight.mv(input) + if y.is_cuda and bias.is_cuda and y.get_device() != bias.get_device(): + y = y.cuda(bias.get_device()) + loss = (y + bias).pow(2).sum() + loss.backward() + return loss + + initial_value = fn().item() + for _i in range(200): + for scheduler in schedulers: + if isinstance(scheduler, PlateauLRScheduler): + val_loss = fn() + scheduler.step(val_loss) + else: + scheduler.step() + optimizer.step(fn) + + assert fn().item() < initial_value + + +def _test_state_dict(weight, bias, input, constructor): + weight = Variable(weight, requires_grad=True) + bias = Variable(bias, requires_grad=True) + input = Variable(input) + + def fn_base(optimizer, weight, bias): + optimizer.zero_grad() + i = input_cuda if weight.is_cuda else input + loss = (weight.mv(i) + bias).pow(2).sum() + loss.backward() + return loss + + optimizer = constructor(weight, bias) + fn = functools.partial(fn_base, optimizer, weight, bias) + + # Prime the optimizer + for _i in range(20): + optimizer.step(fn) + # Clone the weights and construct new optimizer for them + weight_c = Variable(weight.data.clone(), requires_grad=True) + bias_c = Variable(bias.data.clone(), requires_grad=True) + optimizer_c = constructor(weight_c, bias_c) + fn_c = functools.partial(fn_base, optimizer_c, weight_c, bias_c) + # Load state dict + state_dict = deepcopy(optimizer.state_dict()) + state_dict_c = deepcopy(optimizer.state_dict()) + optimizer_c.load_state_dict(state_dict_c) + + # Run both optimizations in parallel + for _i in range(20): + optimizer.step(fn) + optimizer_c.step(fn_c) + #assert torch.equal(weight, weight_c) + #assert torch.equal(bias, bias_c) + torch_tc.assertEqual(weight, weight_c) + torch_tc.assertEqual(bias, bias_c) + # Make sure state dict wasn't modified + torch_tc.assertEqual(state_dict, state_dict_c) + # Make sure state dict is deterministic with equal but not identical parameters + torch_tc.assertEqual(optimizer.state_dict(), optimizer_c.state_dict()) + # Make sure repeated parameters have identical representation in state dict + optimizer_c.param_groups.extend(optimizer_c.param_groups) + torch_tc.assertEqual(optimizer.state_dict()['param_groups'][-1], optimizer_c.state_dict()['param_groups'][-1]) + + # Check that state dict can be loaded even when we cast parameters + # to a different type and move to a different device. + if not torch.cuda.is_available(): + return + + input_cuda = Variable(input.data.float().cuda()) + weight_cuda = Variable(weight.data.float().cuda(), requires_grad=True) + bias_cuda = Variable(bias.data.float().cuda(), requires_grad=True) + optimizer_cuda = constructor(weight_cuda, bias_cuda) + fn_cuda = functools.partial(fn_base, optimizer_cuda, weight_cuda, bias_cuda) + + state_dict = deepcopy(optimizer.state_dict()) + state_dict_c = deepcopy(optimizer.state_dict()) + optimizer_cuda.load_state_dict(state_dict_c) + + # Make sure state dict wasn't modified + torch_tc.assertEqual(state_dict, state_dict_c) + + for _i in range(20): + optimizer.step(fn) + optimizer_cuda.step(fn_cuda) + torch_tc.assertEqual(weight, weight_cuda) + torch_tc.assertEqual(bias, bias_cuda) + + # validate deepcopy() copies all public attributes + def getPublicAttr(obj): + return set(k for k in obj.__dict__ if not k.startswith('_')) + + assert getPublicAttr(optimizer) == getPublicAttr(deepcopy(optimizer)) + + +def _test_basic_cases(constructor, scheduler_constructors=None): + if scheduler_constructors is None: + scheduler_constructors = [] + _test_state_dict( + torch.randn(10, 5), + torch.randn(10), + torch.randn(5), + constructor + ) + _test_basic_cases_template( + torch.randn(10, 5), + torch.randn(10), + torch.randn(5), + constructor, + scheduler_constructors + ) + # non-contiguous parameters + _test_basic_cases_template( + torch.randn(10, 5, 2)[..., 0], + torch.randn(10, 2)[..., 0], + torch.randn(5), + constructor, + scheduler_constructors + ) + # CUDA + if not torch.cuda.is_available(): + return + _test_basic_cases_template( + torch.randn(10, 5).cuda(), + torch.randn(10).cuda(), + torch.randn(5).cuda(), + constructor, + scheduler_constructors + ) + + +def _test_model(optimizer, params, device=torch.device('cpu')): + weight = torch.tensor( + [[-0.2109, -0.4976], [-0.1413, -0.3420], [-0.2524, 0.6976]], + device=device, requires_grad=True) + bias = torch.tensor([-0.1085, -0.2979, 0.6892], device=device, requires_grad=True) + weight2 = torch.tensor([[-0.0508, -0.3941, -0.2843]], device=device, requires_grad=True) + bias2 = torch.tensor([-0.0711], device=device, requires_grad=True) + input = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], device=device).reshape(3, 2) + + model = torch.nn.Sequential(torch.nn.Linear(2, 3), + torch.nn.Sigmoid(), + torch.nn.Linear(3, 1), + torch.nn.Sigmoid()) + model.to(device) + + pretrained_dict = model.state_dict() + pretrained_dict['0.weight'] = weight + pretrained_dict['0.bias'] = bias + pretrained_dict['2.weight'] = weight2 + pretrained_dict['2.bias'] = bias2 + model.load_state_dict(pretrained_dict) + + optimizer = create_optimizer_v2(model, opt=optimizer, **params) + + prev_loss = float('inf') + for i in range(20): + optimizer.zero_grad() + output = model(input) + loss = output.sum() + loss.backward() + loss = loss.item() + assert loss < prev_loss + prev_loss = loss + optimizer.step() + + +def rosenbrock(tensor): + x, y = tensor + return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2 + + +def drosenbrock(tensor): + x, y = tensor + return torch.tensor((-400 * x * (y - x ** 2) - 2 * (1 - x), 200 * (y - x ** 2))) + + +def _test_rosenbrock(constructor, scheduler_constructors=None): + if scheduler_constructors is None: + scheduler_constructors = [] + params_t = torch.tensor([1.5, 1.5]) + + params = Variable(params_t, requires_grad=True) + optimizer = constructor([params]) + schedulers = [] + for scheduler_constructor in scheduler_constructors: + schedulers.append(scheduler_constructor(optimizer)) + + solution = torch.tensor([1, 1]) + initial_dist = params.data.dist(solution) + + def eval(params, w): + # Depending on w, provide only the x or y gradient + optimizer.zero_grad() + loss = rosenbrock(params) + loss.backward() + grad = drosenbrock(params.data) + # NB: We torture test the optimizer by returning an + # uncoalesced sparse tensor + if w: + i = torch.LongTensor([[0, 0]]) + x = grad[0] + v = torch.tensor([x / 4., x - x / 4.]) + else: + i = torch.LongTensor([[1, 1]]) + y = grad[1] + v = torch.tensor([y - y / 4., y / 4.]) + x = torch.sparse.DoubleTensor(i, v, torch.Size([2])).to(dtype=v.dtype) + with torch.no_grad(): + params.grad = x.to_dense() + return loss + + for i in range(2000): + # Do cyclic coordinate descent + w = i % 2 + optimizer.step(functools.partial(eval, params, w)) + for scheduler in schedulers: + if isinstance(scheduler, PlateauLRScheduler): + scheduler.step(rosenbrock(params)) + else: + scheduler.step() + + torch_tc.assertLessEqual(params.data.dist(solution), initial_dist) + + +def _build_params_dict(weight, bias, **kwargs): + return [{'params': [weight]}, dict(params=[bias], **kwargs)] + + +def _build_params_dict_single(weight, bias, **kwargs): + return [dict(params=bias, **kwargs)] + + +#@pytest.mark.parametrize('optimizer', ['sgd', 'momentum']) +# FIXME momentum variant frequently fails in GitHub runner, but never local after many attempts +@pytest.mark.parametrize('optimizer', ['sgd']) +def test_sgd(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=1e-2), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=1e-2), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=1e-2), optimizer) + ) + # _test_basic_cases( + # lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3), + # [lambda opt: StepLR(opt, gamma=0.9, step_size=10)] + # ) + # _test_basic_cases( + # lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3), + # [lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4, warmup_method="linear")] + # ) + # _test_basic_cases( + # lambda weight, bias: optimizer([weight, bias], lr=1e-3), + # [lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4, warmup_method="constant")] + # ) + # _test_basic_cases( + # lambda weight, bias: optimizer([weight, bias], lr=1e-3), + # [lambda opt: StepLR(opt, gamma=0.9, step_size=10), + # lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4)] + # ) + # _test_basic_cases( + # lambda weight, bias: optimizer([weight, bias], lr=1e-3), + # [lambda opt: StepLR(opt, gamma=0.9, step_size=10), + # lambda opt: ReduceLROnPlateau(opt)] + # ) + # _test_basic_cases( + # lambda weight, bias: optimizer([weight, bias], lr=1e-3), + # [lambda opt: StepLR(opt, gamma=0.99, step_size=10), + # lambda opt: ExponentialLR(opt, gamma=0.99), + # lambda opt: ReduceLROnPlateau(opt)] + # ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=3e-3, momentum=1) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=3e-3, momentum=1, weight_decay=.1) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) + ) + _test_model(optimizer, dict(lr=1e-3)) + + +@pytest.mark.parametrize('optimizer', ['adamw', 'adam', 'nadam', 'adamax']) +def test_adam(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) + ) + _test_model(optimizer, dict(lr=5e-2)) + + +@pytest.mark.parametrize('optimizer', ['adabelief']) +def test_adabelief(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) + ) + _test_model(optimizer, dict(lr=5e-2)) + + +@pytest.mark.parametrize('optimizer', ['radam', 'radabelief']) +def test_rectified(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) + ) + _test_model(optimizer, dict(lr=1e-3)) + + +@pytest.mark.parametrize('optimizer', ['adadelta', 'adagrad']) +def test_adaother(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-1) + ) + _test_model(optimizer, dict(lr=5e-2)) + + +@pytest.mark.parametrize('optimizer', ['adafactor']) +def test_adafactor(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2(_build_params_dict_single(weight, bias), optimizer) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) + ) + _test_model(optimizer, dict(lr=5e-2)) + + +@pytest.mark.parametrize('optimizer', ['lamb', 'lambc']) +def test_lamb(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=1e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=1e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=1e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) + ) + _test_model(optimizer, dict(lr=1e-3)) + + +@pytest.mark.parametrize('optimizer', ['lars', 'larc', 'nlars', 'nlarc']) +def test_lars(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=1e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=1e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=1e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) + ) + _test_model(optimizer, dict(lr=1e-3)) + + +@pytest.mark.parametrize('optimizer', ['madgrad', 'madgradw']) +def test_madgrad(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-2) + ) + _test_model(optimizer, dict(lr=1e-2)) + + +@pytest.mark.parametrize('optimizer', ['novograd']) +def test_novograd(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) + ) + _test_model(optimizer, dict(lr=1e-3)) + + +@pytest.mark.parametrize('optimizer', ['rmsprop', 'rmsproptf']) +def test_rmsprop(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-2) + ) + _test_model(optimizer, dict(lr=1e-2)) + + +@pytest.mark.parametrize('optimizer', ['adamp']) +def test_adamp(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) + ) + _test_model(optimizer, dict(lr=5e-2)) + + +@pytest.mark.parametrize('optimizer', ['sgdp']) +def test_sgdp(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) + ) + _test_model(optimizer, dict(lr=1e-3)) + + +@pytest.mark.parametrize('optimizer', ['lookahead_sgd', 'lookahead_momentum']) +def test_lookahead_sgd(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) + ) + + +@pytest.mark.parametrize('optimizer', ['lookahead_adamw', 'lookahead_adam']) +def test_lookahead_adam(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) + ) + + +@pytest.mark.parametrize('optimizer', ['lookahead_radam']) +def test_lookahead_radam(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-4) + ) + diff --git a/testbed/huggingface__pytorch-image-models/timm/data/__init__.py b/testbed/huggingface__pytorch-image-models/timm/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7d3cb2b4d7e823aabb1d55781149579eeb94b024 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/__init__.py @@ -0,0 +1,12 @@ +from .auto_augment import RandAugment, AutoAugment, rand_augment_ops, auto_augment_policy,\ + rand_augment_transform, auto_augment_transform +from .config import resolve_data_config +from .constants import * +from .dataset import ImageDataset, IterableImageDataset, AugMixDataset +from .dataset_factory import create_dataset +from .loader import create_loader +from .mixup import Mixup, FastCollateMixup +from .parsers import create_parser +from .real_labels import RealLabelsImagenet +from .transforms import * +from .transforms_factory import create_transform \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/timm/data/auto_augment.py b/testbed/huggingface__pytorch-image-models/timm/data/auto_augment.py new file mode 100644 index 0000000000000000000000000000000000000000..7d80d702e5e5ff775e3279f1ec2c57b0fc6410f2 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/auto_augment.py @@ -0,0 +1,833 @@ +""" AutoAugment, RandAugment, and AugMix for PyTorch + +This code implements the searched ImageNet policies with various tweaks and improvements and +does not include any of the search code. + +AA and RA Implementation adapted from: + https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py + +AugMix adapted from: + https://github.com/google-research/augmix + +Papers: + AutoAugment: Learning Augmentation Policies from Data - https://arxiv.org/abs/1805.09501 + Learning Data Augmentation Strategies for Object Detection - https://arxiv.org/abs/1906.11172 + RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719 + AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - https://arxiv.org/abs/1912.02781 + +Hacked together by / Copyright 2020 Ross Wightman +""" +import random +import math +import re +from PIL import Image, ImageOps, ImageEnhance, ImageChops +import PIL +import numpy as np + + +_PIL_VER = tuple([int(x) for x in PIL.__version__.split('.')[:2]]) + +_FILL = (128, 128, 128) + +_LEVEL_DENOM = 10. # denominator for conversion from 'Mx' magnitude scale to fractional aug level for op arguments + +_HPARAMS_DEFAULT = dict( + translate_const=250, + img_mean=_FILL, +) + +_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) + + +def _interpolation(kwargs): + interpolation = kwargs.pop('resample', Image.BILINEAR) + if isinstance(interpolation, (list, tuple)): + return random.choice(interpolation) + else: + return interpolation + + +def _check_args_tf(kwargs): + if 'fillcolor' in kwargs and _PIL_VER < (5, 0): + kwargs.pop('fillcolor') + kwargs['resample'] = _interpolation(kwargs) + + +def shear_x(img, factor, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs) + + +def shear_y(img, factor, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs) + + +def translate_x_rel(img, pct, **kwargs): + pixels = pct * img.size[0] + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs) + + +def translate_y_rel(img, pct, **kwargs): + pixels = pct * img.size[1] + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs) + + +def translate_x_abs(img, pixels, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs) + + +def translate_y_abs(img, pixels, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs) + + +def rotate(img, degrees, **kwargs): + _check_args_tf(kwargs) + if _PIL_VER >= (5, 2): + return img.rotate(degrees, **kwargs) + elif _PIL_VER >= (5, 0): + w, h = img.size + post_trans = (0, 0) + rotn_center = (w / 2.0, h / 2.0) + angle = -math.radians(degrees) + matrix = [ + round(math.cos(angle), 15), + round(math.sin(angle), 15), + 0.0, + round(-math.sin(angle), 15), + round(math.cos(angle), 15), + 0.0, + ] + + def transform(x, y, matrix): + (a, b, c, d, e, f) = matrix + return a * x + b * y + c, d * x + e * y + f + + matrix[2], matrix[5] = transform( + -rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix + ) + matrix[2] += rotn_center[0] + matrix[5] += rotn_center[1] + return img.transform(img.size, Image.AFFINE, matrix, **kwargs) + else: + return img.rotate(degrees, resample=kwargs['resample']) + + +def auto_contrast(img, **__): + return ImageOps.autocontrast(img) + + +def invert(img, **__): + return ImageOps.invert(img) + + +def equalize(img, **__): + return ImageOps.equalize(img) + + +def solarize(img, thresh, **__): + return ImageOps.solarize(img, thresh) + + +def solarize_add(img, add, thresh=128, **__): + lut = [] + for i in range(256): + if i < thresh: + lut.append(min(255, i + add)) + else: + lut.append(i) + if img.mode in ("L", "RGB"): + if img.mode == "RGB" and len(lut) == 256: + lut = lut + lut + lut + return img.point(lut) + else: + return img + + +def posterize(img, bits_to_keep, **__): + if bits_to_keep >= 8: + return img + return ImageOps.posterize(img, bits_to_keep) + + +def contrast(img, factor, **__): + return ImageEnhance.Contrast(img).enhance(factor) + + +def color(img, factor, **__): + return ImageEnhance.Color(img).enhance(factor) + + +def brightness(img, factor, **__): + return ImageEnhance.Brightness(img).enhance(factor) + + +def sharpness(img, factor, **__): + return ImageEnhance.Sharpness(img).enhance(factor) + + +def _randomly_negate(v): + """With 50% prob, negate the value""" + return -v if random.random() > 0.5 else v + + +def _rotate_level_to_arg(level, _hparams): + # range [-30, 30] + level = (level / _LEVEL_DENOM) * 30. + level = _randomly_negate(level) + return level, + + +def _enhance_level_to_arg(level, _hparams): + # range [0.1, 1.9] + return (level / _LEVEL_DENOM) * 1.8 + 0.1, + + +def _enhance_increasing_level_to_arg(level, _hparams): + # the 'no change' level is 1.0, moving away from that towards 0. or 2.0 increases the enhancement blend + # range [0.1, 1.9] if level <= _LEVEL_DENOM + level = (level / _LEVEL_DENOM) * .9 + level = max(0.1, 1.0 + _randomly_negate(level)) # keep it >= 0.1 + return level, + + +def _shear_level_to_arg(level, _hparams): + # range [-0.3, 0.3] + level = (level / _LEVEL_DENOM) * 0.3 + level = _randomly_negate(level) + return level, + + +def _translate_abs_level_to_arg(level, hparams): + translate_const = hparams['translate_const'] + level = (level / _LEVEL_DENOM) * float(translate_const) + level = _randomly_negate(level) + return level, + + +def _translate_rel_level_to_arg(level, hparams): + # default range [-0.45, 0.45] + translate_pct = hparams.get('translate_pct', 0.45) + level = (level / _LEVEL_DENOM) * translate_pct + level = _randomly_negate(level) + return level, + + +def _posterize_level_to_arg(level, _hparams): + # As per Tensorflow TPU EfficientNet impl + # range [0, 4], 'keep 0 up to 4 MSB of original image' + # intensity/severity of augmentation decreases with level + return int((level / _LEVEL_DENOM) * 4), + + +def _posterize_increasing_level_to_arg(level, hparams): + # As per Tensorflow models research and UDA impl + # range [4, 0], 'keep 4 down to 0 MSB of original image', + # intensity/severity of augmentation increases with level + return 4 - _posterize_level_to_arg(level, hparams)[0], + + +def _posterize_original_level_to_arg(level, _hparams): + # As per original AutoAugment paper description + # range [4, 8], 'keep 4 up to 8 MSB of image' + # intensity/severity of augmentation decreases with level + return int((level / _LEVEL_DENOM) * 4) + 4, + + +def _solarize_level_to_arg(level, _hparams): + # range [0, 256] + # intensity/severity of augmentation decreases with level + return int((level / _LEVEL_DENOM) * 256), + + +def _solarize_increasing_level_to_arg(level, _hparams): + # range [0, 256] + # intensity/severity of augmentation increases with level + return 256 - _solarize_level_to_arg(level, _hparams)[0], + + +def _solarize_add_level_to_arg(level, _hparams): + # range [0, 110] + return int((level / _LEVEL_DENOM) * 110), + + +LEVEL_TO_ARG = { + 'AutoContrast': None, + 'Equalize': None, + 'Invert': None, + 'Rotate': _rotate_level_to_arg, + # There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers + 'Posterize': _posterize_level_to_arg, + 'PosterizeIncreasing': _posterize_increasing_level_to_arg, + 'PosterizeOriginal': _posterize_original_level_to_arg, + 'Solarize': _solarize_level_to_arg, + 'SolarizeIncreasing': _solarize_increasing_level_to_arg, + 'SolarizeAdd': _solarize_add_level_to_arg, + 'Color': _enhance_level_to_arg, + 'ColorIncreasing': _enhance_increasing_level_to_arg, + 'Contrast': _enhance_level_to_arg, + 'ContrastIncreasing': _enhance_increasing_level_to_arg, + 'Brightness': _enhance_level_to_arg, + 'BrightnessIncreasing': _enhance_increasing_level_to_arg, + 'Sharpness': _enhance_level_to_arg, + 'SharpnessIncreasing': _enhance_increasing_level_to_arg, + 'ShearX': _shear_level_to_arg, + 'ShearY': _shear_level_to_arg, + 'TranslateX': _translate_abs_level_to_arg, + 'TranslateY': _translate_abs_level_to_arg, + 'TranslateXRel': _translate_rel_level_to_arg, + 'TranslateYRel': _translate_rel_level_to_arg, +} + + +NAME_TO_OP = { + 'AutoContrast': auto_contrast, + 'Equalize': equalize, + 'Invert': invert, + 'Rotate': rotate, + 'Posterize': posterize, + 'PosterizeIncreasing': posterize, + 'PosterizeOriginal': posterize, + 'Solarize': solarize, + 'SolarizeIncreasing': solarize, + 'SolarizeAdd': solarize_add, + 'Color': color, + 'ColorIncreasing': color, + 'Contrast': contrast, + 'ContrastIncreasing': contrast, + 'Brightness': brightness, + 'BrightnessIncreasing': brightness, + 'Sharpness': sharpness, + 'SharpnessIncreasing': sharpness, + 'ShearX': shear_x, + 'ShearY': shear_y, + 'TranslateX': translate_x_abs, + 'TranslateY': translate_y_abs, + 'TranslateXRel': translate_x_rel, + 'TranslateYRel': translate_y_rel, +} + + +class AugmentOp: + + def __init__(self, name, prob=0.5, magnitude=10, hparams=None): + hparams = hparams or _HPARAMS_DEFAULT + self.aug_fn = NAME_TO_OP[name] + self.level_fn = LEVEL_TO_ARG[name] + self.prob = prob + self.magnitude = magnitude + self.hparams = hparams.copy() + self.kwargs = dict( + fillcolor=hparams['img_mean'] if 'img_mean' in hparams else _FILL, + resample=hparams['interpolation'] if 'interpolation' in hparams else _RANDOM_INTERPOLATION, + ) + + # If magnitude_std is > 0, we introduce some randomness + # in the usually fixed policy and sample magnitude from a normal distribution + # with mean `magnitude` and std-dev of `magnitude_std`. + # NOTE This is my own hack, being tested, not in papers or reference impls. + # If magnitude_std is inf, we sample magnitude from a uniform distribution + self.magnitude_std = self.hparams.get('magnitude_std', 0) + self.magnitude_max = self.hparams.get('magnitude_max', None) + + def __call__(self, img): + if self.prob < 1.0 and random.random() > self.prob: + return img + magnitude = self.magnitude + if self.magnitude_std > 0: + # magnitude randomization enabled + if self.magnitude_std == float('inf'): + magnitude = random.uniform(0, magnitude) + elif self.magnitude_std > 0: + magnitude = random.gauss(magnitude, self.magnitude_std) + # default upper_bound for the timm RA impl is _LEVEL_DENOM (10) + # setting magnitude_max overrides this to allow M > 10 (behaviour closer to Google TF RA impl) + upper_bound = self.magnitude_max or _LEVEL_DENOM + magnitude = max(0., min(magnitude, upper_bound)) + level_args = self.level_fn(magnitude, self.hparams) if self.level_fn is not None else tuple() + return self.aug_fn(img, *level_args, **self.kwargs) + + +def auto_augment_policy_v0(hparams): + # ImageNet v0 policy from TPU EfficientNet impl, cannot find a paper reference. + policy = [ + [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], + [('Color', 0.4, 9), ('Equalize', 0.6, 3)], + [('Color', 0.4, 1), ('Rotate', 0.6, 8)], + [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], + [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], + [('Color', 0.2, 0), ('Equalize', 0.8, 8)], + [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], + [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], + [('Color', 0.6, 1), ('Equalize', 1.0, 2)], + [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], + [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], + [('Color', 0.4, 7), ('Equalize', 0.6, 0)], + [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)], + [('Solarize', 0.6, 8), ('Color', 0.6, 9)], + [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], + [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)], + [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], + [('ShearY', 0.8, 0), ('Color', 0.6, 4)], + [('Color', 1.0, 0), ('Rotate', 0.6, 2)], + [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], + [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], + [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], + [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], # This results in black image with Tpu posterize + [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], + [('Color', 0.8, 6), ('Rotate', 0.4, 5)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy_v0r(hparams): + # ImageNet v0 policy from TPU EfficientNet impl, with variation of Posterize used + # in Google research implementation (number of bits discarded increases with magnitude) + policy = [ + [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], + [('Color', 0.4, 9), ('Equalize', 0.6, 3)], + [('Color', 0.4, 1), ('Rotate', 0.6, 8)], + [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], + [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], + [('Color', 0.2, 0), ('Equalize', 0.8, 8)], + [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], + [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], + [('Color', 0.6, 1), ('Equalize', 1.0, 2)], + [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], + [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], + [('Color', 0.4, 7), ('Equalize', 0.6, 0)], + [('PosterizeIncreasing', 0.4, 6), ('AutoContrast', 0.4, 7)], + [('Solarize', 0.6, 8), ('Color', 0.6, 9)], + [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], + [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)], + [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], + [('ShearY', 0.8, 0), ('Color', 0.6, 4)], + [('Color', 1.0, 0), ('Rotate', 0.6, 2)], + [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], + [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], + [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], + [('PosterizeIncreasing', 0.8, 2), ('Solarize', 0.6, 10)], + [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], + [('Color', 0.8, 6), ('Rotate', 0.4, 5)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy_original(hparams): + # ImageNet policy from https://arxiv.org/abs/1805.09501 + policy = [ + [('PosterizeOriginal', 0.4, 8), ('Rotate', 0.6, 9)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + [('PosterizeOriginal', 0.6, 7), ('PosterizeOriginal', 0.6, 6)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], + [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], + [('PosterizeOriginal', 0.8, 5), ('Equalize', 1.0, 2)], + [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], + [('Equalize', 0.6, 8), ('PosterizeOriginal', 0.4, 6)], + [('Rotate', 0.8, 8), ('Color', 0.4, 0)], + [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], + [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Rotate', 0.8, 8), ('Color', 1.0, 2)], + [('Color', 0.8, 8), ('Solarize', 0.8, 7)], + [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)], + [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], + [('Color', 0.4, 0), ('Equalize', 0.6, 3)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy_originalr(hparams): + # ImageNet policy from https://arxiv.org/abs/1805.09501 with research posterize variation + policy = [ + [('PosterizeIncreasing', 0.4, 8), ('Rotate', 0.6, 9)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + [('PosterizeIncreasing', 0.6, 7), ('PosterizeIncreasing', 0.6, 6)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], + [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], + [('PosterizeIncreasing', 0.8, 5), ('Equalize', 1.0, 2)], + [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], + [('Equalize', 0.6, 8), ('PosterizeIncreasing', 0.4, 6)], + [('Rotate', 0.8, 8), ('Color', 0.4, 0)], + [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], + [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Rotate', 0.8, 8), ('Color', 1.0, 2)], + [('Color', 0.8, 8), ('Solarize', 0.8, 7)], + [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)], + [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], + [('Color', 0.4, 0), ('Equalize', 0.6, 3)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy(name='v0', hparams=None): + hparams = hparams or _HPARAMS_DEFAULT + if name == 'original': + return auto_augment_policy_original(hparams) + elif name == 'originalr': + return auto_augment_policy_originalr(hparams) + elif name == 'v0': + return auto_augment_policy_v0(hparams) + elif name == 'v0r': + return auto_augment_policy_v0r(hparams) + else: + assert False, 'Unknown AA policy (%s)' % name + + +class AutoAugment: + + def __init__(self, policy): + self.policy = policy + + def __call__(self, img): + sub_policy = random.choice(self.policy) + for op in sub_policy: + img = op(img) + return img + + +def auto_augment_transform(config_str, hparams): + """ + Create a AutoAugment transform + + :param config_str: String defining configuration of auto augmentation. Consists of multiple sections separated by + dashes ('-'). The first section defines the AutoAugment policy (one of 'v0', 'v0r', 'original', 'originalr'). + The remaining sections, not order sepecific determine + 'mstd' - float std deviation of magnitude noise applied + Ex 'original-mstd0.5' results in AutoAugment with original policy, magnitude_std 0.5 + + :param hparams: Other hparams (kwargs) for the AutoAugmentation scheme + + :return: A PyTorch compatible Transform + """ + config = config_str.split('-') + policy_name = config[0] + config = config[1:] + for c in config: + cs = re.split(r'(\d.*)', c) + if len(cs) < 2: + continue + key, val = cs[:2] + if key == 'mstd': + # noise param injected via hparams for now + hparams.setdefault('magnitude_std', float(val)) + else: + assert False, 'Unknown AutoAugment config section' + aa_policy = auto_augment_policy(policy_name, hparams=hparams) + return AutoAugment(aa_policy) + + +_RAND_TRANSFORMS = [ + 'AutoContrast', + 'Equalize', + 'Invert', + 'Rotate', + 'Posterize', + 'Solarize', + 'SolarizeAdd', + 'Color', + 'Contrast', + 'Brightness', + 'Sharpness', + 'ShearX', + 'ShearY', + 'TranslateXRel', + 'TranslateYRel', + #'Cutout' # NOTE I've implement this as random erasing separately +] + + +_RAND_INCREASING_TRANSFORMS = [ + 'AutoContrast', + 'Equalize', + 'Invert', + 'Rotate', + 'PosterizeIncreasing', + 'SolarizeIncreasing', + 'SolarizeAdd', + 'ColorIncreasing', + 'ContrastIncreasing', + 'BrightnessIncreasing', + 'SharpnessIncreasing', + 'ShearX', + 'ShearY', + 'TranslateXRel', + 'TranslateYRel', + #'Cutout' # NOTE I've implement this as random erasing separately +] + + + +# These experimental weights are based loosely on the relative improvements mentioned in paper. +# They may not result in increased performance, but could likely be tuned to so. +_RAND_CHOICE_WEIGHTS_0 = { + 'Rotate': 0.3, + 'ShearX': 0.2, + 'ShearY': 0.2, + 'TranslateXRel': 0.1, + 'TranslateYRel': 0.1, + 'Color': .025, + 'Sharpness': 0.025, + 'AutoContrast': 0.025, + 'Solarize': .005, + 'SolarizeAdd': .005, + 'Contrast': .005, + 'Brightness': .005, + 'Equalize': .005, + 'Posterize': 0, + 'Invert': 0, +} + + +def _select_rand_weights(weight_idx=0, transforms=None): + transforms = transforms or _RAND_TRANSFORMS + assert weight_idx == 0 # only one set of weights currently + rand_weights = _RAND_CHOICE_WEIGHTS_0 + probs = [rand_weights[k] for k in transforms] + probs /= np.sum(probs) + return probs + + +def rand_augment_ops(magnitude=10, hparams=None, transforms=None): + hparams = hparams or _HPARAMS_DEFAULT + transforms = transforms or _RAND_TRANSFORMS + return [AugmentOp( + name, prob=0.5, magnitude=magnitude, hparams=hparams) for name in transforms] + + +class RandAugment: + def __init__(self, ops, num_layers=2, choice_weights=None): + self.ops = ops + self.num_layers = num_layers + self.choice_weights = choice_weights + + def __call__(self, img): + # no replacement when using weighted choice + ops = np.random.choice( + self.ops, self.num_layers, replace=self.choice_weights is None, p=self.choice_weights) + for op in ops: + img = op(img) + return img + + +def rand_augment_transform(config_str, hparams): + """ + Create a RandAugment transform + + :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by + dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining + sections, not order sepecific determine + 'm' - integer magnitude of rand augment + 'n' - integer num layers (number of transform ops selected per image) + 'w' - integer probabiliy weight index (index of a set of weights to influence choice of op) + 'mstd' - float std deviation of magnitude noise applied, or uniform sampling if infinity (or > 100) + 'mmax' - set upper bound for magnitude to something other than default of _LEVEL_DENOM (10) + 'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0) + Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5 + 'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2 + + :param hparams: Other hparams (kwargs) for the RandAugmentation scheme + + :return: A PyTorch compatible Transform + """ + magnitude = _LEVEL_DENOM # default to _LEVEL_DENOM for magnitude (currently 10) + num_layers = 2 # default to 2 ops per image + weight_idx = None # default to no probability weights for op choice + transforms = _RAND_TRANSFORMS + config = config_str.split('-') + assert config[0] == 'rand' + config = config[1:] + for c in config: + cs = re.split(r'(\d.*)', c) + if len(cs) < 2: + continue + key, val = cs[:2] + if key == 'mstd': + # noise param / randomization of magnitude values + mstd = float(val) + if mstd > 100: + # use uniform sampling in 0 to magnitude if mstd is > 100 + mstd = float('inf') + hparams.setdefault('magnitude_std', mstd) + elif key == 'mmax': + # clip magnitude between [0, mmax] instead of default [0, _LEVEL_DENOM] + hparams.setdefault('magnitude_max', int(val)) + elif key == 'inc': + if bool(val): + transforms = _RAND_INCREASING_TRANSFORMS + elif key == 'm': + magnitude = int(val) + elif key == 'n': + num_layers = int(val) + elif key == 'w': + weight_idx = int(val) + else: + assert False, 'Unknown RandAugment config section' + ra_ops = rand_augment_ops(magnitude=magnitude, hparams=hparams, transforms=transforms) + choice_weights = None if weight_idx is None else _select_rand_weights(weight_idx) + return RandAugment(ra_ops, num_layers, choice_weights=choice_weights) + + +_AUGMIX_TRANSFORMS = [ + 'AutoContrast', + 'ColorIncreasing', # not in paper + 'ContrastIncreasing', # not in paper + 'BrightnessIncreasing', # not in paper + 'SharpnessIncreasing', # not in paper + 'Equalize', + 'Rotate', + 'PosterizeIncreasing', + 'SolarizeIncreasing', + 'ShearX', + 'ShearY', + 'TranslateXRel', + 'TranslateYRel', +] + + +def augmix_ops(magnitude=10, hparams=None, transforms=None): + hparams = hparams or _HPARAMS_DEFAULT + transforms = transforms or _AUGMIX_TRANSFORMS + return [AugmentOp( + name, prob=1.0, magnitude=magnitude, hparams=hparams) for name in transforms] + + +class AugMixAugment: + """ AugMix Transform + Adapted and improved from impl here: https://github.com/google-research/augmix/blob/master/imagenet.py + From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - + https://arxiv.org/abs/1912.02781 + """ + def __init__(self, ops, alpha=1., width=3, depth=-1, blended=False): + self.ops = ops + self.alpha = alpha + self.width = width + self.depth = depth + self.blended = blended # blended mode is faster but not well tested + + def _calc_blended_weights(self, ws, m): + ws = ws * m + cump = 1. + rws = [] + for w in ws[::-1]: + alpha = w / cump + cump *= (1 - alpha) + rws.append(alpha) + return np.array(rws[::-1], dtype=np.float32) + + def _apply_blended(self, img, mixing_weights, m): + # This is my first crack and implementing a slightly faster mixed augmentation. Instead + # of accumulating the mix for each chain in a Numpy array and then blending with original, + # it recomputes the blending coefficients and applies one PIL image blend per chain. + # TODO the results appear in the right ballpark but they differ by more than rounding. + img_orig = img.copy() + ws = self._calc_blended_weights(mixing_weights, m) + for w in ws: + depth = self.depth if self.depth > 0 else np.random.randint(1, 4) + ops = np.random.choice(self.ops, depth, replace=True) + img_aug = img_orig # no ops are in-place, deep copy not necessary + for op in ops: + img_aug = op(img_aug) + img = Image.blend(img, img_aug, w) + return img + + def _apply_basic(self, img, mixing_weights, m): + # This is a literal adaptation of the paper/official implementation without normalizations and + # PIL <-> Numpy conversions between every op. It is still quite CPU compute heavy compared to the + # typical augmentation transforms, could use a GPU / Kornia implementation. + img_shape = img.size[0], img.size[1], len(img.getbands()) + mixed = np.zeros(img_shape, dtype=np.float32) + for mw in mixing_weights: + depth = self.depth if self.depth > 0 else np.random.randint(1, 4) + ops = np.random.choice(self.ops, depth, replace=True) + img_aug = img # no ops are in-place, deep copy not necessary + for op in ops: + img_aug = op(img_aug) + mixed += mw * np.asarray(img_aug, dtype=np.float32) + np.clip(mixed, 0, 255., out=mixed) + mixed = Image.fromarray(mixed.astype(np.uint8)) + return Image.blend(img, mixed, m) + + def __call__(self, img): + mixing_weights = np.float32(np.random.dirichlet([self.alpha] * self.width)) + m = np.float32(np.random.beta(self.alpha, self.alpha)) + if self.blended: + mixed = self._apply_blended(img, mixing_weights, m) + else: + mixed = self._apply_basic(img, mixing_weights, m) + return mixed + + +def augment_and_mix_transform(config_str, hparams): + """ Create AugMix PyTorch transform + + :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by + dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining + sections, not order sepecific determine + 'm' - integer magnitude (severity) of augmentation mix (default: 3) + 'w' - integer width of augmentation chain (default: 3) + 'd' - integer depth of augmentation chain (-1 is random [1, 3], default: -1) + 'b' - integer (bool), blend each branch of chain into end result without a final blend, less CPU (default: 0) + 'mstd' - float std deviation of magnitude noise applied (default: 0) + Ex 'augmix-m5-w4-d2' results in AugMix with severity 5, chain width 4, chain depth 2 + + :param hparams: Other hparams (kwargs) for the Augmentation transforms + + :return: A PyTorch compatible Transform + """ + magnitude = 3 + width = 3 + depth = -1 + alpha = 1. + blended = False + config = config_str.split('-') + assert config[0] == 'augmix' + config = config[1:] + for c in config: + cs = re.split(r'(\d.*)', c) + if len(cs) < 2: + continue + key, val = cs[:2] + if key == 'mstd': + # noise param injected via hparams for now + hparams.setdefault('magnitude_std', float(val)) + elif key == 'm': + magnitude = int(val) + elif key == 'w': + width = int(val) + elif key == 'd': + depth = int(val) + elif key == 'a': + alpha = float(val) + elif key == 'b': + blended = bool(val) + else: + assert False, 'Unknown AugMix config section' + hparams.setdefault('magnitude_std', float('inf')) # default to uniform sampling (if not set via mstd arg) + ops = augmix_ops(magnitude=magnitude, hparams=hparams) + return AugMixAugment(ops, alpha=alpha, width=width, depth=depth, blended=blended) diff --git a/testbed/huggingface__pytorch-image-models/timm/data/config.py b/testbed/huggingface__pytorch-image-models/timm/data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..38f5689a707f5602e38cb717ed6115f26a0d7ea2 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/config.py @@ -0,0 +1,78 @@ +import logging +from .constants import * + + +_logger = logging.getLogger(__name__) + + +def resolve_data_config(args, default_cfg={}, model=None, use_test_size=False, verbose=False): + new_config = {} + default_cfg = default_cfg + if not default_cfg and model is not None and hasattr(model, 'default_cfg'): + default_cfg = model.default_cfg + + # Resolve input/image size + in_chans = 3 + if 'chans' in args and args['chans'] is not None: + in_chans = args['chans'] + + input_size = (in_chans, 224, 224) + if 'input_size' in args and args['input_size'] is not None: + assert isinstance(args['input_size'], (tuple, list)) + assert len(args['input_size']) == 3 + input_size = tuple(args['input_size']) + in_chans = input_size[0] # input_size overrides in_chans + elif 'img_size' in args and args['img_size'] is not None: + assert isinstance(args['img_size'], int) + input_size = (in_chans, args['img_size'], args['img_size']) + else: + if use_test_size and 'test_input_size' in default_cfg: + input_size = default_cfg['test_input_size'] + elif 'input_size' in default_cfg: + input_size = default_cfg['input_size'] + new_config['input_size'] = input_size + + # resolve interpolation method + new_config['interpolation'] = 'bicubic' + if 'interpolation' in args and args['interpolation']: + new_config['interpolation'] = args['interpolation'] + elif 'interpolation' in default_cfg: + new_config['interpolation'] = default_cfg['interpolation'] + + # resolve dataset + model mean for normalization + new_config['mean'] = IMAGENET_DEFAULT_MEAN + if 'mean' in args and args['mean'] is not None: + mean = tuple(args['mean']) + if len(mean) == 1: + mean = tuple(list(mean) * in_chans) + else: + assert len(mean) == in_chans + new_config['mean'] = mean + elif 'mean' in default_cfg: + new_config['mean'] = default_cfg['mean'] + + # resolve dataset + model std deviation for normalization + new_config['std'] = IMAGENET_DEFAULT_STD + if 'std' in args and args['std'] is not None: + std = tuple(args['std']) + if len(std) == 1: + std = tuple(list(std) * in_chans) + else: + assert len(std) == in_chans + new_config['std'] = std + elif 'std' in default_cfg: + new_config['std'] = default_cfg['std'] + + # resolve default crop percentage + new_config['crop_pct'] = DEFAULT_CROP_PCT + if 'crop_pct' in args and args['crop_pct'] is not None: + new_config['crop_pct'] = args['crop_pct'] + elif 'crop_pct' in default_cfg: + new_config['crop_pct'] = default_cfg['crop_pct'] + + if verbose: + _logger.info('Data processing configuration for current model + dataset:') + for n, v in new_config.items(): + _logger.info('\t%s: %s' % (n, str(v))) + + return new_config diff --git a/testbed/huggingface__pytorch-image-models/timm/data/constants.py b/testbed/huggingface__pytorch-image-models/timm/data/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..d6d4a01b0316989a3f5142167f1e384b098bc930 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/constants.py @@ -0,0 +1,7 @@ +DEFAULT_CROP_PCT = 0.875 +IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) +IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) +IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5) +IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5) +IMAGENET_DPN_MEAN = (124 / 255, 117 / 255, 104 / 255) +IMAGENET_DPN_STD = tuple([1 / (.0167 * 255)] * 3) diff --git a/testbed/huggingface__pytorch-image-models/timm/data/dataset.py b/testbed/huggingface__pytorch-image-models/timm/data/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..e719f3f6d7db178eb29fd902e85b64ac5ec09dd8 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/dataset.py @@ -0,0 +1,146 @@ +""" Quick n Simple Image Folder, Tarfile based DataSet + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch.utils.data as data +import os +import torch +import logging + +from PIL import Image + +from .parsers import create_parser + +_logger = logging.getLogger(__name__) + + +_ERROR_RETRY = 50 + + +class ImageDataset(data.Dataset): + + def __init__( + self, + root, + parser=None, + class_map='', + load_bytes=False, + transform=None, + ): + if parser is None or isinstance(parser, str): + parser = create_parser(parser or '', root=root, class_map=class_map) + self.parser = parser + self.load_bytes = load_bytes + self.transform = transform + self._consecutive_errors = 0 + + def __getitem__(self, index): + img, target = self.parser[index] + try: + img = img.read() if self.load_bytes else Image.open(img).convert('RGB') + except Exception as e: + _logger.warning(f'Skipped sample (index {index}, file {self.parser.filename(index)}). {str(e)}') + self._consecutive_errors += 1 + if self._consecutive_errors < _ERROR_RETRY: + return self.__getitem__((index + 1) % len(self.parser)) + else: + raise e + self._consecutive_errors = 0 + if self.transform is not None: + img = self.transform(img) + if target is None: + target = torch.tensor(-1, dtype=torch.long) + return img, target + + def __len__(self): + return len(self.parser) + + def filename(self, index, basename=False, absolute=False): + return self.parser.filename(index, basename, absolute) + + def filenames(self, basename=False, absolute=False): + return self.parser.filenames(basename, absolute) + + +class IterableImageDataset(data.IterableDataset): + + def __init__( + self, + root, + parser=None, + split='train', + is_training=False, + batch_size=None, + class_map='', + load_bytes=False, + repeats=0, + transform=None, + ): + assert parser is not None + if isinstance(parser, str): + self.parser = create_parser( + parser, root=root, split=split, is_training=is_training, batch_size=batch_size, repeats=repeats) + else: + self.parser = parser + self.transform = transform + self._consecutive_errors = 0 + + def __iter__(self): + for img, target in self.parser: + if self.transform is not None: + img = self.transform(img) + if target is None: + target = torch.tensor(-1, dtype=torch.long) + yield img, target + + def __len__(self): + if hasattr(self.parser, '__len__'): + return len(self.parser) + else: + return 0 + + def filename(self, index, basename=False, absolute=False): + assert False, 'Filename lookup by index not supported, use filenames().' + + def filenames(self, basename=False, absolute=False): + return self.parser.filenames(basename, absolute) + + +class AugMixDataset(torch.utils.data.Dataset): + """Dataset wrapper to perform AugMix or other clean/augmentation mixes""" + + def __init__(self, dataset, num_splits=2): + self.augmentation = None + self.normalize = None + self.dataset = dataset + if self.dataset.transform is not None: + self._set_transforms(self.dataset.transform) + self.num_splits = num_splits + + def _set_transforms(self, x): + assert isinstance(x, (list, tuple)) and len(x) == 3, 'Expecting a tuple/list of 3 transforms' + self.dataset.transform = x[0] + self.augmentation = x[1] + self.normalize = x[2] + + @property + def transform(self): + return self.dataset.transform + + @transform.setter + def transform(self, x): + self._set_transforms(x) + + def _normalize(self, x): + return x if self.normalize is None else self.normalize(x) + + def __getitem__(self, i): + x, y = self.dataset[i] # all splits share the same dataset base transform + x_list = [self._normalize(x)] # first split only normalizes (this is the 'clean' split) + # run the full augmentation on the remaining splits + for _ in range(self.num_splits - 1): + x_list.append(self._normalize(self.augmentation(x))) + return tuple(x_list), y + + def __len__(self): + return len(self.dataset) diff --git a/testbed/huggingface__pytorch-image-models/timm/data/dataset_factory.py b/testbed/huggingface__pytorch-image-models/timm/data/dataset_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..ccc99d5c2c19b480a30cad74dacccceff24df61e --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/dataset_factory.py @@ -0,0 +1,30 @@ +import os + +from .dataset import IterableImageDataset, ImageDataset + + +def _search_split(root, split): + # look for sub-folder with name of split in root and use that if it exists + split_name = split.split('[')[0] + try_root = os.path.join(root, split_name) + if os.path.exists(try_root): + return try_root + if split_name == 'validation': + try_root = os.path.join(root, 'val') + if os.path.exists(try_root): + return try_root + return root + + +def create_dataset(name, root, split='validation', search_split=True, is_training=False, batch_size=None, **kwargs): + name = name.lower() + if name.startswith('tfds'): + ds = IterableImageDataset( + root, parser=name, split=split, is_training=is_training, batch_size=batch_size, **kwargs) + else: + # FIXME support more advance split cfg for ImageFolder/Tar datasets in the future + kwargs.pop('repeats', 0) # FIXME currently only Iterable dataset support the repeat multiplier + if search_split and os.path.isdir(root): + root = _search_split(root, split) + ds = ImageDataset(root, parser=name, **kwargs) + return ds diff --git a/testbed/huggingface__pytorch-image-models/timm/data/distributed_sampler.py b/testbed/huggingface__pytorch-image-models/timm/data/distributed_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..fa403d0acc83379ff39f3055473e587421351b01 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/distributed_sampler.py @@ -0,0 +1,128 @@ +import math +import torch +from torch.utils.data import Sampler +import torch.distributed as dist + + +class OrderedDistributedSampler(Sampler): + """Sampler that restricts data loading to a subset of the dataset. + It is especially useful in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each + process can pass a DistributedSampler instance as a DataLoader sampler, + and load a subset of the original dataset that is exclusive to it. + .. note:: + Dataset is assumed to be of constant size. + Arguments: + dataset: Dataset used for sampling. + num_replicas (optional): Number of processes participating in + distributed training. + rank (optional): Rank of the current process within num_replicas. + """ + + def __init__(self, dataset, num_replicas=None, rank=None): + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + + def __iter__(self): + indices = list(range(len(self.dataset))) + + # add extra samples to make it evenly divisible + indices += indices[:(self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + +class RepeatAugSampler(Sampler): + """Sampler that restricts data loading to a subset of the dataset for distributed, + with repeated augmentation. + It ensures that different each augmented version of a sample will be visible to a + different process (GPU). Heavily based on torch.utils.data.DistributedSampler + + This sampler was taken from https://github.com/facebookresearch/deit/blob/0c4b8f60/samplers.py + Used in + Copyright (c) 2015-present, Facebook, Inc. + """ + + def __init__( + self, + dataset, + num_replicas=None, + rank=None, + shuffle=True, + num_repeats=3, + selected_round=256, + selected_ratio=0, + ): + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.shuffle = shuffle + self.num_repeats = num_repeats + self.epoch = 0 + self.num_samples = int(math.ceil(len(self.dataset) * num_repeats / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + # Determine the number of samples to select per epoch for each rank. + # num_selected logic defaults to be the same as original RASampler impl, but this one can be tweaked + # via selected_ratio and selected_round args. + selected_ratio = selected_ratio or num_replicas # ratio to reduce selected samples by, num_replicas if 0 + if selected_round: + self.num_selected_samples = int(math.floor( + len(self.dataset) // selected_round * selected_round / selected_ratio)) + else: + self.num_selected_samples = int(math.ceil(len(self.dataset) / selected_ratio)) + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + if self.shuffle: + indices = torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = list(range(len(self.dataset))) + + # produce repeats e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2....] + indices = [x for x in indices for _ in range(self.num_repeats)] + # add extra samples to make it evenly divisible + padding_size = self.total_size - len(indices) + indices += indices[:padding_size] + assert len(indices) == self.total_size + + # subsample per rank + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + # return up to num selected samples + return iter(indices[:self.num_selected_samples]) + + def __len__(self): + return self.num_selected_samples + + def set_epoch(self, epoch): + self.epoch = epoch \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/timm/data/loader.py b/testbed/huggingface__pytorch-image-models/timm/data/loader.py new file mode 100644 index 0000000000000000000000000000000000000000..99cf132f07d38f47c80d843ed6fe5b9b3e1d7909 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/loader.py @@ -0,0 +1,268 @@ +""" Loader Factory, Fast Collate, CUDA Prefetcher + +Prefetcher and Fast Collate inspired by NVIDIA APEX example at +https://github.com/NVIDIA/apex/commit/d5e2bb4bdeedd27b1dfaf5bb2b24d6c000dee9be#diff-cf86c282ff7fba81fad27a559379d5bf + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch.utils.data +import numpy as np + +from .transforms_factory import create_transform +from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .distributed_sampler import OrderedDistributedSampler, RepeatAugSampler +from .random_erasing import RandomErasing +from .mixup import FastCollateMixup + + +def fast_collate(batch): + """ A fast collation function optimized for uint8 images (np array or torch) and int64 targets (labels)""" + assert isinstance(batch[0], tuple) + batch_size = len(batch) + if isinstance(batch[0][0], tuple): + # This branch 'deinterleaves' and flattens tuples of input tensors into one tensor ordered by position + # such that all tuple of position n will end up in a torch.split(tensor, batch_size) in nth position + inner_tuple_size = len(batch[0][0]) + flattened_batch_size = batch_size * inner_tuple_size + targets = torch.zeros(flattened_batch_size, dtype=torch.int64) + tensor = torch.zeros((flattened_batch_size, *batch[0][0][0].shape), dtype=torch.uint8) + for i in range(batch_size): + assert len(batch[i][0]) == inner_tuple_size # all input tensor tuples must be same length + for j in range(inner_tuple_size): + targets[i + j * batch_size] = batch[i][1] + tensor[i + j * batch_size] += torch.from_numpy(batch[i][0][j]) + return tensor, targets + elif isinstance(batch[0][0], np.ndarray): + targets = torch.tensor([b[1] for b in batch], dtype=torch.int64) + assert len(targets) == batch_size + tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8) + for i in range(batch_size): + tensor[i] += torch.from_numpy(batch[i][0]) + return tensor, targets + elif isinstance(batch[0][0], torch.Tensor): + targets = torch.tensor([b[1] for b in batch], dtype=torch.int64) + assert len(targets) == batch_size + tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8) + for i in range(batch_size): + tensor[i].copy_(batch[i][0]) + return tensor, targets + else: + assert False + + +class PrefetchLoader: + + def __init__(self, + loader, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + fp16=False, + re_prob=0., + re_mode='const', + re_count=1, + re_num_splits=0): + self.loader = loader + self.mean = torch.tensor([x * 255 for x in mean]).cuda().view(1, 3, 1, 1) + self.std = torch.tensor([x * 255 for x in std]).cuda().view(1, 3, 1, 1) + self.fp16 = fp16 + if fp16: + self.mean = self.mean.half() + self.std = self.std.half() + if re_prob > 0.: + self.random_erasing = RandomErasing( + probability=re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits) + else: + self.random_erasing = None + + def __iter__(self): + stream = torch.cuda.Stream() + first = True + + for next_input, next_target in self.loader: + with torch.cuda.stream(stream): + next_input = next_input.cuda(non_blocking=True) + next_target = next_target.cuda(non_blocking=True) + if self.fp16: + next_input = next_input.half().sub_(self.mean).div_(self.std) + else: + next_input = next_input.float().sub_(self.mean).div_(self.std) + if self.random_erasing is not None: + next_input = self.random_erasing(next_input) + + if not first: + yield input, target + else: + first = False + + torch.cuda.current_stream().wait_stream(stream) + input = next_input + target = next_target + + yield input, target + + def __len__(self): + return len(self.loader) + + @property + def sampler(self): + return self.loader.sampler + + @property + def dataset(self): + return self.loader.dataset + + @property + def mixup_enabled(self): + if isinstance(self.loader.collate_fn, FastCollateMixup): + return self.loader.collate_fn.mixup_enabled + else: + return False + + @mixup_enabled.setter + def mixup_enabled(self, x): + if isinstance(self.loader.collate_fn, FastCollateMixup): + self.loader.collate_fn.mixup_enabled = x + + +def create_loader( + dataset, + input_size, + batch_size, + is_training=False, + use_prefetcher=True, + no_aug=False, + re_prob=0., + re_mode='const', + re_count=1, + re_split=False, + scale=None, + ratio=None, + hflip=0.5, + vflip=0., + color_jitter=0.4, + auto_augment=None, + num_aug_repeats=0, + num_aug_splits=0, + interpolation='bilinear', + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + num_workers=1, + distributed=False, + crop_pct=None, + collate_fn=None, + pin_memory=False, + fp16=False, + tf_preprocessing=False, + use_multi_epochs_loader=False, + persistent_workers=True, +): + re_num_splits = 0 + if re_split: + # apply RE to second half of batch if no aug split otherwise line up with aug split + re_num_splits = num_aug_splits or 2 + dataset.transform = create_transform( + input_size, + is_training=is_training, + use_prefetcher=use_prefetcher, + no_aug=no_aug, + scale=scale, + ratio=ratio, + hflip=hflip, + vflip=vflip, + color_jitter=color_jitter, + auto_augment=auto_augment, + interpolation=interpolation, + mean=mean, + std=std, + crop_pct=crop_pct, + tf_preprocessing=tf_preprocessing, + re_prob=re_prob, + re_mode=re_mode, + re_count=re_count, + re_num_splits=re_num_splits, + separate=num_aug_splits > 0, + ) + + sampler = None + if distributed and not isinstance(dataset, torch.utils.data.IterableDataset): + if is_training: + if num_aug_repeats: + sampler = RepeatAugSampler(dataset, num_repeats=num_aug_repeats) + else: + sampler = torch.utils.data.distributed.DistributedSampler(dataset) + else: + # This will add extra duplicate entries to result in equal num + # of samples per-process, will slightly alter validation results + sampler = OrderedDistributedSampler(dataset) + else: + assert num_aug_repeats == 0, "RepeatAugment not currently supported in non-distributed or IterableDataset use" + + if collate_fn is None: + collate_fn = fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate + + loader_class = torch.utils.data.DataLoader + + if use_multi_epochs_loader: + loader_class = MultiEpochsDataLoader + + loader_args = dict( + batch_size=batch_size, + shuffle=not isinstance(dataset, torch.utils.data.IterableDataset) and sampler is None and is_training, + num_workers=num_workers, + sampler=sampler, + collate_fn=collate_fn, + pin_memory=pin_memory, + drop_last=is_training, + persistent_workers=persistent_workers) + try: + loader = loader_class(dataset, **loader_args) + except TypeError as e: + loader_args.pop('persistent_workers') # only in Pytorch 1.7+ + loader = loader_class(dataset, **loader_args) + if use_prefetcher: + prefetch_re_prob = re_prob if is_training and not no_aug else 0. + loader = PrefetchLoader( + loader, + mean=mean, + std=std, + fp16=fp16, + re_prob=prefetch_re_prob, + re_mode=re_mode, + re_count=re_count, + re_num_splits=re_num_splits + ) + + return loader + + +class MultiEpochsDataLoader(torch.utils.data.DataLoader): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._DataLoader__initialized = False + self.batch_sampler = _RepeatSampler(self.batch_sampler) + self._DataLoader__initialized = True + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for i in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler(object): + """ Sampler that repeats forever. + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) diff --git a/testbed/huggingface__pytorch-image-models/timm/data/mixup.py b/testbed/huggingface__pytorch-image-models/timm/data/mixup.py new file mode 100644 index 0000000000000000000000000000000000000000..38477548a070a1a338ed18ddc74cdaf5050f84be --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/mixup.py @@ -0,0 +1,316 @@ +""" Mixup and Cutmix + +Papers: +mixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412) + +CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features (https://arxiv.org/abs/1905.04899) + +Code Reference: +CutMix: https://github.com/clovaai/CutMix-PyTorch + +Hacked together by / Copyright 2020 Ross Wightman +""" +import numpy as np +import torch + + +def one_hot(x, num_classes, on_value=1., off_value=0., device='cuda'): + x = x.long().view(-1, 1) + return torch.full((x.size()[0], num_classes), off_value, device=device).scatter_(1, x, on_value) + + +def mixup_target(target, num_classes, lam=1., smoothing=0.0, device='cuda'): + off_value = smoothing / num_classes + on_value = 1. - smoothing + off_value + y1 = one_hot(target, num_classes, on_value=on_value, off_value=off_value, device=device) + y2 = one_hot(target.flip(0), num_classes, on_value=on_value, off_value=off_value, device=device) + return y1 * lam + y2 * (1. - lam) + + +def rand_bbox(img_shape, lam, margin=0., count=None): + """ Standard CutMix bounding-box + Generates a random square bbox based on lambda value. This impl includes + support for enforcing a border margin as percent of bbox dimensions. + + Args: + img_shape (tuple): Image shape as tuple + lam (float): Cutmix lambda value + margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image) + count (int): Number of bbox to generate + """ + ratio = np.sqrt(1 - lam) + img_h, img_w = img_shape[-2:] + cut_h, cut_w = int(img_h * ratio), int(img_w * ratio) + margin_y, margin_x = int(margin * cut_h), int(margin * cut_w) + cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count) + cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count) + yl = np.clip(cy - cut_h // 2, 0, img_h) + yh = np.clip(cy + cut_h // 2, 0, img_h) + xl = np.clip(cx - cut_w // 2, 0, img_w) + xh = np.clip(cx + cut_w // 2, 0, img_w) + return yl, yh, xl, xh + + +def rand_bbox_minmax(img_shape, minmax, count=None): + """ Min-Max CutMix bounding-box + Inspired by Darknet cutmix impl, generates a random rectangular bbox + based on min/max percent values applied to each dimension of the input image. + + Typical defaults for minmax are usually in the .2-.3 for min and .8-.9 range for max. + + Args: + img_shape (tuple): Image shape as tuple + minmax (tuple or list): Min and max bbox ratios (as percent of image size) + count (int): Number of bbox to generate + """ + assert len(minmax) == 2 + img_h, img_w = img_shape[-2:] + cut_h = np.random.randint(int(img_h * minmax[0]), int(img_h * minmax[1]), size=count) + cut_w = np.random.randint(int(img_w * minmax[0]), int(img_w * minmax[1]), size=count) + yl = np.random.randint(0, img_h - cut_h, size=count) + xl = np.random.randint(0, img_w - cut_w, size=count) + yu = yl + cut_h + xu = xl + cut_w + return yl, yu, xl, xu + + +def cutmix_bbox_and_lam(img_shape, lam, ratio_minmax=None, correct_lam=True, count=None): + """ Generate bbox and apply lambda correction. + """ + if ratio_minmax is not None: + yl, yu, xl, xu = rand_bbox_minmax(img_shape, ratio_minmax, count=count) + else: + yl, yu, xl, xu = rand_bbox(img_shape, lam, count=count) + if correct_lam or ratio_minmax is not None: + bbox_area = (yu - yl) * (xu - xl) + lam = 1. - bbox_area / float(img_shape[-2] * img_shape[-1]) + return (yl, yu, xl, xu), lam + + +class Mixup: + """ Mixup/Cutmix that applies different params to each element or whole batch + + Args: + mixup_alpha (float): mixup alpha value, mixup is active if > 0. + cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0. + cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None. + prob (float): probability of applying mixup or cutmix per batch or element + switch_prob (float): probability of switching to cutmix instead of mixup when both are active + mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element) + correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders + label_smoothing (float): apply label smoothing to the mixed target tensor + num_classes (int): number of classes for target + """ + def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5, + mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000): + self.mixup_alpha = mixup_alpha + self.cutmix_alpha = cutmix_alpha + self.cutmix_minmax = cutmix_minmax + if self.cutmix_minmax is not None: + assert len(self.cutmix_minmax) == 2 + # force cutmix alpha == 1.0 when minmax active to keep logic simple & safe + self.cutmix_alpha = 1.0 + self.mix_prob = prob + self.switch_prob = switch_prob + self.label_smoothing = label_smoothing + self.num_classes = num_classes + self.mode = mode + self.correct_lam = correct_lam # correct lambda based on clipped area for cutmix + self.mixup_enabled = True # set to false to disable mixing (intended tp be set by train loop) + + def _params_per_elem(self, batch_size): + lam = np.ones(batch_size, dtype=np.float32) + use_cutmix = np.zeros(batch_size, dtype=np.bool) + if self.mixup_enabled: + if self.mixup_alpha > 0. and self.cutmix_alpha > 0.: + use_cutmix = np.random.rand(batch_size) < self.switch_prob + lam_mix = np.where( + use_cutmix, + np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size), + np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)) + elif self.mixup_alpha > 0.: + lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size) + elif self.cutmix_alpha > 0.: + use_cutmix = np.ones(batch_size, dtype=np.bool) + lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size) + else: + assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true." + lam = np.where(np.random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam) + return lam, use_cutmix + + def _params_per_batch(self): + lam = 1. + use_cutmix = False + if self.mixup_enabled and np.random.rand() < self.mix_prob: + if self.mixup_alpha > 0. and self.cutmix_alpha > 0.: + use_cutmix = np.random.rand() < self.switch_prob + lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \ + np.random.beta(self.mixup_alpha, self.mixup_alpha) + elif self.mixup_alpha > 0.: + lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha) + elif self.cutmix_alpha > 0.: + use_cutmix = True + lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) + else: + assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true." + lam = float(lam_mix) + return lam, use_cutmix + + def _mix_elem(self, x): + batch_size = len(x) + lam_batch, use_cutmix = self._params_per_elem(batch_size) + x_orig = x.clone() # need to keep an unmodified original for mixing source + for i in range(batch_size): + j = batch_size - i - 1 + lam = lam_batch[i] + if lam != 1.: + if use_cutmix[i]: + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh] + lam_batch[i] = lam + else: + x[i] = x[i] * lam + x_orig[j] * (1 - lam) + return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1) + + def _mix_pair(self, x): + batch_size = len(x) + lam_batch, use_cutmix = self._params_per_elem(batch_size // 2) + x_orig = x.clone() # need to keep an unmodified original for mixing source + for i in range(batch_size // 2): + j = batch_size - i - 1 + lam = lam_batch[i] + if lam != 1.: + if use_cutmix[i]: + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh] + x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh] + lam_batch[i] = lam + else: + x[i] = x[i] * lam + x_orig[j] * (1 - lam) + x[j] = x[j] * lam + x_orig[i] * (1 - lam) + lam_batch = np.concatenate((lam_batch, lam_batch[::-1])) + return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1) + + def _mix_batch(self, x): + lam, use_cutmix = self._params_per_batch() + if lam == 1.: + return 1. + if use_cutmix: + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh] + else: + x_flipped = x.flip(0).mul_(1. - lam) + x.mul_(lam).add_(x_flipped) + return lam + + def __call__(self, x, target): + assert len(x) % 2 == 0, 'Batch size should be even when using this' + if self.mode == 'elem': + lam = self._mix_elem(x) + elif self.mode == 'pair': + lam = self._mix_pair(x) + else: + lam = self._mix_batch(x) + target = mixup_target(target, self.num_classes, lam, self.label_smoothing) + return x, target + + +class FastCollateMixup(Mixup): + """ Fast Collate w/ Mixup/Cutmix that applies different params to each element or whole batch + + A Mixup impl that's performed while collating the batches. + """ + + def _mix_elem_collate(self, output, batch, half=False): + batch_size = len(batch) + num_elem = batch_size // 2 if half else batch_size + assert len(output) == num_elem + lam_batch, use_cutmix = self._params_per_elem(num_elem) + for i in range(num_elem): + j = batch_size - i - 1 + lam = lam_batch[i] + mixed = batch[i][0] + if lam != 1.: + if use_cutmix[i]: + if not half: + mixed = mixed.copy() + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh] + lam_batch[i] = lam + else: + mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam) + np.rint(mixed, out=mixed) + output[i] += torch.from_numpy(mixed.astype(np.uint8)) + if half: + lam_batch = np.concatenate((lam_batch, np.ones(num_elem))) + return torch.tensor(lam_batch).unsqueeze(1) + + def _mix_pair_collate(self, output, batch): + batch_size = len(batch) + lam_batch, use_cutmix = self._params_per_elem(batch_size // 2) + for i in range(batch_size // 2): + j = batch_size - i - 1 + lam = lam_batch[i] + mixed_i = batch[i][0] + mixed_j = batch[j][0] + assert 0 <= lam <= 1.0 + if lam < 1.: + if use_cutmix[i]: + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + patch_i = mixed_i[:, yl:yh, xl:xh].copy() + mixed_i[:, yl:yh, xl:xh] = mixed_j[:, yl:yh, xl:xh] + mixed_j[:, yl:yh, xl:xh] = patch_i + lam_batch[i] = lam + else: + mixed_temp = mixed_i.astype(np.float32) * lam + mixed_j.astype(np.float32) * (1 - lam) + mixed_j = mixed_j.astype(np.float32) * lam + mixed_i.astype(np.float32) * (1 - lam) + mixed_i = mixed_temp + np.rint(mixed_j, out=mixed_j) + np.rint(mixed_i, out=mixed_i) + output[i] += torch.from_numpy(mixed_i.astype(np.uint8)) + output[j] += torch.from_numpy(mixed_j.astype(np.uint8)) + lam_batch = np.concatenate((lam_batch, lam_batch[::-1])) + return torch.tensor(lam_batch).unsqueeze(1) + + def _mix_batch_collate(self, output, batch): + batch_size = len(batch) + lam, use_cutmix = self._params_per_batch() + if use_cutmix: + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + for i in range(batch_size): + j = batch_size - i - 1 + mixed = batch[i][0] + if lam != 1.: + if use_cutmix: + mixed = mixed.copy() # don't want to modify the original while iterating + mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh] + else: + mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam) + np.rint(mixed, out=mixed) + output[i] += torch.from_numpy(mixed.astype(np.uint8)) + return lam + + def __call__(self, batch, _=None): + batch_size = len(batch) + assert batch_size % 2 == 0, 'Batch size should be even when using this' + half = 'half' in self.mode + if half: + batch_size //= 2 + output = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8) + if self.mode == 'elem' or self.mode == 'half': + lam = self._mix_elem_collate(output, batch, half=half) + elif self.mode == 'pair': + lam = self._mix_pair_collate(output, batch) + else: + lam = self._mix_batch_collate(output, batch) + target = torch.tensor([b[1] for b in batch], dtype=torch.int64) + target = mixup_target(target, self.num_classes, lam, self.label_smoothing, device='cpu') + target = target[:batch_size] + return output, target + diff --git a/testbed/huggingface__pytorch-image-models/timm/data/parsers/__init__.py b/testbed/huggingface__pytorch-image-models/timm/data/parsers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eeb44e3714eff75028e15214e0e65bf2afebd86c --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/parsers/__init__.py @@ -0,0 +1 @@ +from .parser_factory import create_parser diff --git a/testbed/huggingface__pytorch-image-models/timm/data/parsers/class_map.py b/testbed/huggingface__pytorch-image-models/timm/data/parsers/class_map.py new file mode 100644 index 0000000000000000000000000000000000000000..9ef4d1fab4cb126c7737e6888420af76abed19bf --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/parsers/class_map.py @@ -0,0 +1,16 @@ +import os + + +def load_class_map(filename, root=''): + class_map_path = filename + if not os.path.exists(class_map_path): + class_map_path = os.path.join(root, filename) + assert os.path.exists(class_map_path), 'Cannot locate specified class map file (%s)' % filename + class_map_ext = os.path.splitext(filename)[-1].lower() + if class_map_ext == '.txt': + with open(class_map_path) as f: + class_to_idx = {v.strip(): k for k, v in enumerate(f)} + else: + assert False, 'Unsupported class map extension' + return class_to_idx + diff --git a/testbed/huggingface__pytorch-image-models/timm/data/parsers/constants.py b/testbed/huggingface__pytorch-image-models/timm/data/parsers/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..e7ba484e729b7ac976b2cedaa43be1c3b308eeeb --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/parsers/constants.py @@ -0,0 +1 @@ +IMG_EXTENSIONS = ('.png', '.jpg', '.jpeg') diff --git a/testbed/huggingface__pytorch-image-models/timm/data/parsers/parser.py b/testbed/huggingface__pytorch-image-models/timm/data/parsers/parser.py new file mode 100644 index 0000000000000000000000000000000000000000..76ab6d18283644702424d0ff2af5832d6d6dd3b7 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/parsers/parser.py @@ -0,0 +1,17 @@ +from abc import abstractmethod + + +class Parser: + def __init__(self): + pass + + @abstractmethod + def _filename(self, index, basename=False, absolute=False): + pass + + def filename(self, index, basename=False, absolute=False): + return self._filename(index, basename=basename, absolute=absolute) + + def filenames(self, basename=False, absolute=False): + return [self._filename(index, basename=basename, absolute=absolute) for index in range(len(self))] + diff --git a/testbed/huggingface__pytorch-image-models/timm/data/parsers/parser_factory.py b/testbed/huggingface__pytorch-image-models/timm/data/parsers/parser_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..419ffe899b476233dba84b6cb8d0851801da27a5 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/parsers/parser_factory.py @@ -0,0 +1,29 @@ +import os + +from .parser_image_folder import ParserImageFolder +from .parser_image_tar import ParserImageTar +from .parser_image_in_tar import ParserImageInTar + + +def create_parser(name, root, split='train', **kwargs): + name = name.lower() + name = name.split('/', 2) + prefix = '' + if len(name) > 1: + prefix = name[0] + name = name[-1] + + # FIXME improve the selection right now just tfds prefix or fallback path, will need options to + # explicitly select other options shortly + if prefix == 'tfds': + from .parser_tfds import ParserTfds # defer tensorflow import + parser = ParserTfds(root, name, split=split, shuffle=kwargs.pop('shuffle', False), **kwargs) + else: + assert os.path.exists(root) + # default fallback path (backwards compat), use image tar if root is a .tar file, otherwise image folder + # FIXME support split here, in parser? + if os.path.isfile(root) and os.path.splitext(root)[1] == '.tar': + parser = ParserImageInTar(root, **kwargs) + else: + parser = ParserImageFolder(root, **kwargs) + return parser diff --git a/testbed/huggingface__pytorch-image-models/timm/data/parsers/parser_image_folder.py b/testbed/huggingface__pytorch-image-models/timm/data/parsers/parser_image_folder.py new file mode 100644 index 0000000000000000000000000000000000000000..ed349009a4caed92e290e05637eca20e46cc275b --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/parsers/parser_image_folder.py @@ -0,0 +1,69 @@ +""" A dataset parser that reads images from folders + +Folders are scannerd recursively to find image files. Labels are based +on the folder hierarchy, just leaf folders by default. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import os + +from timm.utils.misc import natural_key + +from .parser import Parser +from .class_map import load_class_map +from .constants import IMG_EXTENSIONS + + +def find_images_and_targets(folder, types=IMG_EXTENSIONS, class_to_idx=None, leaf_name_only=True, sort=True): + labels = [] + filenames = [] + for root, subdirs, files in os.walk(folder, topdown=False, followlinks=True): + rel_path = os.path.relpath(root, folder) if (root != folder) else '' + label = os.path.basename(rel_path) if leaf_name_only else rel_path.replace(os.path.sep, '_') + for f in files: + base, ext = os.path.splitext(f) + if ext.lower() in types: + filenames.append(os.path.join(root, f)) + labels.append(label) + if class_to_idx is None: + # building class index + unique_labels = set(labels) + sorted_labels = list(sorted(unique_labels, key=natural_key)) + class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} + images_and_targets = [(f, class_to_idx[l]) for f, l in zip(filenames, labels) if l in class_to_idx] + if sort: + images_and_targets = sorted(images_and_targets, key=lambda k: natural_key(k[0])) + return images_and_targets, class_to_idx + + +class ParserImageFolder(Parser): + + def __init__( + self, + root, + class_map=''): + super().__init__() + + self.root = root + class_to_idx = None + if class_map: + class_to_idx = load_class_map(class_map, root) + self.samples, self.class_to_idx = find_images_and_targets(root, class_to_idx=class_to_idx) + if len(self.samples) == 0: + raise RuntimeError( + f'Found 0 images in subfolders of {root}. Supported image extensions are {", ".join(IMG_EXTENSIONS)}') + + def __getitem__(self, index): + path, target = self.samples[index] + return open(path, 'rb'), target + + def __len__(self): + return len(self.samples) + + def _filename(self, index, basename=False, absolute=False): + filename = self.samples[index][0] + if basename: + filename = os.path.basename(filename) + elif not absolute: + filename = os.path.relpath(filename, self.root) + return filename diff --git a/testbed/huggingface__pytorch-image-models/timm/data/parsers/parser_image_in_tar.py b/testbed/huggingface__pytorch-image-models/timm/data/parsers/parser_image_in_tar.py new file mode 100644 index 0000000000000000000000000000000000000000..c6ada962ca96eaa7d770014ba130c6de5b36b6ec --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/parsers/parser_image_in_tar.py @@ -0,0 +1,222 @@ +""" A dataset parser that reads tarfile based datasets + +This parser can read and extract image samples from: +* a single tar of image files +* a folder of multiple tarfiles containing imagefiles +* a tar of tars containing image files + +Labels are based on the combined folder and/or tar name structure. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import os +import tarfile +import pickle +import logging +import numpy as np +from glob import glob +from typing import List, Dict + +from timm.utils.misc import natural_key + +from .parser import Parser +from .class_map import load_class_map +from .constants import IMG_EXTENSIONS + + +_logger = logging.getLogger(__name__) +CACHE_FILENAME_SUFFIX = '_tarinfos.pickle' + + +class TarState: + + def __init__(self, tf: tarfile.TarFile = None, ti: tarfile.TarInfo = None): + self.tf: tarfile.TarFile = tf + self.ti: tarfile.TarInfo = ti + self.children: Dict[str, TarState] = {} # child states (tars within tars) + + def reset(self): + self.tf = None + + +def _extract_tarinfo(tf: tarfile.TarFile, parent_info: Dict, extensions=IMG_EXTENSIONS): + sample_count = 0 + for i, ti in enumerate(tf): + if not ti.isfile(): + continue + dirname, basename = os.path.split(ti.path) + name, ext = os.path.splitext(basename) + ext = ext.lower() + if ext == '.tar': + with tarfile.open(fileobj=tf.extractfile(ti), mode='r|') as ctf: + child_info = dict( + name=ti.name, path=os.path.join(parent_info['path'], name), ti=ti, children=[], samples=[]) + sample_count += _extract_tarinfo(ctf, child_info, extensions=extensions) + _logger.debug(f'{i}/?. Extracted child tarinfos from {ti.name}. {len(child_info["samples"])} images.') + parent_info['children'].append(child_info) + elif ext in extensions: + parent_info['samples'].append(ti) + sample_count += 1 + return sample_count + + +def extract_tarinfos(root, class_name_to_idx=None, cache_tarinfo=None, extensions=IMG_EXTENSIONS, sort=True): + root_is_tar = False + if os.path.isfile(root): + assert os.path.splitext(root)[-1].lower() == '.tar' + tar_filenames = [root] + root, root_name = os.path.split(root) + root_name = os.path.splitext(root_name)[0] + root_is_tar = True + else: + root_name = root.strip(os.path.sep).split(os.path.sep)[-1] + tar_filenames = glob(os.path.join(root, '*.tar'), recursive=True) + num_tars = len(tar_filenames) + tar_bytes = sum([os.path.getsize(f) for f in tar_filenames]) + assert num_tars, f'No .tar files found at specified path ({root}).' + + _logger.info(f'Scanning {tar_bytes/1024**2:.2f}MB of tar files...') + info = dict(tartrees=[]) + cache_path = '' + if cache_tarinfo is None: + cache_tarinfo = True if tar_bytes > 10*1024**3 else False # FIXME magic number, 10GB + if cache_tarinfo: + cache_filename = '_' + root_name + CACHE_FILENAME_SUFFIX + cache_path = os.path.join(root, cache_filename) + if os.path.exists(cache_path): + _logger.info(f'Reading tar info from cache file {cache_path}.') + with open(cache_path, 'rb') as pf: + info = pickle.load(pf) + assert len(info['tartrees']) == num_tars, "Cached tartree len doesn't match number of tarfiles" + else: + for i, fn in enumerate(tar_filenames): + path = '' if root_is_tar else os.path.splitext(os.path.basename(fn))[0] + with tarfile.open(fn, mode='r|') as tf: # tarinfo scans done in streaming mode + parent_info = dict(name=os.path.relpath(fn, root), path=path, ti=None, children=[], samples=[]) + num_samples = _extract_tarinfo(tf, parent_info, extensions=extensions) + num_children = len(parent_info["children"]) + _logger.debug( + f'{i}/{num_tars}. Extracted tarinfos from {fn}. {num_children} children, {num_samples} samples.') + info['tartrees'].append(parent_info) + if cache_path: + _logger.info(f'Writing tar info to cache file {cache_path}.') + with open(cache_path, 'wb') as pf: + pickle.dump(info, pf) + + samples = [] + labels = [] + build_class_map = False + if class_name_to_idx is None: + build_class_map = True + + # Flatten tartree info into lists of samples and targets w/ targets based on label id via + # class map arg or from unique paths. + # NOTE: currently only flattening up to two-levels, filesystem .tars and then one level of sub-tar children + # this covers my current use cases and keeps things a little easier to test for now. + tarfiles = [] + + def _label_from_paths(*path, leaf_only=True): + path = os.path.join(*path).strip(os.path.sep) + return path.split(os.path.sep)[-1] if leaf_only else path.replace(os.path.sep, '_') + + def _add_samples(info, fn): + added = 0 + for s in info['samples']: + label = _label_from_paths(info['path'], os.path.dirname(s.path)) + if not build_class_map and label not in class_name_to_idx: + continue + samples.append((s, fn, info['ti'])) + labels.append(label) + added += 1 + return added + + _logger.info(f'Collecting samples and building tar states.') + for parent_info in info['tartrees']: + # if tartree has children, we assume all samples are at the child level + tar_name = None if root_is_tar else parent_info['name'] + tar_state = TarState() + parent_added = 0 + for child_info in parent_info['children']: + child_added = _add_samples(child_info, fn=tar_name) + if child_added: + tar_state.children[child_info['name']] = TarState(ti=child_info['ti']) + parent_added += child_added + parent_added += _add_samples(parent_info, fn=tar_name) + if parent_added: + tarfiles.append((tar_name, tar_state)) + del info + + if build_class_map: + # build class index + sorted_labels = list(sorted(set(labels), key=natural_key)) + class_name_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} + + _logger.info(f'Mapping targets and sorting samples.') + samples_and_targets = [(s, class_name_to_idx[l]) for s, l in zip(samples, labels) if l in class_name_to_idx] + if sort: + samples_and_targets = sorted(samples_and_targets, key=lambda k: natural_key(k[0][0].path)) + samples, targets = zip(*samples_and_targets) + samples = np.array(samples) + targets = np.array(targets) + _logger.info(f'Finished processing {len(samples)} samples across {len(tarfiles)} tar files.') + return samples, targets, class_name_to_idx, tarfiles + + +class ParserImageInTar(Parser): + """ Multi-tarfile dataset parser where there is one .tar file per class + """ + + def __init__(self, root, class_map='', cache_tarfiles=True, cache_tarinfo=None): + super().__init__() + + class_name_to_idx = None + if class_map: + class_name_to_idx = load_class_map(class_map, root) + self.root = root + self.samples, self.targets, self.class_name_to_idx, tarfiles = extract_tarinfos( + self.root, + class_name_to_idx=class_name_to_idx, + cache_tarinfo=cache_tarinfo, + extensions=IMG_EXTENSIONS) + self.class_idx_to_name = {v: k for k, v in self.class_name_to_idx.items()} + if len(tarfiles) == 1 and tarfiles[0][0] is None: + self.root_is_tar = True + self.tar_state = tarfiles[0][1] + else: + self.root_is_tar = False + self.tar_state = dict(tarfiles) + self.cache_tarfiles = cache_tarfiles + + def __len__(self): + return len(self.samples) + + def __getitem__(self, index): + sample = self.samples[index] + target = self.targets[index] + sample_ti, parent_fn, child_ti = sample + parent_abs = os.path.join(self.root, parent_fn) if parent_fn else self.root + + tf = None + cache_state = None + if self.cache_tarfiles: + cache_state = self.tar_state if self.root_is_tar else self.tar_state[parent_fn] + tf = cache_state.tf + if tf is None: + tf = tarfile.open(parent_abs) + if self.cache_tarfiles: + cache_state.tf = tf + if child_ti is not None: + ctf = cache_state.children[child_ti.name].tf if self.cache_tarfiles else None + if ctf is None: + ctf = tarfile.open(fileobj=tf.extractfile(child_ti)) + if self.cache_tarfiles: + cache_state.children[child_ti.name].tf = ctf + tf = ctf + + return tf.extractfile(sample_ti), target + + def _filename(self, index, basename=False, absolute=False): + filename = self.samples[index][0].name + if basename: + filename = os.path.basename(filename) + return filename diff --git a/testbed/huggingface__pytorch-image-models/timm/data/parsers/parser_image_tar.py b/testbed/huggingface__pytorch-image-models/timm/data/parsers/parser_image_tar.py new file mode 100644 index 0000000000000000000000000000000000000000..467537f479873bbc09fa2b576cdbde9d2a956e7b --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/parsers/parser_image_tar.py @@ -0,0 +1,72 @@ +""" A dataset parser that reads single tarfile based datasets + +This parser can read datasets consisting if a single tarfile containing images. +I am planning to deprecated it in favour of ParerImageInTar. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import os +import tarfile + +from .parser import Parser +from .class_map import load_class_map +from .constants import IMG_EXTENSIONS +from timm.utils.misc import natural_key + + +def extract_tarinfo(tarfile, class_to_idx=None, sort=True): + files = [] + labels = [] + for ti in tarfile.getmembers(): + if not ti.isfile(): + continue + dirname, basename = os.path.split(ti.path) + label = os.path.basename(dirname) + ext = os.path.splitext(basename)[1] + if ext.lower() in IMG_EXTENSIONS: + files.append(ti) + labels.append(label) + if class_to_idx is None: + unique_labels = set(labels) + sorted_labels = list(sorted(unique_labels, key=natural_key)) + class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} + tarinfo_and_targets = [(f, class_to_idx[l]) for f, l in zip(files, labels) if l in class_to_idx] + if sort: + tarinfo_and_targets = sorted(tarinfo_and_targets, key=lambda k: natural_key(k[0].path)) + return tarinfo_and_targets, class_to_idx + + +class ParserImageTar(Parser): + """ Single tarfile dataset where classes are mapped to folders within tar + NOTE: This class is being deprecated in favour of the more capable ParserImageInTar that can + operate on folders of tars or tars in tars. + """ + def __init__(self, root, class_map=''): + super().__init__() + + class_to_idx = None + if class_map: + class_to_idx = load_class_map(class_map, root) + assert os.path.isfile(root) + self.root = root + + with tarfile.open(root) as tf: # cannot keep this open across processes, reopen later + self.samples, self.class_to_idx = extract_tarinfo(tf, class_to_idx) + self.imgs = self.samples + self.tarfile = None # lazy init in __getitem__ + + def __getitem__(self, index): + if self.tarfile is None: + self.tarfile = tarfile.open(self.root) + tarinfo, target = self.samples[index] + fileobj = self.tarfile.extractfile(tarinfo) + return fileobj, target + + def __len__(self): + return len(self.samples) + + def _filename(self, index, basename=False, absolute=False): + filename = self.samples[index][0].name + if basename: + filename = os.path.basename(filename) + return filename diff --git a/testbed/huggingface__pytorch-image-models/timm/data/parsers/parser_tfds.py b/testbed/huggingface__pytorch-image-models/timm/data/parsers/parser_tfds.py new file mode 100644 index 0000000000000000000000000000000000000000..2ff90b09f373a314ea24d0f84cf62c3dfcc02731 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/parsers/parser_tfds.py @@ -0,0 +1,223 @@ +""" Dataset parser interface that wraps TFDS datasets + +Wraps many (most?) TFDS image-classification datasets +from https://github.com/tensorflow/datasets +https://www.tensorflow.org/datasets/catalog/overview#image_classification + +Hacked together by / Copyright 2020 Ross Wightman +""" +import os +import io +import math +import torch +import torch.distributed as dist +from PIL import Image + +try: + import tensorflow as tf + tf.config.set_visible_devices([], 'GPU') # Hands off my GPU! (or pip install tensorflow-cpu) + import tensorflow_datasets as tfds +except ImportError as e: + print(e) + print("Please install tensorflow_datasets package `pip install tensorflow-datasets`.") + exit(1) +from .parser import Parser + + +MAX_TP_SIZE = 8 # maximum TF threadpool size, only doing jpeg decodes and queuing activities +SHUFFLE_SIZE = 20480 # samples to shuffle in DS queue +PREFETCH_SIZE = 2048 # samples to prefetch + + +def even_split_indices(split, n, num_samples): + partitions = [round(i * num_samples / n) for i in range(n + 1)] + return [f"{split}[{partitions[i]}:{partitions[i+1]}]" for i in range(n)] + + +class ParserTfds(Parser): + """ Wrap Tensorflow Datasets for use in PyTorch + + There several things to be aware of: + * To prevent excessive samples being dropped per epoch w/ distributed training or multiplicity of + dataloader workers, the train iterator wraps to avoid returning partial batches that trigger drop_last + https://github.com/pytorch/pytorch/issues/33413 + * With PyTorch IterableDatasets, each worker in each replica operates in isolation, the final batch + from each worker could be a different size. For training this is worked around by option above, for + validation extra samples are inserted iff distributed mode is enabled so that the batches being reduced + across replicas are of same size. This will slightly alter the results, distributed validation will not be + 100% correct. This is similar to common handling in DistributedSampler for normal Datasets but a bit worse + since there are up to N * J extra samples with IterableDatasets. + * The sharding (splitting of dataset into TFRecord) files imposes limitations on the number of + replicas and dataloader workers you can use. For really small datasets that only contain a few shards + you may have to train non-distributed w/ 1-2 dataloader workers. This is likely not a huge concern as the + benefit of distributed training or fast dataloading should be much less for small datasets. + * This wrapper is currently configured to return individual, decompressed image samples from the TFDS + dataset. The augmentation (transforms) and batching is still done in PyTorch. It would be possible + to specify TF augmentation fn and return augmented batches w/ some modifications to other downstream + components. + + """ + def __init__(self, root, name, split='train', shuffle=False, is_training=False, batch_size=None, repeats=0): + super().__init__() + self.root = root + self.split = split + self.shuffle = shuffle + self.is_training = is_training + if self.is_training: + assert batch_size is not None,\ + "Must specify batch_size in training mode for reasonable behaviour w/ TFDS wrapper" + self.batch_size = batch_size + self.repeats = repeats + self.subsplit = None + + self.builder = tfds.builder(name, data_dir=root) + # NOTE: please use tfds command line app to download & prepare datasets, I don't want to call + # download_and_prepare() by default here as it's caused issues generating unwanted paths. + self.num_samples = self.builder.info.splits[split].num_examples + self.ds = None # initialized lazily on each dataloader worker process + + self.worker_info = None + self.dist_rank = 0 + self.dist_num_replicas = 1 + if dist.is_available() and dist.is_initialized() and dist.get_world_size() > 1: + self.dist_rank = dist.get_rank() + self.dist_num_replicas = dist.get_world_size() + + def _lazy_init(self): + """ Lazily initialize the dataset. + + This is necessary to init the Tensorflow dataset pipeline in the (dataloader) process that + will be using the dataset instance. The __init__ method is called on the main process, + this will be called in a dataloader worker process. + + NOTE: There will be problems if you try to re-use this dataset across different loader/worker + instances once it has been initialized. Do not call any dataset methods that can call _lazy_init + before it is passed to dataloader. + """ + worker_info = torch.utils.data.get_worker_info() + + # setup input context to split dataset across distributed processes + split = self.split + num_workers = 1 + if worker_info is not None: + self.worker_info = worker_info + num_workers = worker_info.num_workers + global_num_workers = self.dist_num_replicas * num_workers + worker_id = worker_info.id + + # FIXME I need to spend more time figuring out the best way to distribute/split data across + # combo of distributed replicas + dataloader worker processes + """ + InputContext will assign subset of underlying TFRecord files to each 'pipeline' if used. + My understanding is that using split, the underling TFRecord files will shuffle (shuffle_files=True) + between the splits each iteration, but that understanding could be wrong. + Possible split options include: + * InputContext for both distributed & worker processes (current) + * InputContext for distributed and sub-splits for worker processes + * sub-splits for both + """ + # split_size = self.num_samples // num_workers + # start = worker_id * split_size + # if worker_id == num_workers - 1: + # split = split + '[{}:]'.format(start) + # else: + # split = split + '[{}:{}]'.format(start, start + split_size) + if not self.is_training and '[' not in self.split: + # If not training, and split doesn't define a subsplit, manually split the dataset + # for more even samples / worker + self.subsplit = even_split_indices(self.split, global_num_workers, self.num_samples)[ + self.dist_rank * num_workers + worker_id] + + if self.subsplit is None: + input_context = tf.distribute.InputContext( + num_input_pipelines=self.dist_num_replicas * num_workers, + input_pipeline_id=self.dist_rank * num_workers + worker_id, + num_replicas_in_sync=self.dist_num_replicas # FIXME does this arg have any impact? + ) + else: + input_context = None + + read_config = tfds.ReadConfig( + shuffle_seed=42, + shuffle_reshuffle_each_iteration=True, + input_context=input_context) + ds = self.builder.as_dataset( + split=self.subsplit or self.split, shuffle_files=self.shuffle, read_config=read_config) + # avoid overloading threading w/ combo fo TF ds threads + PyTorch workers + options = tf.data.Options() + options.experimental_threading.private_threadpool_size = max(1, MAX_TP_SIZE // num_workers) + options.experimental_threading.max_intra_op_parallelism = 1 + ds = ds.with_options(options) + if self.is_training or self.repeats > 1: + # to prevent excessive drop_last batch behaviour w/ IterableDatasets + # see warnings at https://pytorch.org/docs/stable/data.html#multi-process-data-loading + ds = ds.repeat() # allow wrap around and break iteration manually + if self.shuffle: + ds = ds.shuffle(min(self.num_samples, SHUFFLE_SIZE) // self._num_pipelines, seed=0) + ds = ds.prefetch(min(self.num_samples // self._num_pipelines, PREFETCH_SIZE)) + self.ds = tfds.as_numpy(ds) + + def __iter__(self): + if self.ds is None: + self._lazy_init() + # compute a rounded up sample count that is used to: + # 1. make batches even cross workers & replicas in distributed validation. + # This adds extra samples and will slightly alter validation results. + # 2. determine loop ending condition in training w/ repeat enabled so that only full batch_size + # batches are produced (underlying tfds iter wraps around) + target_sample_count = math.ceil(max(1, self.repeats) * self.num_samples / self._num_pipelines) + if self.is_training: + # round up to nearest batch_size per worker-replica + target_sample_count = math.ceil(target_sample_count / self.batch_size) * self.batch_size + sample_count = 0 + for sample in self.ds: + img = Image.fromarray(sample['image'], mode='RGB') + yield img, sample['label'] + sample_count += 1 + if self.is_training and sample_count >= target_sample_count: + # Need to break out of loop when repeat() is enabled for training w/ oversampling + # this results in extra samples per epoch but seems more desirable than dropping + # up to N*J batches per epoch (where N = num distributed processes, and J = num worker processes) + break + if not self.is_training and self.dist_num_replicas and 0 < sample_count < target_sample_count: + # Validation batch padding only done for distributed training where results are reduced across nodes. + # For single process case, it won't matter if workers return different batch sizes. + # FIXME if using input_context or % based subsplits, sample count can vary by more than +/- 1 and this + # approach is not optimal + yield img, sample['label'] # yield prev sample again + sample_count += 1 + + @property + def _num_workers(self): + return 1 if self.worker_info is None else self.worker_info.num_workers + + @property + def _num_pipelines(self): + return self._num_workers * self.dist_num_replicas + + def __len__(self): + # this is just an estimate and does not factor in extra samples added to pad batches based on + # complete worker & replica info (not available until init in dataloader). + return math.ceil(max(1, self.repeats) * self.num_samples / self.dist_num_replicas) + + def _filename(self, index, basename=False, absolute=False): + assert False, "Not supported" # no random access to samples + + def filenames(self, basename=False, absolute=False): + """ Return all filenames in dataset, overrides base""" + if self.ds is None: + self._lazy_init() + names = [] + for sample in self.ds: + if len(names) > self.num_samples: + break # safety for ds.repeat() case + if 'file_name' in sample: + name = sample['file_name'] + elif 'filename' in sample: + name = sample['filename'] + elif 'id' in sample: + name = sample['id'] + else: + assert False, "No supported name field present" + names.append(name) + return names diff --git a/testbed/huggingface__pytorch-image-models/timm/data/random_erasing.py b/testbed/huggingface__pytorch-image-models/timm/data/random_erasing.py new file mode 100644 index 0000000000000000000000000000000000000000..78967d105dd77b56a3ccefb6ff1838a8058c0384 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/random_erasing.py @@ -0,0 +1,97 @@ +""" Random Erasing (Cutout) + +Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0 +Copyright Zhun Zhong & Liang Zheng + +Hacked together by / Copyright 2020 Ross Wightman +""" +import random +import math +import torch + + +def _get_pixels(per_pixel, rand_color, patch_size, dtype=torch.float32, device='cuda'): + # NOTE I've seen CUDA illegal memory access errors being caused by the normal_() + # paths, flip the order so normal is run on CPU if this becomes a problem + # Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508 + if per_pixel: + return torch.empty(patch_size, dtype=dtype, device=device).normal_() + elif rand_color: + return torch.empty((patch_size[0], 1, 1), dtype=dtype, device=device).normal_() + else: + return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device) + + +class RandomErasing: + """ Randomly selects a rectangle region in an image and erases its pixels. + 'Random Erasing Data Augmentation' by Zhong et al. + See https://arxiv.org/pdf/1708.04896.pdf + + This variant of RandomErasing is intended to be applied to either a batch + or single image tensor after it has been normalized by dataset mean and std. + Args: + probability: Probability that the Random Erasing operation will be performed. + min_area: Minimum percentage of erased area wrt input image area. + max_area: Maximum percentage of erased area wrt input image area. + min_aspect: Minimum aspect ratio of erased area. + mode: pixel color mode, one of 'const', 'rand', or 'pixel' + 'const' - erase block is constant color of 0 for all channels + 'rand' - erase block is same per-channel random (normal) color + 'pixel' - erase block is per-pixel random (normal) color + max_count: maximum number of erasing blocks per image, area per box is scaled by count. + per-image count is randomly chosen between 1 and this value. + """ + + def __init__( + self, + probability=0.5, min_area=0.02, max_area=1/3, min_aspect=0.3, max_aspect=None, + mode='const', min_count=1, max_count=None, num_splits=0, device='cuda'): + self.probability = probability + self.min_area = min_area + self.max_area = max_area + max_aspect = max_aspect or 1 / min_aspect + self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect)) + self.min_count = min_count + self.max_count = max_count or min_count + self.num_splits = num_splits + mode = mode.lower() + self.rand_color = False + self.per_pixel = False + if mode == 'rand': + self.rand_color = True # per block random normal + elif mode == 'pixel': + self.per_pixel = True # per pixel random normal + else: + assert not mode or mode == 'const' + self.device = device + + def _erase(self, img, chan, img_h, img_w, dtype): + if random.random() > self.probability: + return + area = img_h * img_w + count = self.min_count if self.min_count == self.max_count else \ + random.randint(self.min_count, self.max_count) + for _ in range(count): + for attempt in range(10): + target_area = random.uniform(self.min_area, self.max_area) * area / count + aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) + h = int(round(math.sqrt(target_area * aspect_ratio))) + w = int(round(math.sqrt(target_area / aspect_ratio))) + if w < img_w and h < img_h: + top = random.randint(0, img_h - h) + left = random.randint(0, img_w - w) + img[:, top:top + h, left:left + w] = _get_pixels( + self.per_pixel, self.rand_color, (chan, h, w), + dtype=dtype, device=self.device) + break + + def __call__(self, input): + if len(input.size()) == 3: + self._erase(input, *input.size(), input.dtype) + else: + batch_size, chan, img_h, img_w = input.size() + # skip first slice of batch if num_splits is set (for clean portion of samples) + batch_start = batch_size // self.num_splits if self.num_splits > 1 else 0 + for i in range(batch_start, batch_size): + self._erase(input[i], chan, img_h, img_w, input.dtype) + return input diff --git a/testbed/huggingface__pytorch-image-models/timm/data/real_labels.py b/testbed/huggingface__pytorch-image-models/timm/data/real_labels.py new file mode 100644 index 0000000000000000000000000000000000000000..939c34867e7915ce3e4cc7da04a5bc1653ec4f2c --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/real_labels.py @@ -0,0 +1,42 @@ +""" Real labels evaluator for ImageNet +Paper: `Are we done with ImageNet?` - https://arxiv.org/abs/2006.07159 +Based on Numpy example at https://github.com/google-research/reassessed-imagenet + +Hacked together by / Copyright 2020 Ross Wightman +""" +import os +import json +import numpy as np + + +class RealLabelsImagenet: + + def __init__(self, filenames, real_json='real.json', topk=(1, 5)): + with open(real_json) as real_labels: + real_labels = json.load(real_labels) + real_labels = {f'ILSVRC2012_val_{i + 1:08d}.JPEG': labels for i, labels in enumerate(real_labels)} + self.real_labels = real_labels + self.filenames = filenames + assert len(self.filenames) == len(self.real_labels) + self.topk = topk + self.is_correct = {k: [] for k in topk} + self.sample_idx = 0 + + def add_result(self, output): + maxk = max(self.topk) + _, pred_batch = output.topk(maxk, 1, True, True) + pred_batch = pred_batch.cpu().numpy() + for pred in pred_batch: + filename = self.filenames[self.sample_idx] + filename = os.path.basename(filename) + if self.real_labels[filename]: + for k in self.topk: + self.is_correct[k].append( + any([p in self.real_labels[filename] for p in pred[:k]])) + self.sample_idx += 1 + + def get_accuracy(self, k=None): + if k is None: + return {k: float(np.mean(self.is_correct[k])) * 100 for k in self.topk} + else: + return float(np.mean(self.is_correct[k])) * 100 diff --git a/testbed/huggingface__pytorch-image-models/timm/data/tf_preprocessing.py b/testbed/huggingface__pytorch-image-models/timm/data/tf_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..44b4a3af7372c6865b1cdddda0a8da0ccc6b93a0 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/tf_preprocessing.py @@ -0,0 +1,232 @@ +""" Tensorflow Preprocessing Adapter + +Allows use of Tensorflow preprocessing pipeline in PyTorch Transform + +Copyright of original Tensorflow code below. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""ImageNet preprocessing for MnasNet.""" +import tensorflow as tf +import numpy as np + +IMAGE_SIZE = 224 +CROP_PADDING = 32 + + +def distorted_bounding_box_crop(image_bytes, + bbox, + min_object_covered=0.1, + aspect_ratio_range=(0.75, 1.33), + area_range=(0.05, 1.0), + max_attempts=100, + scope=None): + """Generates cropped_image using one of the bboxes randomly distorted. + + See `tf.image.sample_distorted_bounding_box` for more documentation. + + Args: + image_bytes: `Tensor` of binary image data. + bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` + where each coordinate is [0, 1) and the coordinates are arranged + as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole + image. + min_object_covered: An optional `float`. Defaults to `0.1`. The cropped + area of the image must contain at least this fraction of any bounding + box supplied. + aspect_ratio_range: An optional list of `float`s. The cropped area of the + image must have an aspect ratio = width / height within this range. + area_range: An optional list of `float`s. The cropped area of the image + must contain a fraction of the supplied image within in this range. + max_attempts: An optional `int`. Number of attempts at generating a cropped + region of the image of the specified constraints. After `max_attempts` + failures, return the entire image. + scope: Optional `str` for name scope. + Returns: + cropped image `Tensor` + """ + with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]): + shape = tf.image.extract_jpeg_shape(image_bytes) + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + shape, + bounding_boxes=bbox, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + max_attempts=max_attempts, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, _ = sample_distorted_bounding_box + + # Crop the image to the specified bounding box. + offset_y, offset_x, _ = tf.unstack(bbox_begin) + target_height, target_width, _ = tf.unstack(bbox_size) + crop_window = tf.stack([offset_y, offset_x, target_height, target_width]) + image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) + + return image + + +def _at_least_x_are_equal(a, b, x): + """At least `x` of `a` and `b` `Tensors` are equal.""" + match = tf.equal(a, b) + match = tf.cast(match, tf.int32) + return tf.greater_equal(tf.reduce_sum(match), x) + + +def _decode_and_random_crop(image_bytes, image_size, resize_method): + """Make a random crop of image_size.""" + bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) + image = distorted_bounding_box_crop( + image_bytes, + bbox, + min_object_covered=0.1, + aspect_ratio_range=(3. / 4, 4. / 3.), + area_range=(0.08, 1.0), + max_attempts=10, + scope=None) + original_shape = tf.image.extract_jpeg_shape(image_bytes) + bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3) + + image = tf.cond( + bad, + lambda: _decode_and_center_crop(image_bytes, image_size), + lambda: tf.image.resize([image], [image_size, image_size], resize_method)[0]) + + return image + + +def _decode_and_center_crop(image_bytes, image_size, resize_method): + """Crops to center of image with padding then scales image_size.""" + shape = tf.image.extract_jpeg_shape(image_bytes) + image_height = shape[0] + image_width = shape[1] + + padded_center_crop_size = tf.cast( + ((image_size / (image_size + CROP_PADDING)) * + tf.cast(tf.minimum(image_height, image_width), tf.float32)), + tf.int32) + + offset_height = ((image_height - padded_center_crop_size) + 1) // 2 + offset_width = ((image_width - padded_center_crop_size) + 1) // 2 + crop_window = tf.stack([offset_height, offset_width, + padded_center_crop_size, padded_center_crop_size]) + image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) + image = tf.image.resize([image], [image_size, image_size], resize_method)[0] + + return image + + +def _flip(image): + """Random horizontal image flip.""" + image = tf.image.random_flip_left_right(image) + return image + + +def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'): + """Preprocesses the given image for evaluation. + + Args: + image_bytes: `Tensor` representing an image binary of arbitrary size. + use_bfloat16: `bool` for whether to use bfloat16. + image_size: image size. + interpolation: image interpolation method + + Returns: + A preprocessed image `Tensor`. + """ + resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR + image = _decode_and_random_crop(image_bytes, image_size, resize_method) + image = _flip(image) + image = tf.reshape(image, [image_size, image_size, 3]) + image = tf.image.convert_image_dtype( + image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32) + return image + + +def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'): + """Preprocesses the given image for evaluation. + + Args: + image_bytes: `Tensor` representing an image binary of arbitrary size. + use_bfloat16: `bool` for whether to use bfloat16. + image_size: image size. + interpolation: image interpolation method + + Returns: + A preprocessed image `Tensor`. + """ + resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR + image = _decode_and_center_crop(image_bytes, image_size, resize_method) + image = tf.reshape(image, [image_size, image_size, 3]) + image = tf.image.convert_image_dtype( + image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32) + return image + + +def preprocess_image(image_bytes, + is_training=False, + use_bfloat16=False, + image_size=IMAGE_SIZE, + interpolation='bicubic'): + """Preprocesses the given image. + + Args: + image_bytes: `Tensor` representing an image binary of arbitrary size. + is_training: `bool` for whether the preprocessing is for training. + use_bfloat16: `bool` for whether to use bfloat16. + image_size: image size. + interpolation: image interpolation method + + Returns: + A preprocessed image `Tensor` with value range of [0, 255]. + """ + if is_training: + return preprocess_for_train(image_bytes, use_bfloat16, image_size, interpolation) + else: + return preprocess_for_eval(image_bytes, use_bfloat16, image_size, interpolation) + + +class TfPreprocessTransform: + + def __init__(self, is_training=False, size=224, interpolation='bicubic'): + self.is_training = is_training + self.size = size[0] if isinstance(size, tuple) else size + self.interpolation = interpolation + self._image_bytes = None + self.process_image = self._build_tf_graph() + self.sess = None + + def _build_tf_graph(self): + with tf.device('/cpu:0'): + self._image_bytes = tf.placeholder( + shape=[], + dtype=tf.string, + ) + img = preprocess_image( + self._image_bytes, self.is_training, False, self.size, self.interpolation) + return img + + def __call__(self, image_bytes): + if self.sess is None: + self.sess = tf.Session() + img = self.sess.run(self.process_image, feed_dict={self._image_bytes: image_bytes}) + img = img.round().clip(0, 255).astype(np.uint8) + if img.ndim < 3: + img = np.expand_dims(img, axis=-1) + img = np.rollaxis(img, 2) # HWC to CHW + return img diff --git a/testbed/huggingface__pytorch-image-models/timm/data/transforms.py b/testbed/huggingface__pytorch-image-models/timm/data/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..4220304f664d861cad64760a6cbe05dfafdf4fe6 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/transforms.py @@ -0,0 +1,158 @@ +import torch +import torchvision.transforms.functional as F +from PIL import Image +import warnings +import math +import random +import numpy as np + + +class ToNumpy: + + def __call__(self, pil_img): + np_img = np.array(pil_img, dtype=np.uint8) + if np_img.ndim < 3: + np_img = np.expand_dims(np_img, axis=-1) + np_img = np.rollaxis(np_img, 2) # HWC to CHW + return np_img + + +class ToTensor: + + def __init__(self, dtype=torch.float32): + self.dtype = dtype + + def __call__(self, pil_img): + np_img = np.array(pil_img, dtype=np.uint8) + if np_img.ndim < 3: + np_img = np.expand_dims(np_img, axis=-1) + np_img = np.rollaxis(np_img, 2) # HWC to CHW + return torch.from_numpy(np_img).to(dtype=self.dtype) + + +_pil_interpolation_to_str = { + Image.NEAREST: 'PIL.Image.NEAREST', + Image.BILINEAR: 'PIL.Image.BILINEAR', + Image.BICUBIC: 'PIL.Image.BICUBIC', + Image.LANCZOS: 'PIL.Image.LANCZOS', + Image.HAMMING: 'PIL.Image.HAMMING', + Image.BOX: 'PIL.Image.BOX', +} + + +def _pil_interp(method): + if method == 'bicubic': + return Image.BICUBIC + elif method == 'lanczos': + return Image.LANCZOS + elif method == 'hamming': + return Image.HAMMING + else: + # default bilinear, do we want to allow nearest? + return Image.BILINEAR + + +_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) + + +class RandomResizedCropAndInterpolation: + """Crop the given PIL Image to random size and aspect ratio with random interpolation. + + A crop of random size (default: of 0.08 to 1.0) of the original size and a random + aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop + is finally resized to given size. + This is popularly used to train the Inception networks. + + Args: + size: expected output size of each edge + scale: range of size of the origin size cropped + ratio: range of aspect ratio of the origin aspect ratio cropped + interpolation: Default: PIL.Image.BILINEAR + """ + + def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), + interpolation='bilinear'): + if isinstance(size, (list, tuple)): + self.size = tuple(size) + else: + self.size = (size, size) + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + warnings.warn("range should be of kind (min, max)") + + if interpolation == 'random': + self.interpolation = _RANDOM_INTERPOLATION + else: + self.interpolation = _pil_interp(interpolation) + self.scale = scale + self.ratio = ratio + + @staticmethod + def get_params(img, scale, ratio): + """Get parameters for ``crop`` for a random sized crop. + + Args: + img (PIL Image): Image to be cropped. + scale (tuple): range of size of the origin size cropped + ratio (tuple): range of aspect ratio of the origin aspect ratio cropped + + Returns: + tuple: params (i, j, h, w) to be passed to ``crop`` for a random + sized crop. + """ + area = img.size[0] * img.size[1] + + for attempt in range(10): + target_area = random.uniform(*scale) * area + log_ratio = (math.log(ratio[0]), math.log(ratio[1])) + aspect_ratio = math.exp(random.uniform(*log_ratio)) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if w <= img.size[0] and h <= img.size[1]: + i = random.randint(0, img.size[1] - h) + j = random.randint(0, img.size[0] - w) + return i, j, h, w + + # Fallback to central crop + in_ratio = img.size[0] / img.size[1] + if in_ratio < min(ratio): + w = img.size[0] + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = img.size[1] + w = int(round(h * max(ratio))) + else: # whole image + w = img.size[0] + h = img.size[1] + i = (img.size[1] - h) // 2 + j = (img.size[0] - w) // 2 + return i, j, h, w + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be cropped and resized. + + Returns: + PIL Image: Randomly cropped and resized image. + """ + i, j, h, w = self.get_params(img, self.scale, self.ratio) + if isinstance(self.interpolation, (tuple, list)): + interpolation = random.choice(self.interpolation) + else: + interpolation = self.interpolation + return F.resized_crop(img, i, j, h, w, self.size, interpolation) + + def __repr__(self): + if isinstance(self.interpolation, (tuple, list)): + interpolate_str = ' '.join([_pil_interpolation_to_str[x] for x in self.interpolation]) + else: + interpolate_str = _pil_interpolation_to_str[self.interpolation] + format_string = self.__class__.__name__ + '(size={0}'.format(self.size) + format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale)) + format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio)) + format_string += ', interpolation={0})'.format(interpolate_str) + return format_string + + diff --git a/testbed/huggingface__pytorch-image-models/timm/data/transforms_factory.py b/testbed/huggingface__pytorch-image-models/timm/data/transforms_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..df6e0de0338554bf410065a61810e69b167738fe --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/data/transforms_factory.py @@ -0,0 +1,236 @@ +""" Transforms Factory +Factory methods for building image transforms for use with TIMM (PyTorch Image Models) + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math + +import torch +from torchvision import transforms + +from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT +from timm.data.auto_augment import rand_augment_transform, augment_and_mix_transform, auto_augment_transform +from timm.data.transforms import _pil_interp, RandomResizedCropAndInterpolation, ToNumpy, ToTensor +from timm.data.random_erasing import RandomErasing + + +def transforms_noaug_train( + img_size=224, + interpolation='bilinear', + use_prefetcher=False, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, +): + if interpolation == 'random': + # random interpolation not supported with no-aug + interpolation = 'bilinear' + tfl = [ + transforms.Resize(img_size, _pil_interp(interpolation)), + transforms.CenterCrop(img_size) + ] + if use_prefetcher: + # prefetcher and collate will handle tensor conversion and norm + tfl += [ToNumpy()] + else: + tfl += [ + transforms.ToTensor(), + transforms.Normalize( + mean=torch.tensor(mean), + std=torch.tensor(std)) + ] + return transforms.Compose(tfl) + + +def transforms_imagenet_train( + img_size=224, + scale=None, + ratio=None, + hflip=0.5, + vflip=0., + color_jitter=0.4, + auto_augment=None, + interpolation='random', + use_prefetcher=False, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + re_prob=0., + re_mode='const', + re_count=1, + re_num_splits=0, + separate=False, +): + """ + If separate==True, the transforms are returned as a tuple of 3 separate transforms + for use in a mixing dataset that passes + * all data through the first (primary) transform, called the 'clean' data + * a portion of the data through the secondary transform + * normalizes and converts the branches above with the third, final transform + """ + scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range + ratio = tuple(ratio or (3./4., 4./3.)) # default imagenet ratio range + primary_tfl = [ + RandomResizedCropAndInterpolation(img_size, scale=scale, ratio=ratio, interpolation=interpolation)] + if hflip > 0.: + primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)] + if vflip > 0.: + primary_tfl += [transforms.RandomVerticalFlip(p=vflip)] + + secondary_tfl = [] + if auto_augment: + assert isinstance(auto_augment, str) + if isinstance(img_size, (tuple, list)): + img_size_min = min(img_size) + else: + img_size_min = img_size + aa_params = dict( + translate_const=int(img_size_min * 0.45), + img_mean=tuple([min(255, round(255 * x)) for x in mean]), + ) + if interpolation and interpolation != 'random': + aa_params['interpolation'] = _pil_interp(interpolation) + if auto_augment.startswith('rand'): + secondary_tfl += [rand_augment_transform(auto_augment, aa_params)] + elif auto_augment.startswith('augmix'): + aa_params['translate_pct'] = 0.3 + secondary_tfl += [augment_and_mix_transform(auto_augment, aa_params)] + else: + secondary_tfl += [auto_augment_transform(auto_augment, aa_params)] + elif color_jitter is not None: + # color jitter is enabled when not using AA + if isinstance(color_jitter, (list, tuple)): + # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation + # or 4 if also augmenting hue + assert len(color_jitter) in (3, 4) + else: + # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue + color_jitter = (float(color_jitter),) * 3 + secondary_tfl += [transforms.ColorJitter(*color_jitter)] + + final_tfl = [] + if use_prefetcher: + # prefetcher and collate will handle tensor conversion and norm + final_tfl += [ToNumpy()] + else: + final_tfl += [ + transforms.ToTensor(), + transforms.Normalize( + mean=torch.tensor(mean), + std=torch.tensor(std)) + ] + if re_prob > 0.: + final_tfl.append( + RandomErasing(re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits, device='cpu')) + + if separate: + return transforms.Compose(primary_tfl), transforms.Compose(secondary_tfl), transforms.Compose(final_tfl) + else: + return transforms.Compose(primary_tfl + secondary_tfl + final_tfl) + + +def transforms_imagenet_eval( + img_size=224, + crop_pct=None, + interpolation='bilinear', + use_prefetcher=False, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD): + crop_pct = crop_pct or DEFAULT_CROP_PCT + + if isinstance(img_size, (tuple, list)): + assert len(img_size) == 2 + if img_size[-1] == img_size[-2]: + # fall-back to older behaviour so Resize scales to shortest edge if target is square + scale_size = int(math.floor(img_size[0] / crop_pct)) + else: + scale_size = tuple([int(x / crop_pct) for x in img_size]) + else: + scale_size = int(math.floor(img_size / crop_pct)) + + tfl = [ + transforms.Resize(scale_size, _pil_interp(interpolation)), + transforms.CenterCrop(img_size), + ] + if use_prefetcher: + # prefetcher and collate will handle tensor conversion and norm + tfl += [ToNumpy()] + else: + tfl += [ + transforms.ToTensor(), + transforms.Normalize( + mean=torch.tensor(mean), + std=torch.tensor(std)) + ] + + return transforms.Compose(tfl) + + +def create_transform( + input_size, + is_training=False, + use_prefetcher=False, + no_aug=False, + scale=None, + ratio=None, + hflip=0.5, + vflip=0., + color_jitter=0.4, + auto_augment=None, + interpolation='bilinear', + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + re_prob=0., + re_mode='const', + re_count=1, + re_num_splits=0, + crop_pct=None, + tf_preprocessing=False, + separate=False): + + if isinstance(input_size, (tuple, list)): + img_size = input_size[-2:] + else: + img_size = input_size + + if tf_preprocessing and use_prefetcher: + assert not separate, "Separate transforms not supported for TF preprocessing" + from timm.data.tf_preprocessing import TfPreprocessTransform + transform = TfPreprocessTransform( + is_training=is_training, size=img_size, interpolation=interpolation) + else: + if is_training and no_aug: + assert not separate, "Cannot perform split augmentation with no_aug" + transform = transforms_noaug_train( + img_size, + interpolation=interpolation, + use_prefetcher=use_prefetcher, + mean=mean, + std=std) + elif is_training: + transform = transforms_imagenet_train( + img_size, + scale=scale, + ratio=ratio, + hflip=hflip, + vflip=vflip, + color_jitter=color_jitter, + auto_augment=auto_augment, + interpolation=interpolation, + use_prefetcher=use_prefetcher, + mean=mean, + std=std, + re_prob=re_prob, + re_mode=re_mode, + re_count=re_count, + re_num_splits=re_num_splits, + separate=separate) + else: + assert not separate, "Separate transforms not supported for validation preprocessing" + transform = transforms_imagenet_eval( + img_size, + interpolation=interpolation, + use_prefetcher=use_prefetcher, + mean=mean, + std=std, + crop_pct=crop_pct) + + return transform diff --git a/testbed/huggingface__pytorch-image-models/timm/loss/__init__.py b/testbed/huggingface__pytorch-image-models/timm/loss/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a74bcb88c46288b8b155621464f7150a1bb6b2be --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/loss/__init__.py @@ -0,0 +1,4 @@ +from .asymmetric_loss import AsymmetricLossMultiLabel, AsymmetricLossSingleLabel +from .binary_cross_entropy import DenseBinaryCrossEntropy +from .cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy +from .jsd import JsdCrossEntropy diff --git a/testbed/huggingface__pytorch-image-models/timm/loss/asymmetric_loss.py b/testbed/huggingface__pytorch-image-models/timm/loss/asymmetric_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..96a977882b9fa534990bfb1c8321e4c822c602ca --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/loss/asymmetric_loss.py @@ -0,0 +1,97 @@ +import torch +import torch.nn as nn + + +class AsymmetricLossMultiLabel(nn.Module): + def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=False): + super(AsymmetricLossMultiLabel, self).__init__() + + self.gamma_neg = gamma_neg + self.gamma_pos = gamma_pos + self.clip = clip + self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss + self.eps = eps + + def forward(self, x, y): + """" + Parameters + ---------- + x: input logits + y: targets (multi-label binarized vector) + """ + + # Calculating Probabilities + x_sigmoid = torch.sigmoid(x) + xs_pos = x_sigmoid + xs_neg = 1 - x_sigmoid + + # Asymmetric Clipping + if self.clip is not None and self.clip > 0: + xs_neg = (xs_neg + self.clip).clamp(max=1) + + # Basic CE calculation + los_pos = y * torch.log(xs_pos.clamp(min=self.eps)) + los_neg = (1 - y) * torch.log(xs_neg.clamp(min=self.eps)) + loss = los_pos + los_neg + + # Asymmetric Focusing + if self.gamma_neg > 0 or self.gamma_pos > 0: + if self.disable_torch_grad_focal_loss: + torch._C.set_grad_enabled(False) + pt0 = xs_pos * y + pt1 = xs_neg * (1 - y) # pt = p if t > 0 else 1-p + pt = pt0 + pt1 + one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y) + one_sided_w = torch.pow(1 - pt, one_sided_gamma) + if self.disable_torch_grad_focal_loss: + torch._C.set_grad_enabled(True) + loss *= one_sided_w + + return -loss.sum() + + +class AsymmetricLossSingleLabel(nn.Module): + def __init__(self, gamma_pos=1, gamma_neg=4, eps: float = 0.1, reduction='mean'): + super(AsymmetricLossSingleLabel, self).__init__() + + self.eps = eps + self.logsoftmax = nn.LogSoftmax(dim=-1) + self.targets_classes = [] # prevent gpu repeated memory allocation + self.gamma_pos = gamma_pos + self.gamma_neg = gamma_neg + self.reduction = reduction + + def forward(self, inputs, target, reduction=None): + """" + Parameters + ---------- + x: input logits + y: targets (1-hot vector) + """ + + num_classes = inputs.size()[-1] + log_preds = self.logsoftmax(inputs) + self.targets_classes = torch.zeros_like(inputs).scatter_(1, target.long().unsqueeze(1), 1) + + # ASL weights + targets = self.targets_classes + anti_targets = 1 - targets + xs_pos = torch.exp(log_preds) + xs_neg = 1 - xs_pos + xs_pos = xs_pos * targets + xs_neg = xs_neg * anti_targets + asymmetric_w = torch.pow(1 - xs_pos - xs_neg, + self.gamma_pos * targets + self.gamma_neg * anti_targets) + log_preds = log_preds * asymmetric_w + + if self.eps > 0: # label smoothing + self.targets_classes.mul_(1 - self.eps).add_(self.eps / num_classes) + + # loss calculation + loss = - self.targets_classes.mul(log_preds) + + loss = loss.sum(dim=-1) + if self.reduction == 'mean': + loss = loss.mean() + + return loss diff --git a/testbed/huggingface__pytorch-image-models/timm/loss/binary_cross_entropy.py b/testbed/huggingface__pytorch-image-models/timm/loss/binary_cross_entropy.py new file mode 100644 index 0000000000000000000000000000000000000000..6da04dba0f860e7dbf3d4efe82c65709dec3269e --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/loss/binary_cross_entropy.py @@ -0,0 +1,23 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class DenseBinaryCrossEntropy(nn.Module): + """ BCE using one-hot from dense targets w/ label smoothing + NOTE for experiments comparing CE to BCE /w label smoothing, may remove + """ + def __init__(self, smoothing=0.1): + super(DenseBinaryCrossEntropy, self).__init__() + assert 0. <= smoothing < 1.0 + self.smoothing = smoothing + self.bce = nn.BCEWithLogitsLoss() + + def forward(self, x, target): + num_classes = x.shape[-1] + off_value = self.smoothing / num_classes + on_value = 1. - self.smoothing + off_value + target = target.long().view(-1, 1) + target = torch.full( + (target.size()[0], num_classes), off_value, device=x.device, dtype=x.dtype).scatter_(1, target, on_value) + return self.bce(x, target) diff --git a/testbed/huggingface__pytorch-image-models/timm/loss/cross_entropy.py b/testbed/huggingface__pytorch-image-models/timm/loss/cross_entropy.py new file mode 100644 index 0000000000000000000000000000000000000000..60bef646cc6c31fd734f234346dbc4255def6622 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/loss/cross_entropy.py @@ -0,0 +1,36 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class LabelSmoothingCrossEntropy(nn.Module): + """ + NLL loss with label smoothing. + """ + def __init__(self, smoothing=0.1): + """ + Constructor for the LabelSmoothing module. + :param smoothing: label smoothing factor + """ + super(LabelSmoothingCrossEntropy, self).__init__() + assert smoothing < 1.0 + self.smoothing = smoothing + self.confidence = 1. - smoothing + + def forward(self, x, target): + logprobs = F.log_softmax(x, dim=-1) + nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)) + nll_loss = nll_loss.squeeze(1) + smooth_loss = -logprobs.mean(dim=-1) + loss = self.confidence * nll_loss + self.smoothing * smooth_loss + return loss.mean() + + +class SoftTargetCrossEntropy(nn.Module): + + def __init__(self): + super(SoftTargetCrossEntropy, self).__init__() + + def forward(self, x, target): + loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1) + return loss.mean() diff --git a/testbed/huggingface__pytorch-image-models/timm/loss/jsd.py b/testbed/huggingface__pytorch-image-models/timm/loss/jsd.py new file mode 100644 index 0000000000000000000000000000000000000000..dd64e156c23d27aa03817a587ae367e8175fc126 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/loss/jsd.py @@ -0,0 +1,39 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .cross_entropy import LabelSmoothingCrossEntropy + + +class JsdCrossEntropy(nn.Module): + """ Jensen-Shannon Divergence + Cross-Entropy Loss + + Based on impl here: https://github.com/google-research/augmix/blob/master/imagenet.py + From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - + https://arxiv.org/abs/1912.02781 + + Hacked together by / Copyright 2020 Ross Wightman + """ + def __init__(self, num_splits=3, alpha=12, smoothing=0.1): + super().__init__() + self.num_splits = num_splits + self.alpha = alpha + if smoothing is not None and smoothing > 0: + self.cross_entropy_loss = LabelSmoothingCrossEntropy(smoothing) + else: + self.cross_entropy_loss = torch.nn.CrossEntropyLoss() + + def __call__(self, output, target): + split_size = output.shape[0] // self.num_splits + assert split_size * self.num_splits == output.shape[0] + logits_split = torch.split(output, split_size) + + # Cross-entropy is only computed on clean images + loss = self.cross_entropy_loss(logits_split[0], target[:split_size]) + probs = [F.softmax(logits, dim=1) for logits in logits_split] + + # Clamp mixture distribution to avoid exploding KL divergence + logp_mixture = torch.clamp(torch.stack(probs).mean(axis=0), 1e-7, 1).log() + loss += self.alpha * sum([F.kl_div( + logp_mixture, p_split, reduction='batchmean') for p_split in probs]) / len(probs) + return loss diff --git a/testbed/huggingface__pytorch-image-models/timm/models/__init__.py b/testbed/huggingface__pytorch-image-models/timm/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..56a753b175cf91c78176bfba8e0cc30f020f8f85 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/__init__.py @@ -0,0 +1,57 @@ +from .beit import * +from .byoanet import * +from .byobnet import * +from .cait import * +from .coat import * +from .convit import * +from .crossvit import * +from .cspnet import * +from .densenet import * +from .dla import * +from .dpn import * +from .efficientnet import * +from .ghostnet import * +from .gluon_resnet import * +from .gluon_xception import * +from .hardcorenas import * +from .hrnet import * +from .inception_resnet_v2 import * +from .inception_v3 import * +from .inception_v4 import * +from .levit import * +from .mlp_mixer import * +from .mobilenetv3 import * +from .nasnet import * +from .nest import * +from .nfnet import * +from .pit import * +from .pnasnet import * +from .regnet import * +from .res2net import * +from .resnest import * +from .resnet import * +from .resnetv2 import * +from .rexnet import * +from .selecsls import * +from .senet import * +from .sknet import * +from .swin_transformer import * +from .tnt import * +from .tresnet import * +from .twins import * +from .vgg import * +from .visformer import * +from .vision_transformer import * +from .vision_transformer_hybrid import * +from .vovnet import * +from .xception import * +from .xception_aligned import * +from .xcit import * + +from .factory import create_model, split_model_name, safe_model_name +from .helpers import load_checkpoint, resume_checkpoint, model_parameters +from .layers import TestTimePoolHead, apply_test_time_pool +from .layers import convert_splitbn_model +from .layers import is_scriptable, is_exportable, set_scriptable, set_exportable, is_no_jit, set_no_jit +from .registry import register_model, model_entrypoint, list_models, is_model, list_modules, is_model_in_modules,\ + has_model_default_key, is_model_default_key, get_model_default_value, is_model_pretrained diff --git a/testbed/huggingface__pytorch-image-models/timm/models/beit.py b/testbed/huggingface__pytorch-image-models/timm/models/beit.py new file mode 100644 index 0000000000000000000000000000000000000000..e8d1dd2c7e1b67aad718beff02a14b293e5215d9 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/beit.py @@ -0,0 +1,420 @@ +""" BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) + +Model from official source: https://github.com/microsoft/unilm/tree/master/beit + +At this point only the 1k fine-tuned classification weights and model configs have been added, +see original source above for pre-training models and procedure. + +Modifications by / Copyright 2021 Ross Wightman, original copyrights below +""" +# -------------------------------------------------------- +# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) +# Github source: https://github.com/microsoft/unilm/tree/master/beit +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# By Hangbo Bao +# Based on timm and DeiT code bases +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit/ +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +import math +from functools import partial +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import build_model_with_cfg +from .layers import PatchEmbed, Mlp, DropPath, trunc_normal_ +from .registry import register_model +from .vision_transformer import checkpoint_filter_fn + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'beit_base_patch16_224': _cfg( + url='https://unilm.blob.core.windows.net/beit/beit_base_patch16_224_pt22k_ft22kto1k.pth'), + 'beit_base_patch16_384': _cfg( + url='https://unilm.blob.core.windows.net/beit/beit_base_patch16_384_pt22k_ft22kto1k.pth', + input_size=(3, 384, 384), crop_pct=1.0, + ), + 'beit_base_patch16_224_in22k': _cfg( + url='https://unilm.blob.core.windows.net/beit/beit_base_patch16_224_pt22k_ft22k.pth', + num_classes=21841, + ), + 'beit_large_patch16_224': _cfg( + url='https://unilm.blob.core.windows.net/beit/beit_large_patch16_224_pt22k_ft22kto1k.pth'), + 'beit_large_patch16_384': _cfg( + url='https://unilm.blob.core.windows.net/beit/beit_large_patch16_384_pt22k_ft22kto1k.pth', + input_size=(3, 384, 384), crop_pct=1.0, + ), + 'beit_large_patch16_512': _cfg( + url='https://unilm.blob.core.windows.net/beit/beit_large_patch16_512_pt22k_ft22kto1k.pth', + input_size=(3, 512, 512), crop_pct=1.0, + ), + 'beit_large_patch16_224_in22k': _cfg( + url='https://unilm.blob.core.windows.net/beit/beit_large_patch16_224_pt22k_ft22k.pth', + num_classes=21841, + ), +} + + +class Attention(nn.Module): + def __init__( + self, dim, num_heads=8, qkv_bias=False, attn_drop=0., + proj_drop=0., window_size=None, attn_head_dim=None): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = None + self.v_bias = None + + if window_size: + self.window_size = window_size + self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH + # cls to token & token 2 cls & cls to cls + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(window_size[0]) + coords_w = torch.arange(window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = \ + torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer("relative_position_index", relative_position_index) + else: + self.window_size = None + self.relative_position_bias_table = None + self.relative_position_index = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(all_head_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, rel_pos_bias: Optional[torch.Tensor] = None): + B, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + if torch.jit.is_scripting(): + # FIXME requires_grad breaks w/ torchscript + qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias), self.v_bias)) + else: + qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + if self.relative_position_bias_table is not None: + relative_position_bias = \ + self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if rel_pos_bias is not None: + attn = attn + rel_pos_bias + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm, + window_size=None, attn_head_dim=None): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + window_size=window_size, attn_head_dim=attn_head_dim) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if init_values: + self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + else: + self.gamma_1, self.gamma_2 = None, None + + def forward(self, x, rel_pos_bias: Optional[torch.Tensor] = None): + if self.gamma_1 is None: + x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + else: + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias)) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class RelativePositionBias(nn.Module): + + def __init__(self, window_size, num_heads): + super().__init__() + self.window_size = window_size + self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH + # cls to token & token 2 cls & cls to cls + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(window_size[0]) + coords_w = torch.arange(window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = \ + torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer("relative_position_index", relative_position_index) + + # trunc_normal_(self.relative_position_bias_table, std=.02) + + def forward(self): + relative_position_bias = \ + self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH + return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + + +class Beit(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6), init_values=None, + use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, + use_mean_pooling=True, init_scale=0.001): + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + if use_abs_pos_emb: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + else: + self.pos_embed = None + self.pos_drop = nn.Dropout(p=drop_rate) + + if use_shared_rel_pos_bias: + self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.grid_size, num_heads=num_heads) + else: + self.rel_pos_bias = None + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.use_rel_pos_bias = use_rel_pos_bias + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + init_values=init_values, window_size=self.patch_embed.grid_size if use_rel_pos_bias else None) + for i in range(depth)]) + self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) + self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + if self.pos_embed is not None: + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + # trunc_normal_(self.mask_token, std=.02) + self.fix_init_weight() + if isinstance(self.head, nn.Linear): + trunc_normal_(self.head.weight, std=.02) + self.head.weight.data.mul_(init_scale) + self.head.bias.data.mul_(init_scale) + + def fix_init_weight(self): + def rescale(param, layer_id): + param.div_(math.sqrt(2.0 * layer_id)) + + for layer_id, layer in enumerate(self.blocks): + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + batch_size, seq_len, _ = x.size() + + cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + if self.pos_embed is not None: + x = x + self.pos_embed + x = self.pos_drop(x) + + rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None + for blk in self.blocks: + x = blk(x, rel_pos_bias=rel_pos_bias) + + x = self.norm(x) + if self.fc_norm is not None: + t = x[:, 1:, :] + return self.fc_norm(t.mean(1)) + else: + return x[:, 0] + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_beit(variant, pretrained=False, default_cfg=None, **kwargs): + default_cfg = default_cfg or default_cfgs[variant] + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Beit models.') + + model = build_model_with_cfg( + Beit, variant, pretrained, + default_cfg=default_cfg, + # FIXME an updated filter fn needed to interpolate rel pos emb if fine tuning to diff model sizes + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def beit_base_patch16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1, **kwargs) + model = _create_beit('beit_base_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_base_patch16_384(pretrained=False, **kwargs): + model_kwargs = dict( + img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1, **kwargs) + model = _create_beit('beit_base_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_base_patch16_224_in22k(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1, **kwargs) + model = _create_beit('beit_base_patch16_224_in22k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_large_patch16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beit_large_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_large_patch16_384(pretrained=False, **kwargs): + model_kwargs = dict( + img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beit_large_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_large_patch16_512(pretrained=False, **kwargs): + model_kwargs = dict( + img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beit_large_patch16_512', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_large_patch16_224_in22k(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beit_large_patch16_224_in22k', pretrained=pretrained, **model_kwargs) + return model diff --git a/testbed/huggingface__pytorch-image-models/timm/models/byoanet.py b/testbed/huggingface__pytorch-image-models/timm/models/byoanet.py new file mode 100644 index 0000000000000000000000000000000000000000..6558de35e7389d9a5d2b7d425cc1a47474a46487 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/byoanet.py @@ -0,0 +1,280 @@ +""" Bring-Your-Own-Attention Network + +A flexible network w/ dataclass based config for stacking NN blocks including +self-attention (or similar) layers. + +Currently used to implement experimential variants of: + * Bottleneck Transformers + * Lambda ResNets + * HaloNets + +Consider all of the models definitions here as experimental WIP and likely to change. + +Hacked together by / copyright Ross Wightman, 2021. +""" +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .byobnet import ByoBlockCfg, ByoModelCfg, ByobNet, interleave_blocks +from .helpers import build_model_with_cfg +from .registry import register_model + +__all__ = [] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', + 'fixed_input_size': False, 'min_input_size': (3, 224, 224), + **kwargs + } + + +default_cfgs = { + # GPU-Efficient (ResNet) weights + 'botnet26t_256': _cfg( + url='', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), + 'botnet50ts_256': _cfg( + url='', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), + 'eca_botnext26ts_256': _cfg( + url='', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), + + 'halonet_h1': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)), + 'halonet26t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halonet26t_256-9b4bf0b3.pth', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)), + 'sehalonet33ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/sehalonet33ts_256-87e053f9.pth', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), + 'halonet50ts': _cfg( + url='', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)), + 'eca_halonext26ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_halonext26ts_256-1e55880b.pth', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)), + + 'lambda_resnet26t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet26t_256-b040fce6.pth', + min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8)), +} + + +model_cfgs = dict( + + botnet26t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + fixed_input_size=True, + self_attn_layer='bottleneck', + self_attn_kwargs=dict() + ), + botnet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + fixed_input_size=True, + self_attn_layer='bottleneck', + self_attn_kwargs=dict() + ), + eca_botnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=16, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + fixed_input_size=True, + act_layer='silu', + attn_layer='eca', + self_attn_layer='bottleneck', + self_attn_kwargs=dict() + ), + + halonet_h1=ByoModelCfg( + blocks=( + ByoBlockCfg(type='self_attn', d=3, c=64, s=1, gs=0, br=1.0), + ByoBlockCfg(type='self_attn', d=3, c=128, s=2, gs=0, br=1.0), + ByoBlockCfg(type='self_attn', d=10, c=256, s=2, gs=0, br=1.0), + ByoBlockCfg(type='self_attn', d=3, c=512, s=2, gs=0, br=1.0), + ), + stem_chs=64, + stem_type='7x7', + stem_pool='maxpool', + + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=3), + ), + halonet26t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=2, dim_head=16) + ), + sehalonet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg('self_attn', d=2, c=1536, s=2, gs=0, br=0.333), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + act_layer='silu', + num_features=1280, + attn_layer='se', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=3) + ), + halonet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks( + types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25, + self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=3, num_heads=4)), + interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=3) + ), + eca_halonext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=16, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='eca', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=2, dim_head=16) + ), + + lambda_resnet26t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + self_attn_layer='lambda', + self_attn_kwargs=dict(r=9) + ), +) + + +def _create_byoanet(variant, cfg_variant=None, pretrained=False, **kwargs): + return build_model_with_cfg( + ByobNet, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) + + +@register_model +def botnet26t_256(pretrained=False, **kwargs): + """ Bottleneck Transformer w/ ResNet26-T backbone. + NOTE: this isn't performing well, may remove + """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('botnet26t_256', 'botnet26t', pretrained=pretrained, **kwargs) + + +@register_model +def botnet50ts_256(pretrained=False, **kwargs): + """ Bottleneck Transformer w/ ResNet50-T backbone, silu act. + NOTE: this isn't performing well, may remove + """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('botnet50ts_256', 'botnet50ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_botnext26ts_256(pretrained=False, **kwargs): + """ Bottleneck Transformer w/ ResNet26-T backbone, silu act. + NOTE: this isn't performing well, may remove + """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('eca_botnext26ts_256', 'eca_botnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def halonet_h1(pretrained=False, **kwargs): + """ HaloNet-H1. Halo attention in all stages as per the paper. + NOTE: This runs very slowly! + """ + return _create_byoanet('halonet_h1', pretrained=pretrained, **kwargs) + + +@register_model +def halonet26t(pretrained=False, **kwargs): + """ HaloNet w/ a ResNet26-t backbone. Halo attention in final two stages + """ + return _create_byoanet('halonet26t', pretrained=pretrained, **kwargs) + + +@register_model +def sehalonet33ts(pretrained=False, **kwargs): + """ HaloNet w/ a ResNet33-t backbone, SE attn for non Halo blocks, SiLU, 1-2 Halo in stage 2,3,4. + """ + return _create_byoanet('sehalonet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def halonet50ts(pretrained=False, **kwargs): + """ HaloNet w/ a ResNet50-t backbone, silu act. Halo attention in final two stages + """ + return _create_byoanet('halonet50ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_halonext26ts(pretrained=False, **kwargs): + """ HaloNet w/ a ResNet26-t backbone, silu act. Halo attention in final two stages + """ + return _create_byoanet('eca_halonext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def lambda_resnet26t(pretrained=False, **kwargs): + """ Lambda-ResNet-26T. Lambda layers in last two stages. + """ + return _create_byoanet('lambda_resnet26t', pretrained=pretrained, **kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/byobnet.py b/testbed/huggingface__pytorch-image-models/timm/models/byobnet.py new file mode 100644 index 0000000000000000000000000000000000000000..59105c4444fc572fe5c9d4e255d2063849c33dbd --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/byobnet.py @@ -0,0 +1,1354 @@ +""" Bring-Your-Own-Blocks Network + +A flexible network w/ dataclass based config for stacking those NN blocks. + +This model is currently used to implement the following networks: + +GPU Efficient (ResNets) - gernet_l/m/s (original versions called genet, but this was already used (by SENet author)). +Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 +Code and weights: https://github.com/idstcv/GPU-Efficient-Networks, licensed Apache 2.0 + +RepVGG - repvgg_* +Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 +Code and weights: https://github.com/DingXiaoH/RepVGG, licensed MIT + +In all cases the models have been modified to fit within the design of ByobNet. I've remapped +the original weights and verified accuracies. + +For GPU Efficient nets, I used the original names for the blocks since they were for the most part +the same as original residual blocks in ResNe(X)t, DarkNet, and other existing models. Note also some +changes introduced in RegNet were also present in the stem and bottleneck blocks for this model. + +A significant number of different network archs can be implemented here, including variants of the +above nets that include attention. + +Hacked together by / copyright Ross Wightman, 2021. +""" +import math +from dataclasses import dataclass, field, replace +from typing import Tuple, List, Dict, Optional, Union, Any, Callable, Sequence +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, named_apply +from .layers import ClassifierHead, ConvBnAct, BatchNormAct2d, DropPath, AvgPool2dSame, \ + create_conv2d, get_act_layer, convert_norm_act, get_attn, make_divisible, to_2tuple +from .registry import register_model + +__all__ = ['ByobNet', 'ByoModelCfg', 'ByoBlockCfg', 'create_byob_stem', 'create_block'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + # GPU-Efficient (ResNet) weights + 'gernet_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_s-756b4751.pth'), + 'gernet_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_m-0873c53a.pth'), + 'gernet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_l-f31e2e8d.pth', + input_size=(3, 256, 256), pool_size=(8, 8)), + + # RepVGG weights + 'repvgg_a2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_a2-c1ee6d2b.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b0-80ac3f1b.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1-77ca2989.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b1g4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1g4-abde5d92.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2-25b7494e.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b2g4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2g4-165a85f2.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3-199bc50d.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b3g4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3g4-73c370bf.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + + # experimental configs + 'resnet51q': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet51q_ra2-d47dcc76.pth', + first_conv='stem.conv1', input_size=(3, 256, 256), pool_size=(8, 8), + test_input_size=(3, 288, 288), crop_pct=1.0), + 'resnet61q': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet61q_ra2-6afc536c.pth', + first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), + test_input_size=(3, 288, 288), crop_pct=1.0, interpolation='bicubic'), + + 'resnext26ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnext26ts_256_ra2-8bbd9106.pth', + first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'), + 'gcresnext26ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext26ts_256-e414378b.pth', + first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'), + 'seresnext26ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnext26ts_256-6f0d74a3.pth', + first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'), + 'eca_resnext26ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnext26ts_256-5a1d030f.pth', + first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'), + 'bat_resnext26ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/bat_resnext26ts_256-fa6fd595.pth', + first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic', + min_input_size=(3, 256, 256)), + + 'resnet32ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet32ts_256-aacf5250.pth', + first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'), + 'resnet33ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet33ts_256-e91b09a4.pth', + first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'), + 'gcresnet33ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet33ts_256-0e0cd345.pth', + first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'), + 'seresnet33ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnet33ts_256-f8ad44d9.pth', + first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'), + 'eca_resnet33ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnet33ts_256-8f98face.pth', + first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'), + + 'gcresnet50t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet50t_256-96374d1c.pth', + first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'), + + 'gcresnext50ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext50ts_256-3e0f515e.pth', + first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'), +} + + +@dataclass +class ByoBlockCfg: + type: Union[str, nn.Module] + d: int # block depth (number of block repeats in stage) + c: int # number of output channels for each block in stage + s: int = 2 # stride of stage (first block) + gs: Optional[Union[int, Callable]] = None # group-size of blocks in stage, conv is depthwise if gs == 1 + br: float = 1. # bottleneck-ratio of blocks in stage + + # NOTE: these config items override the model cfgs that are applied to all blocks by default + attn_layer: Optional[str] = None + attn_kwargs: Optional[Dict[str, Any]] = None + self_attn_layer: Optional[str] = None + self_attn_kwargs: Optional[Dict[str, Any]] = None + block_kwargs: Optional[Dict[str, Any]] = None + + +@dataclass +class ByoModelCfg: + blocks: Tuple[Union[ByoBlockCfg, Tuple[ByoBlockCfg, ...]], ...] + downsample: str = 'conv1x1' + stem_type: str = '3x3' + stem_pool: Optional[str] = 'maxpool' + stem_chs: int = 32 + width_factor: float = 1.0 + num_features: int = 0 # num out_channels for final conv, no final 1x1 conv if 0 + zero_init_last: bool = True # zero init last weight (usually bn) in residual path + fixed_input_size: bool = False # model constrained to a fixed-input size / img_size must be provided on creation + + act_layer: str = 'relu' + norm_layer: str = 'batchnorm' + + # NOTE: these config items will be overridden by the block cfg (per-block) if they are set there + attn_layer: Optional[str] = None + attn_kwargs: dict = field(default_factory=lambda: dict()) + self_attn_layer: Optional[str] = None + self_attn_kwargs: dict = field(default_factory=lambda: dict()) + block_kwargs: Dict[str, Any] = field(default_factory=lambda: dict()) + + +def _rep_vgg_bcfg(d=(4, 6, 16, 1), wf=(1., 1., 1., 1.), groups=0): + c = (64, 128, 256, 512) + group_size = 0 + if groups > 0: + group_size = lambda chs, idx: chs // groups if (idx + 1) % 2 == 0 else 0 + bcfg = tuple([ByoBlockCfg(type='rep', d=d, c=c * wf, gs=group_size) for d, c, wf in zip(d, c, wf)]) + return bcfg + + +def interleave_blocks( + types: Tuple[str, str], d, every: Union[int, List[int]] = 1, first: bool = False, **kwargs +) -> Tuple[ByoBlockCfg]: + """ interleave 2 block types in stack + """ + assert len(types) == 2 + if isinstance(every, int): + every = list(range(0 if first else every, d, every + 1)) + if not every: + every = [d - 1] + set(every) + blocks = [] + for i in range(d): + block_type = types[1] if i in every else types[0] + blocks += [ByoBlockCfg(type=block_type, d=1, **kwargs)] + return tuple(blocks) + + +model_cfgs = dict( + gernet_l=ByoModelCfg( + blocks=( + ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.), + ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.), + ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4), + ByoBlockCfg(type='bottle', d=5, c=640, s=2, gs=1, br=3.), + ByoBlockCfg(type='bottle', d=4, c=640, s=1, gs=1, br=3.), + ), + stem_chs=32, + stem_pool=None, + num_features=2560, + ), + gernet_m=ByoModelCfg( + blocks=( + ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.), + ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.), + ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4), + ByoBlockCfg(type='bottle', d=4, c=640, s=2, gs=1, br=3.), + ByoBlockCfg(type='bottle', d=1, c=640, s=1, gs=1, br=3.), + ), + stem_chs=32, + stem_pool=None, + num_features=2560, + ), + gernet_s=ByoModelCfg( + blocks=( + ByoBlockCfg(type='basic', d=1, c=48, s=2, gs=0, br=1.), + ByoBlockCfg(type='basic', d=3, c=48, s=2, gs=0, br=1.), + ByoBlockCfg(type='bottle', d=7, c=384, s=2, gs=0, br=1 / 4), + ByoBlockCfg(type='bottle', d=2, c=560, s=2, gs=1, br=3.), + ByoBlockCfg(type='bottle', d=1, c=256, s=1, gs=1, br=3.), + ), + stem_chs=13, + stem_pool=None, + num_features=1920, + ), + + repvgg_a2=ByoModelCfg( + blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1.5, 1.5, 1.5, 2.75)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b0=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(1., 1., 1., 2.5)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b1=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b1g4=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.), groups=4), + stem_type='rep', + stem_chs=64, + ), + repvgg_b2=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b2g4=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.), groups=4), + stem_type='rep', + stem_chs=64, + ), + repvgg_b3=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b3g4=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.), groups=4), + stem_type='rep', + stem_chs=64, + ), + + # 4 x conv stem w/ 2 act, no maxpool, 2,4,6,4 repeats, group size 32 in first 3 blocks + # DW convs in last block, 2048 pre-FC, silu act + resnet51q=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0), + ), + stem_chs=128, + stem_type='quad2', + stem_pool=None, + num_features=2048, + act_layer='silu', + ), + + # 4 x conv stem w/ 4 act, no maxpool, 1,4,6,4 repeats, edge block first, group size 32 in next 2 blocks + # DW convs in last block, 4 conv for each bottle block, 2048 pre-FC, silu act + resnet61q=ByoModelCfg( + blocks=( + ByoBlockCfg(type='edge', d=1, c=256, s=1, gs=0, br=1.0, block_kwargs=dict()), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0), + ), + stem_chs=128, + stem_type='quad', + stem_pool=None, + num_features=2048, + act_layer='silu', + block_kwargs=dict(extra_conv=True), + ), + + # A series of ResNeXt-26 models w/ one of none, GC, SE, ECA, BAT attn, group size 32, SiLU act, + # and a tiered stem w/ maxpool + resnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + ), + gcresnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='gca', + ), + seresnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='se', + ), + eca_resnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='eca', + ), + bat_resnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='bat', + attn_kwargs=dict(block_size=8) + ), + + # ResNet-32 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, no pre-fc feat layer, tiered stem w/o maxpool + resnet32ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=0, + act_layer='silu', + ), + + # ResNet-33 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, 1280 pre-FC feat, tiered stem w/o maxpool + resnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + ), + + # A series of ResNet-33 (2, 3, 3, 2) models w/ one of GC, SE, ECA attn, no groups, SiLU act, 1280 pre-FC feat + # and a tiered stem w/ no maxpool + gcresnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + attn_layer='gca', + ), + seresnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + attn_layer='se', + ), + eca_resnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + attn_layer='eca', + ), + + gcresnet50t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + attn_layer='gca', + ), + + gcresnext50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + # stem_pool=None, + act_layer='silu', + attn_layer='gca', + ), +) + + +@register_model +def gernet_l(pretrained=False, **kwargs): + """ GEResNet-Large (GENet-Large from official impl) + `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 + """ + return _create_byobnet('gernet_l', pretrained=pretrained, **kwargs) + + +@register_model +def gernet_m(pretrained=False, **kwargs): + """ GEResNet-Medium (GENet-Normal from official impl) + `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 + """ + return _create_byobnet('gernet_m', pretrained=pretrained, **kwargs) + + +@register_model +def gernet_s(pretrained=False, **kwargs): + """ EResNet-Small (GENet-Small from official impl) + `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 + """ + return _create_byobnet('gernet_s', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_a2(pretrained=False, **kwargs): + """ RepVGG-A2 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_a2', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b0(pretrained=False, **kwargs): + """ RepVGG-B0 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b0', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b1(pretrained=False, **kwargs): + """ RepVGG-B1 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b1', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b1g4(pretrained=False, **kwargs): + """ RepVGG-B1g4 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b1g4', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b2(pretrained=False, **kwargs): + """ RepVGG-B2 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b2', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b2g4(pretrained=False, **kwargs): + """ RepVGG-B2g4 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b2g4', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b3(pretrained=False, **kwargs): + """ RepVGG-B3 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b3', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b3g4(pretrained=False, **kwargs): + """ RepVGG-B3g4 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b3g4', pretrained=pretrained, **kwargs) + + +@register_model +def resnet51q(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnet51q', pretrained=pretrained, **kwargs) + + +@register_model +def resnet61q(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnet61q', pretrained=pretrained, **kwargs) + + +@register_model +def resnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('gcresnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def seresnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('seresnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_resnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('eca_resnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def bat_resnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('bat_resnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def resnet32ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnet32ts', pretrained=pretrained, **kwargs) + + +@register_model +def resnet33ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnet33ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('gcresnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def seresnet33ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('seresnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_resnet33ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('eca_resnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnet50t(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('gcresnet50t', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnext50ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('gcresnext50ts', pretrained=pretrained, **kwargs) + + +def expand_blocks_cfg(stage_blocks_cfg: Union[ByoBlockCfg, Sequence[ByoBlockCfg]]) -> List[ByoBlockCfg]: + if not isinstance(stage_blocks_cfg, Sequence): + stage_blocks_cfg = (stage_blocks_cfg,) + block_cfgs = [] + for i, cfg in enumerate(stage_blocks_cfg): + block_cfgs += [replace(cfg, d=1) for _ in range(cfg.d)] + return block_cfgs + + +def num_groups(group_size, channels): + if not group_size: # 0 or None + return 1 # normal conv with 1 group + else: + # NOTE group_size == 1 -> depthwise conv + assert channels % group_size == 0 + return channels // group_size + + +@dataclass +class LayerFn: + conv_norm_act: Callable = ConvBnAct + norm_act: Callable = BatchNormAct2d + act: Callable = nn.ReLU + attn: Optional[Callable] = None + self_attn: Optional[Callable] = None + + +class DownsampleAvg(nn.Module): + def __init__(self, in_chs, out_chs, stride=1, dilation=1, apply_act=False, layers: LayerFn = None): + """ AvgPool Downsampling as in 'D' ResNet variants.""" + super(DownsampleAvg, self).__init__() + layers = layers or LayerFn() + avg_stride = stride if dilation == 1 else 1 + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + else: + self.pool = nn.Identity() + self.conv = layers.conv_norm_act(in_chs, out_chs, 1, apply_act=apply_act) + + def forward(self, x): + return self.conv(self.pool(x)) + + +def create_downsample(downsample_type, layers: LayerFn, **kwargs): + if downsample_type == 'avg': + return DownsampleAvg(**kwargs) + else: + return layers.conv_norm_act(kwargs.pop('in_chs'), kwargs.pop('out_chs'), kernel_size=1, **kwargs) + + +class BasicBlock(nn.Module): + """ ResNet Basic Block - kxk + kxk + """ + + def __init__( + self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), group_size=None, bottle_ratio=1.0, + downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None, + drop_path_rate=0.): + super(BasicBlock, self).__init__() + layers = layers or LayerFn() + mid_chs = make_divisible(out_chs * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: + self.shortcut = create_downsample( + downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0], + apply_act=False, layers=layers) + else: + self.shortcut = nn.Identity() + + self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0]) + self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) + self.conv2_kxk = layers.conv_norm_act( + mid_chs, out_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block, apply_act=False) + self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last: + nn.init.zeros_(self.conv2_kxk.bn.weight) + for attn in (self.attn, self.attn_last): + if hasattr(attn, 'reset_parameters'): + attn.reset_parameters() + + def forward(self, x): + shortcut = self.shortcut(x) + + # residual path + x = self.conv1_kxk(x) + x = self.conv2_kxk(x) + x = self.attn(x) + x = self.drop_path(x) + + x = self.act(x + shortcut) + return x + + +class BottleneckBlock(nn.Module): + """ ResNet-like Bottleneck Block - 1x1 - kxk - 1x1 + """ + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None, + downsample='avg', attn_last=False, linear_out=False, extra_conv=False, layers: LayerFn = None, + drop_block=None, drop_path_rate=0.): + super(BottleneckBlock, self).__init__() + layers = layers or LayerFn() + mid_chs = make_divisible(out_chs * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: + self.shortcut = create_downsample( + downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0], + apply_act=False, layers=layers) + else: + self.shortcut = nn.Identity() + + self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) + self.conv2_kxk = layers.conv_norm_act( + mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], + groups=groups, drop_block=drop_block) + if extra_conv: + self.conv2b_kxk = layers.conv_norm_act( + mid_chs, mid_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block) + else: + self.conv2b_kxk = nn.Identity() + self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) + self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) + self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last: + nn.init.zeros_(self.conv3_1x1.bn.weight) + for attn in (self.attn, self.attn_last): + if hasattr(attn, 'reset_parameters'): + attn.reset_parameters() + + def forward(self, x): + shortcut = self.shortcut(x) + + x = self.conv1_1x1(x) + x = self.conv2_kxk(x) + x = self.conv2b_kxk(x) + x = self.attn(x) + x = self.conv3_1x1(x) + x = self.attn_last(x) + x = self.drop_path(x) + + x = self.act(x + shortcut) + return x + + +class DarkBlock(nn.Module): + """ DarkNet-like (1x1 + 3x3 w/ stride) block + + The GE-Net impl included a 1x1 + 3x3 block in their search space. It was not used in the feature models. + This block is pretty much a DarkNet block (also DenseNet) hence the name. Neither DarkNet or DenseNet + uses strides within the block (external 3x3 or maxpool downsampling is done in front of the block repeats). + + If one does want to use a lot of these blocks w/ stride, I'd recommend using the EdgeBlock (3x3 /w stride + 1x1) + for more optimal compute. + """ + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None, + downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None, + drop_path_rate=0.): + super(DarkBlock, self).__init__() + layers = layers or LayerFn() + mid_chs = make_divisible(out_chs * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: + self.shortcut = create_downsample( + downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0], + apply_act=False, layers=layers) + else: + self.shortcut = nn.Identity() + + self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) + self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) + self.conv2_kxk = layers.conv_norm_act( + mid_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0], + groups=groups, drop_block=drop_block, apply_act=False) + self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last: + nn.init.zeros_(self.conv2_kxk.bn.weight) + for attn in (self.attn, self.attn_last): + if hasattr(attn, 'reset_parameters'): + attn.reset_parameters() + + def forward(self, x): + shortcut = self.shortcut(x) + + x = self.conv1_1x1(x) + x = self.attn(x) + x = self.conv2_kxk(x) + x = self.attn_last(x) + x = self.drop_path(x) + x = self.act(x + shortcut) + return x + + +class EdgeBlock(nn.Module): + """ EdgeResidual-like (3x3 + 1x1) block + + A two layer block like DarkBlock, but with the order of the 3x3 and 1x1 convs reversed. + Very similar to the EfficientNet Edge-Residual block but this block it ends with activations, is + intended to be used with either expansion or bottleneck contraction, and can use DW/group/non-grouped convs. + + FIXME is there a more common 3x3 + 1x1 conv block to name this after? + """ + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None, + downsample='avg', attn_last=False, linear_out=False, layers: LayerFn = None, + drop_block=None, drop_path_rate=0.): + super(EdgeBlock, self).__init__() + layers = layers or LayerFn() + mid_chs = make_divisible(out_chs * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: + self.shortcut = create_downsample( + downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0], + apply_act=False, layers=layers) + else: + self.shortcut = nn.Identity() + + self.conv1_kxk = layers.conv_norm_act( + in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], + groups=groups, drop_block=drop_block) + self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) + self.conv2_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) + self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last: + nn.init.zeros_(self.conv2_1x1.bn.weight) + for attn in (self.attn, self.attn_last): + if hasattr(attn, 'reset_parameters'): + attn.reset_parameters() + + def forward(self, x): + shortcut = self.shortcut(x) + + x = self.conv1_kxk(x) + x = self.attn(x) + x = self.conv2_1x1(x) + x = self.attn_last(x) + x = self.drop_path(x) + x = self.act(x + shortcut) + return x + + +class RepVggBlock(nn.Module): + """ RepVGG Block. + + Adapted from impl at https://github.com/DingXiaoH/RepVGG + + This version does not currently support the deploy optimization. It is currently fixed in 'train' mode. + """ + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None, + downsample='', layers: LayerFn = None, drop_block=None, drop_path_rate=0.): + super(RepVggBlock, self).__init__() + layers = layers or LayerFn() + groups = num_groups(group_size, in_chs) + + use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1] + self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None + self.conv_kxk = layers.conv_norm_act( + in_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0], + groups=groups, drop_block=drop_block, apply_act=False) + self.conv_1x1 = layers.conv_norm_act(in_chs, out_chs, 1, stride=stride, groups=groups, apply_act=False) + self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity() + self.act = layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + # NOTE this init overrides that base model init with specific changes for the block type + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + nn.init.normal_(m.weight, .1, .1) + nn.init.normal_(m.bias, 0, .1) + if hasattr(self.attn, 'reset_parameters'): + self.attn.reset_parameters() + + def forward(self, x): + if self.identity is None: + x = self.conv_1x1(x) + self.conv_kxk(x) + else: + identity = self.identity(x) + x = self.conv_1x1(x) + self.conv_kxk(x) + x = self.drop_path(x) # not in the paper / official impl, experimental + x = x + identity + x = self.attn(x) # no attn in the paper / official impl, experimental + x = self.act(x) + return x + + +class SelfAttnBlock(nn.Module): + """ ResNet-like Bottleneck Block - 1x1 - optional kxk - self attn - 1x1 + """ + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None, + downsample='avg', extra_conv=False, linear_out=False, post_attn_na=True, feat_size=None, + layers: LayerFn = None, drop_block=None, drop_path_rate=0.): + super(SelfAttnBlock, self).__init__() + assert layers is not None + mid_chs = make_divisible(out_chs * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: + self.shortcut = create_downsample( + downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0], + apply_act=False, layers=layers) + else: + self.shortcut = nn.Identity() + + self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) + if extra_conv: + self.conv2_kxk = layers.conv_norm_act( + mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], + groups=groups, drop_block=drop_block) + stride = 1 # striding done via conv if enabled + else: + self.conv2_kxk = nn.Identity() + opt_kwargs = {} if feat_size is None else dict(feat_size=feat_size) + # FIXME need to dilate self attn to have dilated network support, moop moop + self.self_attn = layers.self_attn(mid_chs, stride=stride, **opt_kwargs) + self.post_attn = layers.norm_act(mid_chs) if post_attn_na else nn.Identity() + self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last: + nn.init.zeros_(self.conv3_1x1.bn.weight) + if hasattr(self.self_attn, 'reset_parameters'): + self.self_attn.reset_parameters() + + def forward(self, x): + shortcut = self.shortcut(x) + + x = self.conv1_1x1(x) + x = self.conv2_kxk(x) + x = self.self_attn(x) + x = self.post_attn(x) + x = self.conv3_1x1(x) + x = self.drop_path(x) + + x = self.act(x + shortcut) + return x + + +_block_registry = dict( + basic=BasicBlock, + bottle=BottleneckBlock, + dark=DarkBlock, + edge=EdgeBlock, + rep=RepVggBlock, + self_attn=SelfAttnBlock, +) + + +def register_block(block_type:str, block_fn: nn.Module): + _block_registry[block_type] = block_fn + + +def create_block(block: Union[str, nn.Module], **kwargs): + if isinstance(block, (nn.Module, partial)): + return block(**kwargs) + assert block in _block_registry, f'Unknown block type ({block}' + return _block_registry[block](**kwargs) + + +class Stem(nn.Sequential): + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=4, pool='maxpool', + num_rep=3, num_act=None, chs_decay=0.5, layers: LayerFn = None): + super().__init__() + assert stride in (2, 4) + layers = layers or LayerFn() + + if isinstance(out_chs, (list, tuple)): + num_rep = len(out_chs) + stem_chs = out_chs + else: + stem_chs = [round(out_chs * chs_decay ** i) for i in range(num_rep)][::-1] + + self.stride = stride + self.feature_info = [] # track intermediate features + prev_feat = '' + stem_strides = [2] + [1] * (num_rep - 1) + if stride == 4 and not pool: + # set last conv in stack to be strided if stride == 4 and no pooling layer + stem_strides[-1] = 2 + + num_act = num_rep if num_act is None else num_act + # if num_act < num_rep, first convs in stack won't have bn + act + stem_norm_acts = [False] * (num_rep - num_act) + [True] * num_act + prev_chs = in_chs + curr_stride = 1 + for i, (ch, s, na) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)): + layer_fn = layers.conv_norm_act if na else create_conv2d + conv_name = f'conv{i + 1}' + if i > 0 and s > 1: + self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat)) + self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s)) + prev_chs = ch + curr_stride *= s + prev_feat = conv_name + + if pool and 'max' in pool.lower(): + self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat)) + self.add_module('pool', nn.MaxPool2d(3, 2, 1)) + curr_stride *= 2 + prev_feat = 'pool' + + self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat)) + assert curr_stride == stride + + +def create_byob_stem(in_chs, out_chs, stem_type='', pool_type='', feat_prefix='stem', layers: LayerFn = None): + layers = layers or LayerFn() + assert stem_type in ('', 'quad', 'quad2', 'tiered', 'deep', 'rep', '7x7', '3x3') + if 'quad' in stem_type: + # based on NFNet stem, stack of 4 3x3 convs + num_act = 2 if 'quad2' in stem_type else None + stem = Stem(in_chs, out_chs, num_rep=4, num_act=num_act, pool=pool_type, layers=layers) + elif 'tiered' in stem_type: + # 3x3 stack of 3 convs as in my ResNet-T + stem = Stem(in_chs, (3 * out_chs // 8, out_chs // 2, out_chs), pool=pool_type, layers=layers) + elif 'deep' in stem_type: + # 3x3 stack of 3 convs as in ResNet-D + stem = Stem(in_chs, out_chs, num_rep=3, chs_decay=1.0, pool=pool_type, layers=layers) + elif 'rep' in stem_type: + stem = RepVggBlock(in_chs, out_chs, stride=2, layers=layers) + elif '7x7' in stem_type: + # 7x7 stem conv as in ResNet + if pool_type: + stem = Stem(in_chs, out_chs, 7, num_rep=1, pool=pool_type, layers=layers) + else: + stem = layers.conv_norm_act(in_chs, out_chs, 7, stride=2) + else: + # 3x3 stem conv as in RegNet is the default + if pool_type: + stem = Stem(in_chs, out_chs, 3, num_rep=1, pool=pool_type, layers=layers) + else: + stem = layers.conv_norm_act(in_chs, out_chs, 3, stride=2) + + if isinstance(stem, Stem): + feature_info = [dict(f, module='.'.join([feat_prefix, f['module']])) for f in stem.feature_info] + else: + feature_info = [dict(num_chs=out_chs, reduction=2, module=feat_prefix)] + return stem, feature_info + + +def reduce_feat_size(feat_size, stride=2): + return None if feat_size is None else tuple([s // stride for s in feat_size]) + + +def override_kwargs(block_kwargs, model_kwargs): + """ Override model level attn/self-attn/block kwargs w/ block level + + NOTE: kwargs are NOT merged across levels, block_kwargs will fully replace model_kwargs + for the block if set to anything that isn't None. + + i.e. an empty block_kwargs dict will remove kwargs set at model level for that block + """ + out_kwargs = block_kwargs if block_kwargs is not None else model_kwargs + return out_kwargs or {} # make sure None isn't returned + + +def update_block_kwargs(block_kwargs: Dict[str, Any], block_cfg: ByoBlockCfg, model_cfg: ByoModelCfg, ): + layer_fns = block_kwargs['layers'] + + # override attn layer / args with block local config + attn_set = block_cfg.attn_layer is not None + if attn_set or block_cfg.attn_kwargs is not None: + # override attn layer config + if attn_set and not block_cfg.attn_layer: + # empty string for attn_layer type will disable attn for this block + attn_layer = None + else: + attn_kwargs = override_kwargs(block_cfg.attn_kwargs, model_cfg.attn_kwargs) + attn_layer = block_cfg.attn_layer or model_cfg.attn_layer + attn_layer = partial(get_attn(attn_layer), **attn_kwargs) if attn_layer is not None else None + layer_fns = replace(layer_fns, attn=attn_layer) + + # override self-attn layer / args with block local cfg + self_attn_set = block_cfg.self_attn_layer is not None + if self_attn_set or block_cfg.self_attn_kwargs is not None: + # override attn layer config + if self_attn_set and not block_cfg.self_attn_layer: # attn_layer == '' + # empty string for self_attn_layer type will disable attn for this block + self_attn_layer = None + else: + self_attn_kwargs = override_kwargs(block_cfg.self_attn_kwargs, model_cfg.self_attn_kwargs) + self_attn_layer = block_cfg.self_attn_layer or model_cfg.self_attn_layer + self_attn_layer = partial(get_attn(self_attn_layer), **self_attn_kwargs) \ + if self_attn_layer is not None else None + layer_fns = replace(layer_fns, self_attn=self_attn_layer) + + block_kwargs['layers'] = layer_fns + + # add additional block_kwargs specified in block_cfg or model_cfg, precedence to block if set + block_kwargs.update(override_kwargs(block_cfg.block_kwargs, model_cfg.block_kwargs)) + + +def create_byob_stages( + cfg: ByoModelCfg, drop_path_rate: float, output_stride: int, stem_feat: Dict[str, Any], + feat_size: Optional[int] = None, + layers: Optional[LayerFn] = None, + block_kwargs_fn: Optional[Callable] = update_block_kwargs): + + layers = layers or LayerFn() + feature_info = [] + block_cfgs = [expand_blocks_cfg(s) for s in cfg.blocks] + depths = [sum([bc.d for bc in stage_bcs]) for stage_bcs in block_cfgs] + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + dilation = 1 + net_stride = stem_feat['reduction'] + prev_chs = stem_feat['num_chs'] + prev_feat = stem_feat + stages = [] + for stage_idx, stage_block_cfgs in enumerate(block_cfgs): + stride = stage_block_cfgs[0].s + if stride != 1 and prev_feat: + feature_info.append(prev_feat) + if net_stride >= output_stride and stride > 1: + dilation *= stride + stride = 1 + net_stride *= stride + first_dilation = 1 if dilation in (1, 2) else 2 + + blocks = [] + for block_idx, block_cfg in enumerate(stage_block_cfgs): + out_chs = make_divisible(block_cfg.c * cfg.width_factor) + group_size = block_cfg.gs + if isinstance(group_size, Callable): + group_size = group_size(out_chs, block_idx) + block_kwargs = dict( # Blocks used in this model must accept these arguments + in_chs=prev_chs, + out_chs=out_chs, + stride=stride if block_idx == 0 else 1, + dilation=(first_dilation, dilation), + group_size=group_size, + bottle_ratio=block_cfg.br, + downsample=cfg.downsample, + drop_path_rate=dpr[stage_idx][block_idx], + layers=layers, + ) + if block_cfg.type in ('self_attn',): + # add feat_size arg for blocks that support/need it + block_kwargs['feat_size'] = feat_size + block_kwargs_fn(block_kwargs, block_cfg=block_cfg, model_cfg=cfg) + blocks += [create_block(block_cfg.type, **block_kwargs)] + first_dilation = dilation + prev_chs = out_chs + if stride > 1 and block_idx == 0: + feat_size = reduce_feat_size(feat_size, stride) + + stages += [nn.Sequential(*blocks)] + prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}') + + feature_info.append(prev_feat) + return nn.Sequential(*stages), feature_info + + +def get_layer_fns(cfg: ByoModelCfg): + act = get_act_layer(cfg.act_layer) + norm_act = convert_norm_act(norm_layer=cfg.norm_layer, act_layer=act) + conv_norm_act = partial(ConvBnAct, norm_layer=cfg.norm_layer, act_layer=act) + attn = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None + self_attn = partial(get_attn(cfg.self_attn_layer), **cfg.self_attn_kwargs) if cfg.self_attn_layer else None + layer_fn = LayerFn(conv_norm_act=conv_norm_act, norm_act=norm_act, act=act, attn=attn, self_attn=self_attn) + return layer_fn + + +class ByobNet(nn.Module): + """ 'Bring-your-own-blocks' Net + + A flexible network backbone that allows building model stem + blocks via + dataclass cfg definition w/ factory functions for module instantiation. + + Current assumption is that both stem and blocks are in conv-bn-act order (w/ block ending in act). + """ + def __init__(self, cfg: ByoModelCfg, num_classes=1000, in_chans=3, global_pool='avg', output_stride=32, + zero_init_last=True, img_size=None, drop_rate=0., drop_path_rate=0.): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + layers = get_layer_fns(cfg) + if cfg.fixed_input_size: + assert img_size is not None, 'img_size argument is required for fixed input size model' + feat_size = to_2tuple(img_size) if img_size is not None else None + + self.feature_info = [] + stem_chs = int(round((cfg.stem_chs or cfg.blocks[0].c) * cfg.width_factor)) + self.stem, stem_feat = create_byob_stem(in_chans, stem_chs, cfg.stem_type, cfg.stem_pool, layers=layers) + self.feature_info.extend(stem_feat[:-1]) + feat_size = reduce_feat_size(feat_size, stride=stem_feat[-1]['reduction']) + + self.stages, stage_feat = create_byob_stages( + cfg, drop_path_rate, output_stride, stem_feat[-1], layers=layers, feat_size=feat_size) + self.feature_info.extend(stage_feat[:-1]) + + prev_chs = stage_feat[-1]['num_chs'] + if cfg.num_features: + self.num_features = int(round(cfg.width_factor * cfg.num_features)) + self.final_conv = layers.conv_norm_act(prev_chs, self.num_features, 1) + else: + self.num_features = prev_chs + self.final_conv = nn.Identity() + self.feature_info += [ + dict(num_chs=self.num_features, reduction=stage_feat[-1]['reduction'], module='final_conv')] + + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + # init weights + named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.final_conv(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _init_weights(module, name='', zero_init_last=False): + if isinstance(module, nn.Conv2d): + fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels + fan_out //= module.groups + module.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Linear): + nn.init.normal_(module.weight, mean=0.0, std=0.01) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.BatchNorm2d): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + module.init_weights(zero_init_last=zero_init_last) + + +def _create_byobnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ByobNet, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=model_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/cait.py b/testbed/huggingface__pytorch-image-models/timm/models/cait.py new file mode 100644 index 0000000000000000000000000000000000000000..69b4ba06c889196a19022d5938a73600734ebc2d --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/cait.py @@ -0,0 +1,394 @@ +""" Class-Attention in Image Transformers (CaiT) + +Paper: 'Going deeper with Image Transformers' - https://arxiv.org/abs/2103.17239 + +Original code and weights from https://github.com/facebookresearch/deit, copyright below + +""" +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. +from copy import deepcopy + +import torch +import torch.nn as nn +from functools import partial + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import PatchEmbed, Mlp, DropPath, trunc_normal_ +from .registry import register_model + + +__all__ = ['Cait', 'ClassAttn', 'LayerScaleBlockClassAttn', 'LayerScaleBlock', 'TalkingHeadAttn'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 384, 384), 'pool_size': None, + 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = dict( + cait_xxs24_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XXS24_224.pth', + input_size=(3, 224, 224), + ), + cait_xxs24_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XXS24_384.pth', + ), + cait_xxs36_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XXS36_224.pth', + input_size=(3, 224, 224), + ), + cait_xxs36_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XXS36_384.pth', + ), + cait_xs24_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XS24_384.pth', + ), + cait_s24_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/S24_224.pth', + input_size=(3, 224, 224), + ), + cait_s24_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/S24_384.pth', + ), + cait_s36_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/S36_384.pth', + ), + cait_m36_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/M36_384.pth', + ), + cait_m48_448=_cfg( + url='https://dl.fbaipublicfiles.com/deit/M48_448.pth', + input_size=(3, 448, 448), + ), +) + + +class ClassAttn(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to do CA + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.q = nn.Linear(dim, dim, bias=qkv_bias) + self.k = nn.Linear(dim, dim, bias=qkv_bias) + self.v = nn.Linear(dim, dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + q = self.q(x[:, 0]).unsqueeze(1).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + q = q * self.scale + v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x_cls = (attn @ v).transpose(1, 2).reshape(B, 1, C) + x_cls = self.proj(x_cls) + x_cls = self.proj_drop(x_cls) + + return x_cls + + +class LayerScaleBlockClassAttn(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to add CA and LayerScale + def __init__( + self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_block=ClassAttn, + mlp_block=Mlp, init_values=1e-4): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = attn_block( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = mlp_block(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + + def forward(self, x, x_cls): + u = torch.cat((x_cls, x), dim=1) + x_cls = x_cls + self.drop_path(self.gamma_1 * self.attn(self.norm1(u))) + x_cls = x_cls + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x_cls))) + return x_cls + + +class TalkingHeadAttn(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to add Talking Heads Attention (https://arxiv.org/pdf/2003.02436v1.pdf) + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + + self.num_heads = num_heads + + head_dim = dim // num_heads + + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + + self.proj = nn.Linear(dim, dim) + + self.proj_l = nn.Linear(num_heads, num_heads) + self.proj_w = nn.Linear(num_heads, num_heads) + + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0] * self.scale, qkv[1], qkv[2] + + attn = (q @ k.transpose(-2, -1)) + + attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + + attn = attn.softmax(dim=-1) + + attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class LayerScaleBlock(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to add layerScale + def __init__( + self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_block=TalkingHeadAttn, + mlp_block=Mlp, init_values=1e-4): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = attn_block( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = mlp_block(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + + def forward(self, x): + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class Cait(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to adapt to our cait models + def __init__( + self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., + norm_layer=partial(nn.LayerNorm, eps=1e-6), + global_pool=None, + block_layers=LayerScaleBlock, + block_layers_token=LayerScaleBlockClassAttn, + patch_layer=PatchEmbed, + act_layer=nn.GELU, + attn_block=TalkingHeadAttn, + mlp_block=Mlp, + init_scale=1e-4, + attn_block_token_only=ClassAttn, + mlp_block_token_only=Mlp, + depth_token_only=2, + mlp_ratio_clstk=4.0 + ): + super().__init__() + + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim + + self.patch_embed = patch_layer( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [drop_path_rate for i in range(depth)] + self.blocks = nn.ModuleList([ + block_layers( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + act_layer=act_layer, attn_block=attn_block, mlp_block=mlp_block, init_values=init_scale) + for i in range(depth)]) + + self.blocks_token_only = nn.ModuleList([ + block_layers_token( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio_clstk, qkv_bias=qkv_bias, + drop=0.0, attn_drop=0.0, drop_path=0.0, norm_layer=norm_layer, + act_layer=act_layer, attn_block=attn_block_token_only, + mlp_block=mlp_block_token_only, init_values=init_scale) + for i in range(depth_token_only)]) + + self.norm = norm_layer(embed_dim) + + self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')] + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + B = x.shape[0] + x = self.patch_embed(x) + + cls_tokens = self.cls_token.expand(B, -1, -1) + + x = x + self.pos_embed + x = self.pos_drop(x) + + for i, blk in enumerate(self.blocks): + x = blk(x) + + for i, blk in enumerate(self.blocks_token_only): + cls_tokens = blk(x, cls_tokens) + + x = torch.cat((cls_tokens, x), dim=1) + + x = self.norm(x) + return x[:, 0] + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model=None): + if 'model' in state_dict: + state_dict = state_dict['model'] + checkpoint_no_module = {} + for k, v in state_dict.items(): + checkpoint_no_module[k.replace('module.', '')] = v + return checkpoint_no_module + + +def _create_cait(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + Cait, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def cait_xxs24_224(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=192, depth=24, num_heads=4, init_scale=1e-5, **kwargs) + model = _create_cait('cait_xxs24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_xxs24_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=192, depth=24, num_heads=4, init_scale=1e-5, **kwargs) + model = _create_cait('cait_xxs24_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_xxs36_224(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=192, depth=36, num_heads=4, init_scale=1e-5, **kwargs) + model = _create_cait('cait_xxs36_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_xxs36_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=192, depth=36, num_heads=4, init_scale=1e-5, **kwargs) + model = _create_cait('cait_xxs36_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_xs24_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=288, depth=24, num_heads=6, init_scale=1e-5, **kwargs) + model = _create_cait('cait_xs24_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_s24_224(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=384, depth=24, num_heads=8, init_scale=1e-5, **kwargs) + model = _create_cait('cait_s24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_s24_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=384, depth=24, num_heads=8, init_scale=1e-5, **kwargs) + model = _create_cait('cait_s24_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_s36_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=384, depth=36, num_heads=8, init_scale=1e-6, **kwargs) + model = _create_cait('cait_s36_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_m36_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=768, depth=36, num_heads=16, init_scale=1e-6, **kwargs) + model = _create_cait('cait_m36_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_m48_448(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=768, depth=48, num_heads=16, init_scale=1e-6, **kwargs) + model = _create_cait('cait_m48_448', pretrained=pretrained, **model_args) + return model diff --git a/testbed/huggingface__pytorch-image-models/timm/models/coat.py b/testbed/huggingface__pytorch-image-models/timm/models/coat.py new file mode 100644 index 0000000000000000000000000000000000000000..f071715a347120ce0ca4710eceddda61de28ce8f --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/coat.py @@ -0,0 +1,660 @@ +""" +CoaT architecture. + +Paper: Co-Scale Conv-Attentional Image Transformers - https://arxiv.org/abs/2104.06399 + +Official CoaT code at: https://github.com/mlpc-ucsd/CoaT + +Modified from timm/models/vision_transformer.py +""" +from copy import deepcopy +from functools import partial +from typing import Tuple, List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_ +from .registry import register_model + + +__all__ = [ + "coat_tiny", + "coat_mini", + "coat_lite_tiny", + "coat_lite_mini", + "coat_lite_small" +] + + +def _cfg_coat(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed1.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'coat_tiny': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_tiny-473c2a20.pth' + ), + 'coat_mini': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_mini-2c6baf49.pth' + ), + 'coat_lite_tiny': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_lite_tiny-461b07a7.pth' + ), + 'coat_lite_mini': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_lite_mini-d7842000.pth' + ), + 'coat_lite_small': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_lite_small-fea1d5a1.pth' + ), +} + + +class ConvRelPosEnc(nn.Module): + """ Convolutional relative position encoding. """ + def __init__(self, Ch, h, window): + """ + Initialization. + Ch: Channels per head. + h: Number of heads. + window: Window size(s) in convolutional relative positional encoding. It can have two forms: + 1. An integer of window size, which assigns all attention heads with the same window s + size in ConvRelPosEnc. + 2. A dict mapping window size to #attention head splits ( + e.g. {window size 1: #attention head split 1, window size 2: #attention head split 2}) + It will apply different window size to the attention head splits. + """ + super().__init__() + + if isinstance(window, int): + # Set the same window size for all attention heads. + window = {window: h} + self.window = window + elif isinstance(window, dict): + self.window = window + else: + raise ValueError() + + self.conv_list = nn.ModuleList() + self.head_splits = [] + for cur_window, cur_head_split in window.items(): + dilation = 1 + # Determine padding size. + # Ref: https://discuss.pytorch.org/t/how-to-keep-the-shape-of-input-and-output-same-when-dilation-conv/14338 + padding_size = (cur_window + (cur_window - 1) * (dilation - 1)) // 2 + cur_conv = nn.Conv2d(cur_head_split*Ch, cur_head_split*Ch, + kernel_size=(cur_window, cur_window), + padding=(padding_size, padding_size), + dilation=(dilation, dilation), + groups=cur_head_split*Ch, + ) + self.conv_list.append(cur_conv) + self.head_splits.append(cur_head_split) + self.channel_splits = [x*Ch for x in self.head_splits] + + def forward(self, q, v, size: Tuple[int, int]): + B, h, N, Ch = q.shape + H, W = size + assert N == 1 + H * W + + # Convolutional relative position encoding. + q_img = q[:, :, 1:, :] # [B, h, H*W, Ch] + v_img = v[:, :, 1:, :] # [B, h, H*W, Ch] + + v_img = v_img.transpose(-1, -2).reshape(B, h * Ch, H, W) + v_img_list = torch.split(v_img, self.channel_splits, dim=1) # Split according to channels + conv_v_img_list = [] + for i, conv in enumerate(self.conv_list): + conv_v_img_list.append(conv(v_img_list[i])) + conv_v_img = torch.cat(conv_v_img_list, dim=1) + conv_v_img = conv_v_img.reshape(B, h, Ch, H * W).transpose(-1, -2) + + EV_hat = q_img * conv_v_img + EV_hat = F.pad(EV_hat, (0, 0, 1, 0, 0, 0)) # [B, h, N, Ch]. + return EV_hat + + +class FactorAtt_ConvRelPosEnc(nn.Module): + """ Factorized attention with convolutional relative position encoding class. """ + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., shared_crpe=None): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) # Note: attn_drop is actually not used. + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + # Shared convolutional relative position encoding. + self.crpe = shared_crpe + + def forward(self, x, size: Tuple[int, int]): + B, N, C = x.shape + + # Generate Q, K, V. + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # [B, h, N, Ch] + + # Factorized attention. + k_softmax = k.softmax(dim=2) + factor_att = k_softmax.transpose(-1, -2) @ v + factor_att = q @ factor_att + + # Convolutional relative position encoding. + crpe = self.crpe(q, v, size=size) # [B, h, N, Ch] + + # Merge and reshape. + x = self.scale * factor_att + crpe + x = x.transpose(1, 2).reshape(B, N, C) # [B, h, N, Ch] -> [B, N, h, Ch] -> [B, N, C] + + # Output projection. + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class ConvPosEnc(nn.Module): + """ Convolutional Position Encoding. + Note: This module is similar to the conditional position encoding in CPVT. + """ + def __init__(self, dim, k=3): + super(ConvPosEnc, self).__init__() + self.proj = nn.Conv2d(dim, dim, k, 1, k//2, groups=dim) + + def forward(self, x, size: Tuple[int, int]): + B, N, C = x.shape + H, W = size + assert N == 1 + H * W + + # Extract CLS token and image tokens. + cls_token, img_tokens = x[:, :1], x[:, 1:] # [B, 1, C], [B, H*W, C] + + # Depthwise convolution. + feat = img_tokens.transpose(1, 2).view(B, C, H, W) + x = self.proj(feat) + feat + x = x.flatten(2).transpose(1, 2) + + # Combine with CLS token. + x = torch.cat((cls_token, x), dim=1) + + return x + + +class SerialBlock(nn.Module): + """ Serial block class. + Note: In this implementation, each serial block only contains a conv-attention and a FFN (MLP) module. """ + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, shared_cpe=None, shared_crpe=None): + super().__init__() + + # Conv-Attention. + self.cpe = shared_cpe + + self.norm1 = norm_layer(dim) + self.factoratt_crpe = FactorAtt_ConvRelPosEnc( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, shared_crpe=shared_crpe) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + # MLP. + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x, size: Tuple[int, int]): + # Conv-Attention. + x = self.cpe(x, size) + cur = self.norm1(x) + cur = self.factoratt_crpe(cur, size) + x = x + self.drop_path(cur) + + # MLP. + cur = self.norm2(x) + cur = self.mlp(cur) + x = x + self.drop_path(cur) + + return x + + +class ParallelBlock(nn.Module): + """ Parallel block class. """ + def __init__(self, dims, num_heads, mlp_ratios=[], qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, shared_crpes=None): + super().__init__() + + # Conv-Attention. + self.norm12 = norm_layer(dims[1]) + self.norm13 = norm_layer(dims[2]) + self.norm14 = norm_layer(dims[3]) + self.factoratt_crpe2 = FactorAtt_ConvRelPosEnc( + dims[1], num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + shared_crpe=shared_crpes[1] + ) + self.factoratt_crpe3 = FactorAtt_ConvRelPosEnc( + dims[2], num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + shared_crpe=shared_crpes[2] + ) + self.factoratt_crpe4 = FactorAtt_ConvRelPosEnc( + dims[3], num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + shared_crpe=shared_crpes[3] + ) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + # MLP. + self.norm22 = norm_layer(dims[1]) + self.norm23 = norm_layer(dims[2]) + self.norm24 = norm_layer(dims[3]) + # In parallel block, we assume dimensions are the same and share the linear transformation. + assert dims[1] == dims[2] == dims[3] + assert mlp_ratios[1] == mlp_ratios[2] == mlp_ratios[3] + mlp_hidden_dim = int(dims[1] * mlp_ratios[1]) + self.mlp2 = self.mlp3 = self.mlp4 = Mlp( + in_features=dims[1], hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def upsample(self, x, factor: float, size: Tuple[int, int]): + """ Feature map up-sampling. """ + return self.interpolate(x, scale_factor=factor, size=size) + + def downsample(self, x, factor: float, size: Tuple[int, int]): + """ Feature map down-sampling. """ + return self.interpolate(x, scale_factor=1.0/factor, size=size) + + def interpolate(self, x, scale_factor: float, size: Tuple[int, int]): + """ Feature map interpolation. """ + B, N, C = x.shape + H, W = size + assert N == 1 + H * W + + cls_token = x[:, :1, :] + img_tokens = x[:, 1:, :] + + img_tokens = img_tokens.transpose(1, 2).reshape(B, C, H, W) + img_tokens = F.interpolate( + img_tokens, scale_factor=scale_factor, recompute_scale_factor=False, mode='bilinear', align_corners=False) + img_tokens = img_tokens.reshape(B, C, -1).transpose(1, 2) + + out = torch.cat((cls_token, img_tokens), dim=1) + + return out + + def forward(self, x1, x2, x3, x4, sizes: List[Tuple[int, int]]): + _, S2, S3, S4 = sizes + cur2 = self.norm12(x2) + cur3 = self.norm13(x3) + cur4 = self.norm14(x4) + cur2 = self.factoratt_crpe2(cur2, size=S2) + cur3 = self.factoratt_crpe3(cur3, size=S3) + cur4 = self.factoratt_crpe4(cur4, size=S4) + upsample3_2 = self.upsample(cur3, factor=2., size=S3) + upsample4_3 = self.upsample(cur4, factor=2., size=S4) + upsample4_2 = self.upsample(cur4, factor=4., size=S4) + downsample2_3 = self.downsample(cur2, factor=2., size=S2) + downsample3_4 = self.downsample(cur3, factor=2., size=S3) + downsample2_4 = self.downsample(cur2, factor=4., size=S2) + cur2 = cur2 + upsample3_2 + upsample4_2 + cur3 = cur3 + upsample4_3 + downsample2_3 + cur4 = cur4 + downsample3_4 + downsample2_4 + x2 = x2 + self.drop_path(cur2) + x3 = x3 + self.drop_path(cur3) + x4 = x4 + self.drop_path(cur4) + + # MLP. + cur2 = self.norm22(x2) + cur3 = self.norm23(x3) + cur4 = self.norm24(x4) + cur2 = self.mlp2(cur2) + cur3 = self.mlp3(cur3) + cur4 = self.mlp4(cur4) + x2 = x2 + self.drop_path(cur2) + x3 = x3 + self.drop_path(cur3) + x4 = x4 + self.drop_path(cur4) + + return x1, x2, x3, x4 + + +class CoaT(nn.Module): + """ CoaT class. """ + def __init__( + self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dims=(0, 0, 0, 0), + serial_depths=(0, 0, 0, 0), parallel_depth=0, num_heads=0, mlp_ratios=(0, 0, 0, 0), qkv_bias=True, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6), + return_interm_layers=False, out_features=None, crpe_window=None, **kwargs): + super().__init__() + crpe_window = crpe_window or {3: 2, 5: 3, 7: 3} + self.return_interm_layers = return_interm_layers + self.out_features = out_features + self.embed_dims = embed_dims + self.num_features = embed_dims[-1] + self.num_classes = num_classes + + # Patch embeddings. + img_size = to_2tuple(img_size) + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, + embed_dim=embed_dims[0], norm_layer=nn.LayerNorm) + self.patch_embed2 = PatchEmbed( + img_size=[x // 4 for x in img_size], patch_size=2, in_chans=embed_dims[0], + embed_dim=embed_dims[1], norm_layer=nn.LayerNorm) + self.patch_embed3 = PatchEmbed( + img_size=[x // 8 for x in img_size], patch_size=2, in_chans=embed_dims[1], + embed_dim=embed_dims[2], norm_layer=nn.LayerNorm) + self.patch_embed4 = PatchEmbed( + img_size=[x // 16 for x in img_size], patch_size=2, in_chans=embed_dims[2], + embed_dim=embed_dims[3], norm_layer=nn.LayerNorm) + + # Class tokens. + self.cls_token1 = nn.Parameter(torch.zeros(1, 1, embed_dims[0])) + self.cls_token2 = nn.Parameter(torch.zeros(1, 1, embed_dims[1])) + self.cls_token3 = nn.Parameter(torch.zeros(1, 1, embed_dims[2])) + self.cls_token4 = nn.Parameter(torch.zeros(1, 1, embed_dims[3])) + + # Convolutional position encodings. + self.cpe1 = ConvPosEnc(dim=embed_dims[0], k=3) + self.cpe2 = ConvPosEnc(dim=embed_dims[1], k=3) + self.cpe3 = ConvPosEnc(dim=embed_dims[2], k=3) + self.cpe4 = ConvPosEnc(dim=embed_dims[3], k=3) + + # Convolutional relative position encodings. + self.crpe1 = ConvRelPosEnc(Ch=embed_dims[0] // num_heads, h=num_heads, window=crpe_window) + self.crpe2 = ConvRelPosEnc(Ch=embed_dims[1] // num_heads, h=num_heads, window=crpe_window) + self.crpe3 = ConvRelPosEnc(Ch=embed_dims[2] // num_heads, h=num_heads, window=crpe_window) + self.crpe4 = ConvRelPosEnc(Ch=embed_dims[3] // num_heads, h=num_heads, window=crpe_window) + + # Disable stochastic depth. + dpr = drop_path_rate + assert dpr == 0.0 + + # Serial blocks 1. + self.serial_blocks1 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[0], num_heads=num_heads, mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_cpe=self.cpe1, shared_crpe=self.crpe1 + ) + for _ in range(serial_depths[0])] + ) + + # Serial blocks 2. + self.serial_blocks2 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[1], num_heads=num_heads, mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_cpe=self.cpe2, shared_crpe=self.crpe2 + ) + for _ in range(serial_depths[1])] + ) + + # Serial blocks 3. + self.serial_blocks3 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[2], num_heads=num_heads, mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_cpe=self.cpe3, shared_crpe=self.crpe3 + ) + for _ in range(serial_depths[2])] + ) + + # Serial blocks 4. + self.serial_blocks4 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[3], num_heads=num_heads, mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_cpe=self.cpe4, shared_crpe=self.crpe4 + ) + for _ in range(serial_depths[3])] + ) + + # Parallel blocks. + self.parallel_depth = parallel_depth + if self.parallel_depth > 0: + self.parallel_blocks = nn.ModuleList([ + ParallelBlock( + dims=embed_dims, num_heads=num_heads, mlp_ratios=mlp_ratios, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_crpes=(self.crpe1, self.crpe2, self.crpe3, self.crpe4) + ) + for _ in range(parallel_depth)] + ) + else: + self.parallel_blocks = None + + # Classification head(s). + if not self.return_interm_layers: + if self.parallel_blocks is not None: + self.norm2 = norm_layer(embed_dims[1]) + self.norm3 = norm_layer(embed_dims[2]) + else: + self.norm2 = self.norm3 = None + self.norm4 = norm_layer(embed_dims[3]) + + if self.parallel_depth > 0: + # CoaT series: Aggregate features of last three scales for classification. + assert embed_dims[1] == embed_dims[2] == embed_dims[3] + self.aggregate = torch.nn.Conv1d(in_channels=3, out_channels=1, kernel_size=1) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + else: + # CoaT-Lite series: Use feature of last scale for classification. + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + # Initialize weights. + trunc_normal_(self.cls_token1, std=.02) + trunc_normal_(self.cls_token2, std=.02) + trunc_normal_(self.cls_token3, std=.02) + trunc_normal_(self.cls_token4, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'cls_token1', 'cls_token2', 'cls_token3', 'cls_token4'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def insert_cls(self, x, cls_token): + """ Insert CLS token. """ + cls_tokens = cls_token.expand(x.shape[0], -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + return x + + def remove_cls(self, x): + """ Remove CLS token. """ + return x[:, 1:, :] + + def forward_features(self, x0): + B = x0.shape[0] + + # Serial blocks 1. + x1 = self.patch_embed1(x0) + H1, W1 = self.patch_embed1.grid_size + x1 = self.insert_cls(x1, self.cls_token1) + for blk in self.serial_blocks1: + x1 = blk(x1, size=(H1, W1)) + x1_nocls = self.remove_cls(x1) + x1_nocls = x1_nocls.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous() + + # Serial blocks 2. + x2 = self.patch_embed2(x1_nocls) + H2, W2 = self.patch_embed2.grid_size + x2 = self.insert_cls(x2, self.cls_token2) + for blk in self.serial_blocks2: + x2 = blk(x2, size=(H2, W2)) + x2_nocls = self.remove_cls(x2) + x2_nocls = x2_nocls.reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous() + + # Serial blocks 3. + x3 = self.patch_embed3(x2_nocls) + H3, W3 = self.patch_embed3.grid_size + x3 = self.insert_cls(x3, self.cls_token3) + for blk in self.serial_blocks3: + x3 = blk(x3, size=(H3, W3)) + x3_nocls = self.remove_cls(x3) + x3_nocls = x3_nocls.reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous() + + # Serial blocks 4. + x4 = self.patch_embed4(x3_nocls) + H4, W4 = self.patch_embed4.grid_size + x4 = self.insert_cls(x4, self.cls_token4) + for blk in self.serial_blocks4: + x4 = blk(x4, size=(H4, W4)) + x4_nocls = self.remove_cls(x4) + x4_nocls = x4_nocls.reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous() + + # Only serial blocks: Early return. + if self.parallel_blocks is None: + if not torch.jit.is_scripting() and self.return_interm_layers: + # Return intermediate features for down-stream tasks (e.g. Deformable DETR and Detectron2). + feat_out = {} + if 'x1_nocls' in self.out_features: + feat_out['x1_nocls'] = x1_nocls + if 'x2_nocls' in self.out_features: + feat_out['x2_nocls'] = x2_nocls + if 'x3_nocls' in self.out_features: + feat_out['x3_nocls'] = x3_nocls + if 'x4_nocls' in self.out_features: + feat_out['x4_nocls'] = x4_nocls + return feat_out + else: + # Return features for classification. + x4 = self.norm4(x4) + x4_cls = x4[:, 0] + return x4_cls + + # Parallel blocks. + for blk in self.parallel_blocks: + x2, x3, x4 = self.cpe2(x2, (H2, W2)), self.cpe3(x3, (H3, W3)), self.cpe4(x4, (H4, W4)) + x1, x2, x3, x4 = blk(x1, x2, x3, x4, sizes=[(H1, W1), (H2, W2), (H3, W3), (H4, W4)]) + + if not torch.jit.is_scripting() and self.return_interm_layers: + # Return intermediate features for down-stream tasks (e.g. Deformable DETR and Detectron2). + feat_out = {} + if 'x1_nocls' in self.out_features: + x1_nocls = self.remove_cls(x1) + x1_nocls = x1_nocls.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x1_nocls'] = x1_nocls + if 'x2_nocls' in self.out_features: + x2_nocls = self.remove_cls(x2) + x2_nocls = x2_nocls.reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x2_nocls'] = x2_nocls + if 'x3_nocls' in self.out_features: + x3_nocls = self.remove_cls(x3) + x3_nocls = x3_nocls.reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x3_nocls'] = x3_nocls + if 'x4_nocls' in self.out_features: + x4_nocls = self.remove_cls(x4) + x4_nocls = x4_nocls.reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x4_nocls'] = x4_nocls + return feat_out + else: + x2 = self.norm2(x2) + x3 = self.norm3(x3) + x4 = self.norm4(x4) + x2_cls = x2[:, :1] # [B, 1, C] + x3_cls = x3[:, :1] + x4_cls = x4[:, :1] + merged_cls = torch.cat((x2_cls, x3_cls, x4_cls), dim=1) # [B, 3, C] + merged_cls = self.aggregate(merged_cls).squeeze(dim=1) # Shape: [B, C] + return merged_cls + + def forward(self, x): + if self.return_interm_layers: + # Return intermediate features (for down-stream tasks). + return self.forward_features(x) + else: + # Return features for classification. + x = self.forward_features(x) + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + out_dict = {} + for k, v in state_dict.items(): + # original model had unused norm layers, removing them requires filtering pretrained checkpoints + if k.startswith('norm1') or \ + (model.norm2 is None and k.startswith('norm2')) or \ + (model.norm3 is None and k.startswith('norm3')): + continue + out_dict[k] = v + return out_dict + + +def _create_coat(variant, pretrained=False, default_cfg=None, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + CoaT, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def coat_tiny(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[152, 152, 152, 152], serial_depths=[2, 2, 2, 2], parallel_depth=6, + num_heads=8, mlp_ratios=[4, 4, 4, 4], **kwargs) + model = _create_coat('coat_tiny', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def coat_mini(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[152, 216, 216, 216], serial_depths=[2, 2, 2, 2], parallel_depth=6, + num_heads=8, mlp_ratios=[4, 4, 4, 4], **kwargs) + model = _create_coat('coat_mini', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def coat_lite_tiny(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[64, 128, 256, 320], serial_depths=[2, 2, 2, 2], parallel_depth=0, + num_heads=8, mlp_ratios=[8, 8, 4, 4], **kwargs) + model = _create_coat('coat_lite_tiny', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def coat_lite_mini(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[2, 2, 2, 2], parallel_depth=0, + num_heads=8, mlp_ratios=[8, 8, 4, 4], **kwargs) + model = _create_coat('coat_lite_mini', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def coat_lite_small(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[3, 4, 6, 3], parallel_depth=0, + num_heads=8, mlp_ratios=[8, 8, 4, 4], **kwargs) + model = _create_coat('coat_lite_small', pretrained=pretrained, **model_cfg) + return model \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/timm/models/convit.py b/testbed/huggingface__pytorch-image-models/timm/models/convit.py new file mode 100644 index 0000000000000000000000000000000000000000..f58249ec979dc32e5ddefa5aceb2e6143d6a4954 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/convit.py @@ -0,0 +1,349 @@ +""" ConViT Model + +@article{d2021convit, + title={ConViT: Improving Vision Transformers with Soft Convolutional Inductive Biases}, + author={d'Ascoli, St{\'e}phane and Touvron, Hugo and Leavitt, Matthew and Morcos, Ari and Biroli, Giulio and Sagun, Levent}, + journal={arXiv preprint arXiv:2103.10697}, + year={2021} +} + +Paper link: https://arxiv.org/abs/2103.10697 +Original code: https://github.com/facebookresearch/convit, original copyright below +""" +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. +# +# This source code is licensed under the CC-by-NC license found in the +# LICENSE file in the root directory of this source tree. +# +'''These modules are adapted from those of timm, see +https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py +''' + +import torch +import torch.nn as nn +from functools import partial +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import DropPath, to_2tuple, trunc_normal_, PatchEmbed, Mlp +from .registry import register_model +from .vision_transformer_hybrid import HybridEmbed + +import torch +import torch.nn as nn + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # ConViT + 'convit_tiny': _cfg( + url="https://dl.fbaipublicfiles.com/convit/convit_tiny.pth"), + 'convit_small': _cfg( + url="https://dl.fbaipublicfiles.com/convit/convit_small.pth"), + 'convit_base': _cfg( + url="https://dl.fbaipublicfiles.com/convit/convit_base.pth") +} + + +class GPSA(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., + locality_strength=1.): + super().__init__() + self.num_heads = num_heads + self.dim = dim + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + self.locality_strength = locality_strength + + self.qk = nn.Linear(dim, dim * 2, bias=qkv_bias) + self.v = nn.Linear(dim, dim, bias=qkv_bias) + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.pos_proj = nn.Linear(3, num_heads) + self.proj_drop = nn.Dropout(proj_drop) + self.gating_param = nn.Parameter(torch.ones(self.num_heads)) + self.rel_indices: torch.Tensor = torch.zeros(1, 1, 1, 3) # silly torchscript hack, won't work with None + + def forward(self, x): + B, N, C = x.shape + if self.rel_indices is None or self.rel_indices.shape[1] != N: + self.rel_indices = self.get_rel_indices(N) + attn = self.get_attention(x) + v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def get_attention(self, x): + B, N, C = x.shape + qk = self.qk(x).reshape(B, N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k = qk[0], qk[1] + pos_score = self.rel_indices.expand(B, -1, -1, -1) + pos_score = self.pos_proj(pos_score).permute(0, 3, 1, 2) + patch_score = (q @ k.transpose(-2, -1)) * self.scale + patch_score = patch_score.softmax(dim=-1) + pos_score = pos_score.softmax(dim=-1) + + gating = self.gating_param.view(1, -1, 1, 1) + attn = (1. - torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score + attn /= attn.sum(dim=-1).unsqueeze(-1) + attn = self.attn_drop(attn) + return attn + + def get_attention_map(self, x, return_map=False): + attn_map = self.get_attention(x).mean(0) # average over batch + distances = self.rel_indices.squeeze()[:, :, -1] ** .5 + dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / distances.size(0) + if return_map: + return dist, attn_map + else: + return dist + + def local_init(self): + self.v.weight.data.copy_(torch.eye(self.dim)) + locality_distance = 1 # max(1,1/locality_strength**.5) + + kernel_size = int(self.num_heads ** .5) + center = (kernel_size - 1) / 2 if kernel_size % 2 == 0 else kernel_size // 2 + for h1 in range(kernel_size): + for h2 in range(kernel_size): + position = h1 + kernel_size * h2 + self.pos_proj.weight.data[position, 2] = -1 + self.pos_proj.weight.data[position, 1] = 2 * (h1 - center) * locality_distance + self.pos_proj.weight.data[position, 0] = 2 * (h2 - center) * locality_distance + self.pos_proj.weight.data *= self.locality_strength + + def get_rel_indices(self, num_patches: int) -> torch.Tensor: + img_size = int(num_patches ** .5) + rel_indices = torch.zeros(1, num_patches, num_patches, 3) + ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1) + indx = ind.repeat(img_size, img_size) + indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1) + indd = indx ** 2 + indy ** 2 + rel_indices[:, :, :, 2] = indd.unsqueeze(0) + rel_indices[:, :, :, 1] = indy.unsqueeze(0) + rel_indices[:, :, :, 0] = indx.unsqueeze(0) + device = self.qk.weight.device + return rel_indices.to(device) + + +class MHSA(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def get_attention_map(self, x, return_map=False): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + attn_map = (q @ k.transpose(-2, -1)) * self.scale + attn_map = attn_map.softmax(dim=-1).mean(0) + + img_size = int(N ** .5) + ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1) + indx = ind.repeat(img_size, img_size) + indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1) + indd = indx ** 2 + indy ** 2 + distances = indd ** .5 + distances = distances.to('cuda') + + dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / N + if return_map: + return dist, attn_map + else: + return dist + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_gpsa=True, **kwargs): + super().__init__() + self.norm1 = norm_layer(dim) + self.use_gpsa = use_gpsa + if self.use_gpsa: + self.attn = GPSA( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, **kwargs) + else: + self.attn = MHSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class ConViT(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=False, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., hybrid_backbone=None, norm_layer=nn.LayerNorm, global_pool=None, + local_up_to_layer=3, locality_strength=1., use_pos_embed=True): + super().__init__() + embed_dim *= num_heads + self.num_classes = num_classes + self.local_up_to_layer = local_up_to_layer + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.locality_strength = locality_strength + self.use_pos_embed = use_pos_embed + + if hybrid_backbone is not None: + self.patch_embed = HybridEmbed( + hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim) + else: + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + self.num_patches = num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + if self.use_pos_embed: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + trunc_normal_(self.pos_embed, std=.02) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + use_gpsa=True, + locality_strength=locality_strength) + if i < local_up_to_layer else + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + use_gpsa=False) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + # Classifier head + self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')] + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + for n, m in self.named_modules(): + if hasattr(m, 'local_init'): + m.local_init() + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + B = x.shape[0] + x = self.patch_embed(x) + + cls_tokens = self.cls_token.expand(B, -1, -1) + + if self.use_pos_embed: + x = x + self.pos_embed + x = self.pos_drop(x) + + for u, blk in enumerate(self.blocks): + if u == self.local_up_to_layer: + x = torch.cat((cls_tokens, x), dim=1) + x = blk(x) + + x = self.norm(x) + return x[:, 0] + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_convit(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + return build_model_with_cfg( + ConViT, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def convit_tiny(pretrained=False, **kwargs): + model_args = dict( + local_up_to_layer=10, locality_strength=1.0, embed_dim=48, + num_heads=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model = _create_convit(variant='convit_tiny', pretrained=pretrained, **model_args) + return model + + +@register_model +def convit_small(pretrained=False, **kwargs): + model_args = dict( + local_up_to_layer=10, locality_strength=1.0, embed_dim=48, + num_heads=9, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model = _create_convit(variant='convit_small', pretrained=pretrained, **model_args) + return model + + +@register_model +def convit_base(pretrained=False, **kwargs): + model_args = dict( + local_up_to_layer=10, locality_strength=1.0, embed_dim=48, + num_heads=16, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model = _create_convit(variant='convit_base', pretrained=pretrained, **model_args) + return model diff --git a/testbed/huggingface__pytorch-image-models/timm/models/crossvit.py b/testbed/huggingface__pytorch-image-models/timm/models/crossvit.py new file mode 100644 index 0000000000000000000000000000000000000000..6e0160f9e6f7ce6a1e7d37da125841ce3b97008e --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/crossvit.py @@ -0,0 +1,497 @@ +""" CrossViT Model + +@inproceedings{ + chen2021crossvit, + title={{CrossViT: Cross-Attention Multi-Scale Vision Transformer for Image Classification}}, + author={Chun-Fu (Richard) Chen and Quanfu Fan and Rameswar Panda}, + booktitle={International Conference on Computer Vision (ICCV)}, + year={2021} +} + +Paper link: https://arxiv.org/abs/2103.14899 +Original code: https://github.com/IBM/CrossViT/blob/main/models/crossvit.py + +NOTE: model names have been renamed from originals to represent actual input res all *_224 -> *_240 and *_384 -> *_408 +""" + +# Copyright IBM All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + + +""" +Modifed from Timm. https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.hub +from functools import partial +from typing import List + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import DropPath, to_2tuple, trunc_normal_ +from .registry import register_model +from .vision_transformer import Mlp, Block + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 240, 240), 'pool_size': None, 'crop_pct': 0.875, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True, + 'first_conv': ('patch_embed.0.proj', 'patch_embed.1.proj'), + 'classifier': ('head.0', 'head.1'), + **kwargs + } + + +default_cfgs = { + 'crossvit_15_240': _cfg(url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_15_224.pth'), + 'crossvit_15_dagger_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_15_dagger_224.pth', + first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), + ), + 'crossvit_15_dagger_408': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_15_dagger_384.pth', + input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0, + ), + 'crossvit_18_240': _cfg(url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_18_224.pth'), + 'crossvit_18_dagger_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_18_dagger_224.pth', + first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), + ), + 'crossvit_18_dagger_408': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_18_dagger_384.pth', + input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0, + ), + 'crossvit_9_240': _cfg(url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_9_224.pth'), + 'crossvit_9_dagger_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_9_dagger_224.pth', + first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), + ), + 'crossvit_base_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_base_224.pth'), + 'crossvit_small_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_small_224.pth'), + 'crossvit_tiny_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_tiny_224.pth'), +} + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, multi_conv=False): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + if multi_conv: + if patch_size[0] == 12: + self.proj = nn.Sequential( + nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=3, padding=0), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=1, padding=1), + ) + elif patch_size[0] == 16: + self.proj = nn.Sequential( + nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=2, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=2, padding=1), + ) + else: + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +class CrossAttention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + + self.wq = nn.Linear(dim, dim, bias=qkv_bias) + self.wk = nn.Linear(dim, dim, bias=qkv_bias) + self.wv = nn.Linear(dim, dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + # B1C -> B1H(C/H) -> BH1(C/H) + q = self.wq(x[:, 0:1, ...]).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + # BNC -> BNH(C/H) -> BHN(C/H) + k = self.wk(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + # BNC -> BNH(C/H) -> BHN(C/H) + v = self.wv(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) * self.scale # BH1(C/H) @ BH(C/H)N -> BH1N + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, 1, C) # (BH1N @ BHN(C/H)) -> BH1(C/H) -> B1H(C/H) -> B1C + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class CrossAttentionBlock(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = CrossAttention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + x = x[:, 0:1, ...] + self.drop_path(self.attn(self.norm1(x))) + + return x + + +class MultiScaleBlock(nn.Module): + + def __init__(self, dim, patches, depth, num_heads, mlp_ratio, qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + + num_branches = len(dim) + self.num_branches = num_branches + # different branch could have different embedding size, the first one is the base + self.blocks = nn.ModuleList() + for d in range(num_branches): + tmp = [] + for i in range(depth[d]): + tmp.append(Block( + dim=dim[d], num_heads=num_heads[d], mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias, + drop=drop, attn_drop=attn_drop, drop_path=drop_path[i], norm_layer=norm_layer)) + if len(tmp) != 0: + self.blocks.append(nn.Sequential(*tmp)) + + if len(self.blocks) == 0: + self.blocks = None + + self.projs = nn.ModuleList() + for d in range(num_branches): + if dim[d] == dim[(d + 1) % num_branches] and False: + tmp = [nn.Identity()] + else: + tmp = [norm_layer(dim[d]), act_layer(), nn.Linear(dim[d], dim[(d + 1) % num_branches])] + self.projs.append(nn.Sequential(*tmp)) + + self.fusion = nn.ModuleList() + for d in range(num_branches): + d_ = (d + 1) % num_branches + nh = num_heads[d_] + if depth[-1] == 0: # backward capability: + self.fusion.append( + CrossAttentionBlock( + dim=dim[d_], num_heads=nh, mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias, + drop=drop, attn_drop=attn_drop, drop_path=drop_path[-1], norm_layer=norm_layer)) + else: + tmp = [] + for _ in range(depth[-1]): + tmp.append(CrossAttentionBlock( + dim=dim[d_], num_heads=nh, mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias, + drop=drop, attn_drop=attn_drop, drop_path=drop_path[-1], norm_layer=norm_layer)) + self.fusion.append(nn.Sequential(*tmp)) + + self.revert_projs = nn.ModuleList() + for d in range(num_branches): + if dim[(d + 1) % num_branches] == dim[d] and False: + tmp = [nn.Identity()] + else: + tmp = [norm_layer(dim[(d + 1) % num_branches]), act_layer(), + nn.Linear(dim[(d + 1) % num_branches], dim[d])] + self.revert_projs.append(nn.Sequential(*tmp)) + + def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: + + outs_b = [] + for i, block in enumerate(self.blocks): + outs_b.append(block(x[i])) + + # only take the cls token out + proj_cls_token = torch.jit.annotate(List[torch.Tensor], []) + for i, proj in enumerate(self.projs): + proj_cls_token.append(proj(outs_b[i][:, 0:1, ...])) + + # cross attention + outs = [] + for i, (fusion, revert_proj) in enumerate(zip(self.fusion, self.revert_projs)): + tmp = torch.cat((proj_cls_token[i], outs_b[(i + 1) % self.num_branches][:, 1:, ...]), dim=1) + tmp = fusion(tmp) + reverted_proj_cls_token = revert_proj(tmp[:, 0:1, ...]) + tmp = torch.cat((reverted_proj_cls_token, outs_b[i][:, 1:, ...]), dim=1) + outs.append(tmp) + return outs + + +def _compute_num_patches(img_size, patches): + return [i[0] // p * i[1] // p for i, p in zip(img_size, patches)] + + +class CrossViT(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__( + self, img_size=224, img_scale=(1.0, 1.0), patch_size=(8, 16), in_chans=3, num_classes=1000, + embed_dim=(192, 384), depth=((1, 3, 1), (1, 3, 1), (1, 3, 1)), num_heads=(6, 12), mlp_ratio=(2., 2., 4.), + qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + norm_layer=partial(nn.LayerNorm, eps=1e-6), multi_conv=False, crop_scale=False, + ): + super().__init__() + + self.num_classes = num_classes + self.img_size = to_2tuple(img_size) + img_scale = to_2tuple(img_scale) + self.img_size_scaled = [tuple([int(sj * si) for sj in self.img_size]) for si in img_scale] + self.crop_scale = crop_scale # crop instead of interpolate for scale + num_patches = _compute_num_patches(self.img_size_scaled, patch_size) + self.num_branches = len(patch_size) + self.embed_dim = embed_dim + self.num_features = embed_dim[0] # to pass the tests + self.patch_embed = nn.ModuleList() + + # hard-coded for torch jit script + for i in range(self.num_branches): + setattr(self, f'pos_embed_{i}', nn.Parameter(torch.zeros(1, 1 + num_patches[i], embed_dim[i]))) + setattr(self, f'cls_token_{i}', nn.Parameter(torch.zeros(1, 1, embed_dim[i]))) + + for im_s, p, d in zip(self.img_size_scaled, patch_size, embed_dim): + self.patch_embed.append( + PatchEmbed(img_size=im_s, patch_size=p, in_chans=in_chans, embed_dim=d, multi_conv=multi_conv)) + + self.pos_drop = nn.Dropout(p=drop_rate) + + total_depth = sum([sum(x[-2:]) for x in depth]) + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, total_depth)] # stochastic depth decay rule + dpr_ptr = 0 + self.blocks = nn.ModuleList() + for idx, block_cfg in enumerate(depth): + curr_depth = max(block_cfg[:-1]) + block_cfg[-1] + dpr_ = dpr[dpr_ptr:dpr_ptr + curr_depth] + blk = MultiScaleBlock( + embed_dim, num_patches, block_cfg, num_heads=num_heads, mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr_, norm_layer=norm_layer) + dpr_ptr += curr_depth + self.blocks.append(blk) + + self.norm = nn.ModuleList([norm_layer(embed_dim[i]) for i in range(self.num_branches)]) + self.head = nn.ModuleList([ + nn.Linear(embed_dim[i], num_classes) if num_classes > 0 else nn.Identity() + for i in range(self.num_branches)]) + + for i in range(self.num_branches): + trunc_normal_(getattr(self, f'pos_embed_{i}'), std=.02) + trunc_normal_(getattr(self, f'cls_token_{i}'), std=.02) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + out = set() + for i in range(self.num_branches): + out.add(f'cls_token_{i}') + pe = getattr(self, f'pos_embed_{i}', None) + if pe is not None and pe.requires_grad: + out.add(f'pos_embed_{i}') + return out + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.ModuleList( + [nn.Linear(self.embed_dim[i], num_classes) if num_classes > 0 else nn.Identity() for i in + range(self.num_branches)]) + + def forward_features(self, x): + B, C, H, W = x.shape + xs = [] + for i, patch_embed in enumerate(self.patch_embed): + x_ = x + ss = self.img_size_scaled[i] + if H != ss[0] or W != ss[1]: + if self.crop_scale and ss[0] <= H and ss[1] <= W: + cu, cl = int(round((H - ss[0]) / 2.)), int(round((W - ss[1]) / 2.)) + x_ = x_[:, :, cu:cu + ss[0], cl:cl + ss[1]] + else: + x_ = torch.nn.functional.interpolate(x_, size=ss, mode='bicubic', align_corners=False) + x_ = patch_embed(x_) + cls_tokens = self.cls_token_0 if i == 0 else self.cls_token_1 # hard-coded for torch jit script + cls_tokens = cls_tokens.expand(B, -1, -1) + x_ = torch.cat((cls_tokens, x_), dim=1) + pos_embed = self.pos_embed_0 if i == 0 else self.pos_embed_1 # hard-coded for torch jit script + x_ = x_ + pos_embed + x_ = self.pos_drop(x_) + xs.append(x_) + + for i, blk in enumerate(self.blocks): + xs = blk(xs) + + # NOTE: was before branch token section, move to here to assure all branch token are before layer norm + xs = [norm(xs[i]) for i, norm in enumerate(self.norm)] + return [xo[:, 0] for xo in xs] + + def forward(self, x): + xs = self.forward_features(x) + ce_logits = [head(xs[i]) for i, head in enumerate(self.head)] + if not isinstance(self.head[0], nn.Identity): + ce_logits = torch.mean(torch.stack(ce_logits, dim=0), dim=0) + return ce_logits + + +def _create_crossvit(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + def pretrained_filter_fn(state_dict): + new_state_dict = {} + for key in state_dict.keys(): + if 'pos_embed' in key or 'cls_token' in key: + new_key = key.replace(".", "_") + else: + new_key = key + new_state_dict[new_key] = state_dict[key] + return new_state_dict + + return build_model_with_cfg( + CrossViT, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=pretrained_filter_fn, + **kwargs) + + +@register_model +def crossvit_tiny_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[96, 192], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], + num_heads=[3, 3], mlp_ratio=[4, 4, 1], **kwargs) + model = _create_crossvit(variant='crossvit_tiny_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_small_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], + num_heads=[6, 6], mlp_ratio=[4, 4, 1], **kwargs) + model = _create_crossvit(variant='crossvit_small_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_base_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[384, 768], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], + num_heads=[12, 12], mlp_ratio=[4, 4, 1], **kwargs) + model = _create_crossvit(variant='crossvit_base_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_9_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]], + num_heads=[4, 4], mlp_ratio=[3, 3, 1], **kwargs) + model = _create_crossvit(variant='crossvit_9_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_15_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], + num_heads=[6, 6], mlp_ratio=[3, 3, 1], **kwargs) + model = _create_crossvit(variant='crossvit_15_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_18_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], + num_heads=[7, 7], mlp_ratio=[3, 3, 1], **kwargs) + model = _create_crossvit(variant='crossvit_18_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_9_dagger_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]], + num_heads=[4, 4], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_9_dagger_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_15_dagger_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], + num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_15_dagger_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_15_dagger_408(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], + num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_15_dagger_408', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_18_dagger_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], + num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_18_dagger_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_18_dagger_408(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], + num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_18_dagger_408', pretrained=pretrained, **model_args) + return model diff --git a/testbed/huggingface__pytorch-image-models/timm/models/cspnet.py b/testbed/huggingface__pytorch-image-models/timm/models/cspnet.py new file mode 100644 index 0000000000000000000000000000000000000000..39d16200f869f352cf9289f962c8fd77ebd3f9f9 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/cspnet.py @@ -0,0 +1,457 @@ +"""PyTorch CspNet + +A PyTorch implementation of Cross Stage Partial Networks including: +* CSPResNet50 +* CSPResNeXt50 +* CSPDarkNet53 +* and DarkNet53 for good measure + +Based on paper `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 + +Reference impl via darknet cfg files at https://github.com/WongKinYiu/CrossStagePartialNetworks + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import ClassifierHead, ConvBnAct, DropPath, create_attn, get_norm_act_layer +from .registry import register_model + + +__all__ = ['CspNet'] # model_registry will add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), + 'crop_pct': 0.887, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + 'cspresnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnet50_ra-d3e8d487.pth'), + 'cspresnet50d': _cfg(url=''), + 'cspresnet50w': _cfg(url=''), + 'cspresnext50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnext50_ra_224-648b4713.pth', + input_size=(3, 224, 224), pool_size=(7, 7), crop_pct=0.875 # FIXME I trained this at 224x224, not 256 like ref impl + ), + 'cspresnext50_iabn': _cfg(url=''), + 'cspdarknet53': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth'), + 'cspdarknet53_iabn': _cfg(url=''), + 'darknet53': _cfg(url=''), +} + + +model_cfgs = dict( + cspresnet50=dict( + stem=dict(out_chs=64, kernel_size=7, stride=2, pool='max'), + stage=dict( + out_chs=(128, 256, 512, 1024), + depth=(3, 3, 5, 2), + stride=(1,) + (2,) * 3, + exp_ratio=(2.,) * 4, + bottle_ratio=(0.5,) * 4, + block_ratio=(1.,) * 4, + cross_linear=True, + ) + ), + cspresnet50d=dict( + stem=dict(out_chs=[32, 32, 64], kernel_size=3, stride=2, pool='max'), + stage=dict( + out_chs=(128, 256, 512, 1024), + depth=(3, 3, 5, 2), + stride=(1,) + (2,) * 3, + exp_ratio=(2.,) * 4, + bottle_ratio=(0.5,) * 4, + block_ratio=(1.,) * 4, + cross_linear=True, + ) + ), + cspresnet50w=dict( + stem=dict(out_chs=[32, 32, 64], kernel_size=3, stride=2, pool='max'), + stage=dict( + out_chs=(256, 512, 1024, 2048), + depth=(3, 3, 5, 2), + stride=(1,) + (2,) * 3, + exp_ratio=(1.,) * 4, + bottle_ratio=(0.25,) * 4, + block_ratio=(0.5,) * 4, + cross_linear=True, + ) + ), + cspresnext50=dict( + stem=dict(out_chs=64, kernel_size=7, stride=2, pool='max'), + stage=dict( + out_chs=(256, 512, 1024, 2048), + depth=(3, 3, 5, 2), + stride=(1,) + (2,) * 3, + groups=(32,) * 4, + exp_ratio=(1.,) * 4, + bottle_ratio=(1.,) * 4, + block_ratio=(0.5,) * 4, + cross_linear=True, + ) + ), + cspdarknet53=dict( + stem=dict(out_chs=32, kernel_size=3, stride=1, pool=''), + stage=dict( + out_chs=(64, 128, 256, 512, 1024), + depth=(1, 2, 8, 8, 4), + stride=(2,) * 5, + exp_ratio=(2.,) + (1.,) * 4, + bottle_ratio=(0.5,) + (1.0,) * 4, + block_ratio=(1.,) + (0.5,) * 4, + down_growth=True, + ) + ), + darknet53=dict( + stem=dict(out_chs=32, kernel_size=3, stride=1, pool=''), + stage=dict( + out_chs=(64, 128, 256, 512, 1024), + depth=(1, 2, 8, 8, 4), + stride=(2,) * 5, + bottle_ratio=(0.5,) * 5, + block_ratio=(1.,) * 5, + ) + ) +) + + +def create_stem( + in_chans=3, out_chs=32, kernel_size=3, stride=2, pool='', + act_layer=None, norm_layer=None, aa_layer=None): + stem = nn.Sequential() + if not isinstance(out_chs, (tuple, list)): + out_chs = [out_chs] + assert len(out_chs) + in_c = in_chans + for i, out_c in enumerate(out_chs): + conv_name = f'conv{i + 1}' + stem.add_module(conv_name, ConvBnAct( + in_c, out_c, kernel_size, stride=stride if i == 0 else 1, + act_layer=act_layer, norm_layer=norm_layer)) + in_c = out_c + last_conv = conv_name + if pool: + if aa_layer is not None: + stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=1, padding=1)) + stem.add_module('aa', aa_layer(channels=in_c, stride=2)) + else: + stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) + return stem, dict(num_chs=in_c, reduction=stride, module='.'.join(['stem', last_conv])) + + +class ResBottleneck(nn.Module): + """ ResNe(X)t Bottleneck Block + """ + + def __init__(self, in_chs, out_chs, dilation=1, bottle_ratio=0.25, groups=1, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_last=False, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(ResBottleneck, self).__init__() + mid_chs = int(round(out_chs * bottle_ratio)) + ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_block=drop_block) + + self.conv1 = ConvBnAct(in_chs, mid_chs, kernel_size=1, **ckwargs) + self.conv2 = ConvBnAct(mid_chs, mid_chs, kernel_size=3, dilation=dilation, groups=groups, **ckwargs) + self.attn2 = create_attn(attn_layer, channels=mid_chs) if not attn_last else None + self.conv3 = ConvBnAct(mid_chs, out_chs, kernel_size=1, apply_act=False, **ckwargs) + self.attn3 = create_attn(attn_layer, channels=out_chs) if attn_last else None + self.drop_path = drop_path + self.act3 = act_layer(inplace=True) + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv3.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + if self.attn2 is not None: + x = self.attn2(x) + x = self.conv3(x) + if self.attn3 is not None: + x = self.attn3(x) + if self.drop_path is not None: + x = self.drop_path(x) + x = x + shortcut + # FIXME partial shortcut needed if first block handled as per original, not used for my current impl + #x[:, :shortcut.size(1)] += shortcut + x = self.act3(x) + return x + + +class DarkBlock(nn.Module): + """ DarkNet Block + """ + + def __init__(self, in_chs, out_chs, dilation=1, bottle_ratio=0.5, groups=1, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, + drop_block=None, drop_path=None): + super(DarkBlock, self).__init__() + mid_chs = int(round(out_chs * bottle_ratio)) + ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_block=drop_block) + self.conv1 = ConvBnAct(in_chs, mid_chs, kernel_size=1, **ckwargs) + self.conv2 = ConvBnAct(mid_chs, out_chs, kernel_size=3, dilation=dilation, groups=groups, **ckwargs) + self.attn = create_attn(attn_layer, channels=out_chs) + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv2.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + if self.attn is not None: + x = self.attn(x) + if self.drop_path is not None: + x = self.drop_path(x) + x = x + shortcut + return x + + +class CrossStage(nn.Module): + """Cross Stage.""" + def __init__(self, in_chs, out_chs, stride, dilation, depth, block_ratio=1., bottle_ratio=1., exp_ratio=1., + groups=1, first_dilation=None, down_growth=False, cross_linear=False, block_dpr=None, + block_fn=ResBottleneck, **block_kwargs): + super(CrossStage, self).__init__() + first_dilation = first_dilation or dilation + down_chs = out_chs if down_growth else in_chs # grow downsample channels to output channels + exp_chs = int(round(out_chs * exp_ratio)) + block_out_chs = int(round(out_chs * block_ratio)) + conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer')) + + if stride != 1 or first_dilation != dilation: + self.conv_down = ConvBnAct( + in_chs, down_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, + aa_layer=block_kwargs.get('aa_layer', None), **conv_kwargs) + prev_chs = down_chs + else: + self.conv_down = None + prev_chs = in_chs + + # FIXME this 1x1 expansion is pushed down into the cross and block paths in the darknet cfgs. Also, + # there is also special case for the first stage for some of the model that results in uneven split + # across the two paths. I did it this way for simplicity for now. + self.conv_exp = ConvBnAct(prev_chs, exp_chs, kernel_size=1, apply_act=not cross_linear, **conv_kwargs) + prev_chs = exp_chs // 2 # output of conv_exp is always split in two + + self.blocks = nn.Sequential() + for i in range(depth): + drop_path = DropPath(block_dpr[i]) if block_dpr and block_dpr[i] else None + self.blocks.add_module(str(i), block_fn( + prev_chs, block_out_chs, dilation, bottle_ratio, groups, drop_path=drop_path, **block_kwargs)) + prev_chs = block_out_chs + + # transition convs + self.conv_transition_b = ConvBnAct(prev_chs, exp_chs // 2, kernel_size=1, **conv_kwargs) + self.conv_transition = ConvBnAct(exp_chs, out_chs, kernel_size=1, **conv_kwargs) + + def forward(self, x): + if self.conv_down is not None: + x = self.conv_down(x) + x = self.conv_exp(x) + split = x.shape[1] // 2 + xs, xb = x[:, :split], x[:, split:] + xb = self.blocks(xb) + xb = self.conv_transition_b(xb).contiguous() + out = self.conv_transition(torch.cat([xs, xb], dim=1)) + return out + + +class DarkStage(nn.Module): + """DarkNet stage.""" + + def __init__(self, in_chs, out_chs, stride, dilation, depth, block_ratio=1., bottle_ratio=1., groups=1, + first_dilation=None, block_fn=ResBottleneck, block_dpr=None, **block_kwargs): + super(DarkStage, self).__init__() + first_dilation = first_dilation or dilation + + self.conv_down = ConvBnAct( + in_chs, out_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, + act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer'), + aa_layer=block_kwargs.get('aa_layer', None)) + + prev_chs = out_chs + block_out_chs = int(round(out_chs * block_ratio)) + self.blocks = nn.Sequential() + for i in range(depth): + drop_path = DropPath(block_dpr[i]) if block_dpr and block_dpr[i] else None + self.blocks.add_module(str(i), block_fn( + prev_chs, block_out_chs, dilation, bottle_ratio, groups, drop_path=drop_path, **block_kwargs)) + prev_chs = block_out_chs + + def forward(self, x): + x = self.conv_down(x) + x = self.blocks(x) + return x + + +def _cfg_to_stage_args(cfg, curr_stride=2, output_stride=32, drop_path_rate=0.): + # get per stage args for stage and containing blocks, calculate strides to meet target output_stride + num_stages = len(cfg['depth']) + if 'groups' not in cfg: + cfg['groups'] = (1,) * num_stages + if 'down_growth' in cfg and not isinstance(cfg['down_growth'], (list, tuple)): + cfg['down_growth'] = (cfg['down_growth'],) * num_stages + if 'cross_linear' in cfg and not isinstance(cfg['cross_linear'], (list, tuple)): + cfg['cross_linear'] = (cfg['cross_linear'],) * num_stages + cfg['block_dpr'] = [None] * num_stages if not drop_path_rate else \ + [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg['depth'])).split(cfg['depth'])] + stage_strides = [] + stage_dilations = [] + stage_first_dilations = [] + dilation = 1 + for cfg_stride in cfg['stride']: + stage_first_dilations.append(dilation) + if curr_stride >= output_stride: + dilation *= cfg_stride + stride = 1 + else: + stride = cfg_stride + curr_stride *= stride + stage_strides.append(stride) + stage_dilations.append(dilation) + cfg['stride'] = stage_strides + cfg['dilation'] = stage_dilations + cfg['first_dilation'] = stage_first_dilations + stage_args = [dict(zip(cfg.keys(), values)) for values in zip(*cfg.values())] + return stage_args + + +class CspNet(nn.Module): + """Cross Stage Partial base model. + + Paper: `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 + Ref Impl: https://github.com/WongKinYiu/CrossStagePartialNetworks + + NOTE: There are differences in the way I handle the 1x1 'expansion' conv in this impl vs the + darknet impl. I did it this way for simplicity and less special cases. + """ + + def __init__(self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0., + act_layer=nn.LeakyReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_path_rate=0., + zero_init_last_bn=True, stage_fn=CrossStage, block_fn=ResBottleneck): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert output_stride in (8, 16, 32) + layer_args = dict(act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer) + + # Construct the stem + self.stem, stem_feat_info = create_stem(in_chans, **cfg['stem'], **layer_args) + self.feature_info = [stem_feat_info] + prev_chs = stem_feat_info['num_chs'] + curr_stride = stem_feat_info['reduction'] # reduction does not include pool + if cfg['stem']['pool']: + curr_stride *= 2 + + # Construct the stages + per_stage_args = _cfg_to_stage_args( + cfg['stage'], curr_stride=curr_stride, output_stride=output_stride, drop_path_rate=drop_path_rate) + self.stages = nn.Sequential() + for i, sa in enumerate(per_stage_args): + self.stages.add_module( + str(i), stage_fn(prev_chs, **sa, **layer_args, block_fn=block_fn)) + prev_chs = sa['out_chs'] + curr_stride *= sa['stride'] + self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')] + + # Construct the head + self.num_features = prev_chs + self.head = ClassifierHead( + in_chs=prev_chs, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, mean=0.0, std=0.01) + nn.init.zeros_(m.bias) + if zero_init_last_bn: + for m in self.modules(): + if hasattr(m, 'zero_init_last_bn'): + m.zero_init_last_bn() + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_cspnet(variant, pretrained=False, **kwargs): + cfg_variant = variant.split('_')[0] + return build_model_with_cfg( + CspNet, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), model_cfg=model_cfgs[cfg_variant], + **kwargs) + + +@register_model +def cspresnet50(pretrained=False, **kwargs): + return _create_cspnet('cspresnet50', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnet50d(pretrained=False, **kwargs): + return _create_cspnet('cspresnet50d', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnet50w(pretrained=False, **kwargs): + return _create_cspnet('cspresnet50w', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnext50(pretrained=False, **kwargs): + return _create_cspnet('cspresnext50', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnext50_iabn(pretrained=False, **kwargs): + norm_layer = get_norm_act_layer('iabn') + return _create_cspnet('cspresnext50_iabn', pretrained=pretrained, norm_layer=norm_layer, **kwargs) + + +@register_model +def cspdarknet53(pretrained=False, **kwargs): + return _create_cspnet('cspdarknet53', pretrained=pretrained, block_fn=DarkBlock, **kwargs) + + +@register_model +def cspdarknet53_iabn(pretrained=False, **kwargs): + norm_layer = get_norm_act_layer('iabn') + return _create_cspnet('cspdarknet53_iabn', pretrained=pretrained, block_fn=DarkBlock, norm_layer=norm_layer, **kwargs) + + +@register_model +def darknet53(pretrained=False, **kwargs): + return _create_cspnet('darknet53', pretrained=pretrained, block_fn=DarkBlock, stage_fn=DarkStage, **kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/densenet.py b/testbed/huggingface__pytorch-image-models/timm/models/densenet.py new file mode 100644 index 0000000000000000000000000000000000000000..38a19727874bfb08cf20281ec8e85b8cc7c60688 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/densenet.py @@ -0,0 +1,387 @@ +"""Pytorch Densenet implementation w/ tweaks +This file is a copy of https://github.com/pytorch/vision 'densenet.py' (BSD-3-Clause) with +fixed kwargs passthrough and addition of dynamic global avg/max pool. +""" +import re +from collections import OrderedDict +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from torch.jit.annotations import List + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import BatchNormAct2d, create_norm_act, BlurPool2d, create_classifier +from .registry import register_model + +__all__ = ['DenseNet'] + + +def _cfg(url=''): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'features.conv0', 'classifier': 'classifier', + } + + +default_cfgs = { + 'densenet121': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenet121_ra-50efcf5c.pth'), + 'densenet121d': _cfg(url=''), + 'densenetblur121d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenetblur121d_ra-100dcfbc.pth'), + 'densenet169': _cfg(url='https://download.pytorch.org/models/densenet169-b2777c0a.pth'), + 'densenet201': _cfg(url='https://download.pytorch.org/models/densenet201-c1103571.pth'), + 'densenet161': _cfg(url='https://download.pytorch.org/models/densenet161-8d451a50.pth'), + 'densenet264': _cfg(url=''), + 'densenet264d_iabn': _cfg(url=''), + 'tv_densenet121': _cfg(url='https://download.pytorch.org/models/densenet121-a639ec97.pth'), +} + + +class DenseLayer(nn.Module): + def __init__(self, num_input_features, growth_rate, bn_size, norm_layer=BatchNormAct2d, + drop_rate=0., memory_efficient=False): + super(DenseLayer, self).__init__() + self.add_module('norm1', norm_layer(num_input_features)), + self.add_module('conv1', nn.Conv2d( + num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)), + self.add_module('norm2', norm_layer(bn_size * growth_rate)), + self.add_module('conv2', nn.Conv2d( + bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)), + self.drop_rate = float(drop_rate) + self.memory_efficient = memory_efficient + + def bottleneck_fn(self, xs): + # type: (List[torch.Tensor]) -> torch.Tensor + concated_features = torch.cat(xs, 1) + bottleneck_output = self.conv1(self.norm1(concated_features)) # noqa: T484 + return bottleneck_output + + # todo: rewrite when torchscript supports any + def any_requires_grad(self, x): + # type: (List[torch.Tensor]) -> bool + for tensor in x: + if tensor.requires_grad: + return True + return False + + @torch.jit.unused # noqa: T484 + def call_checkpoint_bottleneck(self, x): + # type: (List[torch.Tensor]) -> torch.Tensor + def closure(*xs): + return self.bottleneck_fn(xs) + + return cp.checkpoint(closure, *x) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (torch.Tensor) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> (torch.Tensor) + pass + + # torchscript does not yet support *args, so we overload method + # allowing it to take either a List[Tensor] or single Tensor + def forward(self, x): # noqa: F811 + if isinstance(x, torch.Tensor): + prev_features = [x] + else: + prev_features = x + + if self.memory_efficient and self.any_requires_grad(prev_features): + if torch.jit.is_scripting(): + raise Exception("Memory Efficient not supported in JIT") + bottleneck_output = self.call_checkpoint_bottleneck(prev_features) + else: + bottleneck_output = self.bottleneck_fn(prev_features) + + new_features = self.conv2(self.norm2(bottleneck_output)) + if self.drop_rate > 0: + new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) + return new_features + + +class DenseBlock(nn.ModuleDict): + _version = 2 + + def __init__(self, num_layers, num_input_features, bn_size, growth_rate, norm_layer=nn.ReLU, + drop_rate=0., memory_efficient=False): + super(DenseBlock, self).__init__() + for i in range(num_layers): + layer = DenseLayer( + num_input_features + i * growth_rate, + growth_rate=growth_rate, + bn_size=bn_size, + norm_layer=norm_layer, + drop_rate=drop_rate, + memory_efficient=memory_efficient, + ) + self.add_module('denselayer%d' % (i + 1), layer) + + def forward(self, init_features): + features = [init_features] + for name, layer in self.items(): + new_features = layer(features) + features.append(new_features) + return torch.cat(features, 1) + + +class DenseTransition(nn.Sequential): + def __init__(self, num_input_features, num_output_features, norm_layer=nn.BatchNorm2d, aa_layer=None): + super(DenseTransition, self).__init__() + self.add_module('norm', norm_layer(num_input_features)) + self.add_module('conv', nn.Conv2d( + num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)) + if aa_layer is not None: + self.add_module('pool', aa_layer(num_output_features, stride=2)) + else: + self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) + + +class DenseNet(nn.Module): + r"""Densenet-BC model class, based on + `"Densely Connected Convolutional Networks" `_ + + Args: + growth_rate (int) - how many filters to add each layer (`k` in paper) + block_config (list of 4 ints) - how many layers in each pooling block + bn_size (int) - multiplicative factor for number of bottle neck layers + (i.e. bn_size * k features in the bottleneck layer) + drop_rate (float) - dropout rate after each dense layer + num_classes (int) - number of classification classes + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. See `"paper" `_ + """ + + def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), bn_size=4, stem_type='', + num_classes=1000, in_chans=3, global_pool='avg', + norm_layer=BatchNormAct2d, aa_layer=None, drop_rate=0, memory_efficient=False, + aa_stem_only=True): + self.num_classes = num_classes + self.drop_rate = drop_rate + super(DenseNet, self).__init__() + + # Stem + deep_stem = 'deep' in stem_type # 3x3 deep stem + num_init_features = growth_rate * 2 + if aa_layer is None: + stem_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + else: + stem_pool = nn.Sequential(*[ + nn.MaxPool2d(kernel_size=3, stride=1, padding=1), + aa_layer(channels=num_init_features, stride=2)]) + if deep_stem: + stem_chs_1 = stem_chs_2 = growth_rate + if 'tiered' in stem_type: + stem_chs_1 = 3 * (growth_rate // 4) + stem_chs_2 = num_init_features if 'narrow' in stem_type else 6 * (growth_rate // 4) + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(in_chans, stem_chs_1, 3, stride=2, padding=1, bias=False)), + ('norm0', norm_layer(stem_chs_1)), + ('conv1', nn.Conv2d(stem_chs_1, stem_chs_2, 3, stride=1, padding=1, bias=False)), + ('norm1', norm_layer(stem_chs_2)), + ('conv2', nn.Conv2d(stem_chs_2, num_init_features, 3, stride=1, padding=1, bias=False)), + ('norm2', norm_layer(num_init_features)), + ('pool0', stem_pool), + ])) + else: + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(in_chans, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)), + ('norm0', norm_layer(num_init_features)), + ('pool0', stem_pool), + ])) + self.feature_info = [ + dict(num_chs=num_init_features, reduction=2, module=f'features.norm{2 if deep_stem else 0}')] + current_stride = 4 + + # DenseBlocks + num_features = num_init_features + for i, num_layers in enumerate(block_config): + block = DenseBlock( + num_layers=num_layers, + num_input_features=num_features, + bn_size=bn_size, + growth_rate=growth_rate, + norm_layer=norm_layer, + drop_rate=drop_rate, + memory_efficient=memory_efficient + ) + module_name = f'denseblock{(i + 1)}' + self.features.add_module(module_name, block) + num_features = num_features + num_layers * growth_rate + transition_aa_layer = None if aa_stem_only else aa_layer + if i != len(block_config) - 1: + self.feature_info += [ + dict(num_chs=num_features, reduction=current_stride, module='features.' + module_name)] + current_stride *= 2 + trans = DenseTransition( + num_input_features=num_features, num_output_features=num_features // 2, + norm_layer=norm_layer, aa_layer=transition_aa_layer) + self.features.add_module(f'transition{i + 1}', trans) + num_features = num_features // 2 + + # Final batch norm + self.features.add_module('norm5', norm_layer(num_features)) + + self.feature_info += [dict(num_chs=num_features, reduction=current_stride, module='features.norm5')] + self.num_features = num_features + + # Linear layer + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + # Official init from torch repo. + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.constant_(m.bias, 0) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + return self.features(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + # both classifier and block drop? + # if self.drop_rate > 0.: + # x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classifier(x) + return x + + +def _filter_torchvision_pretrained(state_dict): + pattern = re.compile( + r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') + + for key in list(state_dict.keys()): + res = pattern.match(key) + if res: + new_key = res.group(1) + res.group(2) + state_dict[new_key] = state_dict[key] + del state_dict[key] + return state_dict + + +def _create_densenet(variant, growth_rate, block_config, pretrained, **kwargs): + kwargs['growth_rate'] = growth_rate + kwargs['block_config'] = block_config + return build_model_with_cfg( + DenseNet, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), pretrained_filter_fn=_filter_torchvision_pretrained, + **kwargs) + + +@register_model +def densenet121(pretrained=False, **kwargs): + r"""Densenet-121 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet121', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenetblur121d(pretrained=False, **kwargs): + r"""Densenet-121 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenetblur121d', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, stem_type='deep', + aa_layer=BlurPool2d, **kwargs) + return model + + +@register_model +def densenet121d(pretrained=False, **kwargs): + r"""Densenet-121 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet121d', growth_rate=32, block_config=(6, 12, 24, 16), stem_type='deep', + pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet169(pretrained=False, **kwargs): + r"""Densenet-169 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet169', growth_rate=32, block_config=(6, 12, 32, 32), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet201(pretrained=False, **kwargs): + r"""Densenet-201 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet201', growth_rate=32, block_config=(6, 12, 48, 32), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet161(pretrained=False, **kwargs): + r"""Densenet-161 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet161', growth_rate=48, block_config=(6, 12, 36, 24), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet264(pretrained=False, **kwargs): + r"""Densenet-264 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet264', growth_rate=48, block_config=(6, 12, 64, 48), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet264d_iabn(pretrained=False, **kwargs): + r"""Densenet-264 model with deep stem and Inplace-ABN + """ + def norm_act_fn(num_features, **kwargs): + return create_norm_act('iabn', num_features, **kwargs) + model = _create_densenet( + 'densenet264d_iabn', growth_rate=48, block_config=(6, 12, 64, 48), stem_type='deep', + norm_layer=norm_act_fn, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tv_densenet121(pretrained=False, **kwargs): + r"""Densenet-121 model with original Torchvision weights, from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'tv_densenet121', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, **kwargs) + return model diff --git a/testbed/huggingface__pytorch-image-models/timm/models/dla.py b/testbed/huggingface__pytorch-image-models/timm/models/dla.py new file mode 100644 index 0000000000000000000000000000000000000000..f6e4dd285db53cd547ecb1f913219890517e3c00 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/dla.py @@ -0,0 +1,443 @@ +""" Deep Layer Aggregation and DLA w/ Res2Net +DLA original adapted from Official Pytorch impl at: +DLA Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484 + +Res2Net additions from: https://github.com/gasvn/Res2Net/ +Res2Net Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 +""" +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['DLA'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'base_layer.0', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'dla34': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth'), + 'dla46_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla46_c-2bfd52c3.pth'), + 'dla46x_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla46x_c-d761bae7.pth'), + 'dla60x_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60x_c-b870c45c.pth'), + 'dla60': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60-24839fc4.pth'), + 'dla60x': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60x-d15cacda.pth'), + 'dla102': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102-d94d9790.pth'), + 'dla102x': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102x-ad62be81.pth'), + 'dla102x2': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102x2-262837b6.pth'), + 'dla169': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla169-0914e092.pth'), + 'dla60_res2net': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net_dla60_4s-d88db7f9.pth'), + 'dla60_res2next': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next_dla60_4s-d327927b.pth'), +} + + +class DlaBasic(nn.Module): + """DLA Basic""" + + def __init__(self, inplanes, planes, stride=1, dilation=1, **_): + super(DlaBasic, self).__init__() + self.conv1 = nn.Conv2d( + inplanes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, stride=1, padding=dilation, bias=False, dilation=dilation) + self.bn2 = nn.BatchNorm2d(planes) + self.stride = stride + + def forward(self, x, shortcut=None): + if shortcut is None: + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + out += shortcut + out = self.relu(out) + + return out + + +class DlaBottleneck(nn.Module): + """DLA/DLA-X Bottleneck""" + expansion = 2 + + def __init__(self, inplanes, outplanes, stride=1, dilation=1, cardinality=1, base_width=64): + super(DlaBottleneck, self).__init__() + self.stride = stride + mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) + mid_planes = mid_planes // self.expansion + + self.conv1 = nn.Conv2d(inplanes, mid_planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(mid_planes) + self.conv2 = nn.Conv2d( + mid_planes, mid_planes, kernel_size=3, stride=stride, padding=dilation, + bias=False, dilation=dilation, groups=cardinality) + self.bn2 = nn.BatchNorm2d(mid_planes) + self.conv3 = nn.Conv2d(mid_planes, outplanes, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(outplanes) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x, shortcut=None): + if shortcut is None: + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + out += shortcut + out = self.relu(out) + + return out + + +class DlaBottle2neck(nn.Module): + """ Res2Net/Res2NeXT DLA Bottleneck + Adapted from https://github.com/gasvn/Res2Net/blob/master/dla.py + """ + expansion = 2 + + def __init__(self, inplanes, outplanes, stride=1, dilation=1, scale=4, cardinality=8, base_width=4): + super(DlaBottle2neck, self).__init__() + self.is_first = stride > 1 + self.scale = scale + mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) + mid_planes = mid_planes // self.expansion + self.width = mid_planes + + self.conv1 = nn.Conv2d(inplanes, mid_planes * scale, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(mid_planes * scale) + + num_scale_convs = max(1, scale - 1) + convs = [] + bns = [] + for _ in range(num_scale_convs): + convs.append(nn.Conv2d( + mid_planes, mid_planes, kernel_size=3, stride=stride, + padding=dilation, dilation=dilation, groups=cardinality, bias=False)) + bns.append(nn.BatchNorm2d(mid_planes)) + self.convs = nn.ModuleList(convs) + self.bns = nn.ModuleList(bns) + if self.is_first: + self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) + + self.conv3 = nn.Conv2d(mid_planes * scale, outplanes, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(outplanes) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x, shortcut=None): + if shortcut is None: + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + spx = torch.split(out, self.width, 1) + spo = [] + for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): + sp = spx[i] if i == 0 or self.is_first else sp + spx[i] + sp = conv(sp) + sp = bn(sp) + sp = self.relu(sp) + spo.append(sp) + if self.scale > 1: + spo.append(self.pool(spx[-1]) if self.is_first else spx[-1]) + out = torch.cat(spo, 1) + + out = self.conv3(out) + out = self.bn3(out) + + out += shortcut + out = self.relu(out) + + return out + + +class DlaRoot(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, shortcut): + super(DlaRoot, self).__init__() + self.conv = nn.Conv2d( + in_channels, out_channels, 1, stride=1, bias=False, padding=(kernel_size - 1) // 2) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU(inplace=True) + self.shortcut = shortcut + + def forward(self, *x): + children = x + x = self.conv(torch.cat(x, 1)) + x = self.bn(x) + if self.shortcut: + x += children[0] + x = self.relu(x) + + return x + + +class DlaTree(nn.Module): + def __init__(self, levels, block, in_channels, out_channels, stride=1, + dilation=1, cardinality=1, base_width=64, + level_root=False, root_dim=0, root_kernel_size=1, root_shortcut=False): + super(DlaTree, self).__init__() + if root_dim == 0: + root_dim = 2 * out_channels + if level_root: + root_dim += in_channels + self.downsample = nn.MaxPool2d(stride, stride=stride) if stride > 1 else nn.Identity() + self.project = nn.Identity() + cargs = dict(dilation=dilation, cardinality=cardinality, base_width=base_width) + if levels == 1: + self.tree1 = block(in_channels, out_channels, stride, **cargs) + self.tree2 = block(out_channels, out_channels, 1, **cargs) + if in_channels != out_channels: + # NOTE the official impl/weights have project layers in levels > 1 case that are never + # used, I've moved the project layer here to avoid wasted params but old checkpoints will + # need strict=False while loading. + self.project = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), + nn.BatchNorm2d(out_channels)) + else: + cargs.update(dict(root_kernel_size=root_kernel_size, root_shortcut=root_shortcut)) + self.tree1 = DlaTree( + levels - 1, block, in_channels, out_channels, stride, root_dim=0, **cargs) + self.tree2 = DlaTree( + levels - 1, block, out_channels, out_channels, root_dim=root_dim + out_channels, **cargs) + if levels == 1: + self.root = DlaRoot(root_dim, out_channels, root_kernel_size, root_shortcut) + self.level_root = level_root + self.root_dim = root_dim + self.levels = levels + + def forward(self, x, shortcut=None, children=None): + children = [] if children is None else children + bottom = self.downsample(x) + shortcut = self.project(bottom) + if self.level_root: + children.append(bottom) + x1 = self.tree1(x, shortcut) + if self.levels == 1: + x2 = self.tree2(x1) + x = self.root(x2, x1, *children) + else: + children.append(x1) + x = self.tree2(x1, children=children) + return x + + +class DLA(nn.Module): + def __init__(self, levels, channels, output_stride=32, num_classes=1000, in_chans=3, + cardinality=1, base_width=64, block=DlaBottle2neck, shortcut_root=False, + drop_rate=0.0, global_pool='avg'): + super(DLA, self).__init__() + self.channels = channels + self.num_classes = num_classes + self.cardinality = cardinality + self.base_width = base_width + self.drop_rate = drop_rate + assert output_stride == 32 # FIXME support dilation + + self.base_layer = nn.Sequential( + nn.Conv2d(in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias=False), + nn.BatchNorm2d(channels[0]), + nn.ReLU(inplace=True)) + self.level0 = self._make_conv_level(channels[0], channels[0], levels[0]) + self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2) + cargs = dict(cardinality=cardinality, base_width=base_width, root_shortcut=shortcut_root) + self.level2 = DlaTree(levels[2], block, channels[1], channels[2], 2, level_root=False, **cargs) + self.level3 = DlaTree(levels[3], block, channels[2], channels[3], 2, level_root=True, **cargs) + self.level4 = DlaTree(levels[4], block, channels[3], channels[4], 2, level_root=True, **cargs) + self.level5 = DlaTree(levels[5], block, channels[4], channels[5], 2, level_root=True, **cargs) + self.feature_info = [ + dict(num_chs=channels[0], reduction=1, module='level0'), # rare to have a meaningful stride 1 level + dict(num_chs=channels[1], reduction=2, module='level1'), + dict(num_chs=channels[2], reduction=4, module='level2'), + dict(num_chs=channels[3], reduction=8, module='level3'), + dict(num_chs=channels[4], reduction=16, module='level4'), + dict(num_chs=channels[5], reduction=32, module='level5'), + ] + + self.num_features = channels[-1] + self.global_pool, self.fc = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. / n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1): + modules = [] + for i in range(convs): + modules.extend([ + nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride if i == 0 else 1, + padding=dilation, bias=False, dilation=dilation), + nn.BatchNorm2d(planes), + nn.ReLU(inplace=True)]) + inplanes = planes + return nn.Sequential(*modules) + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + def forward_features(self, x): + x = self.base_layer(x) + x = self.level0(x) + x = self.level1(x) + x = self.level2(x) + x = self.level3(x) + x = self.level4(x) + x = self.level5(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.fc(x) + x = self.flatten(x) + return x + + +def _create_dla(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + DLA, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_strict=False, + feature_cfg=dict(out_indices=(1, 2, 3, 4, 5)), + **kwargs) + + +@register_model +def dla60_res2net(pretrained=False, **kwargs): + model_kwargs = dict( + levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), + block=DlaBottle2neck, cardinality=1, base_width=28, **kwargs) + return _create_dla('dla60_res2net', pretrained, **model_kwargs) + + +@register_model +def dla60_res2next(pretrained=False,**kwargs): + model_kwargs = dict( + levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), + block=DlaBottle2neck, cardinality=8, base_width=4, **kwargs) + return _create_dla('dla60_res2next', pretrained, **model_kwargs) + + +@register_model +def dla34(pretrained=False, **kwargs): # DLA-34 + model_kwargs = dict( + levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 128, 256, 512], + block=DlaBasic, **kwargs) + return _create_dla('dla34', pretrained, **model_kwargs) + + +@register_model +def dla46_c(pretrained=False, **kwargs): # DLA-46-C + model_kwargs = dict( + levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], + block=DlaBottleneck, **kwargs) + return _create_dla('dla46_c', pretrained, **model_kwargs) + + +@register_model +def dla46x_c(pretrained=False, **kwargs): # DLA-X-46-C + model_kwargs = dict( + levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], + block=DlaBottleneck, cardinality=32, base_width=4, **kwargs) + return _create_dla('dla46x_c', pretrained, **model_kwargs) + + +@register_model +def dla60x_c(pretrained=False, **kwargs): # DLA-X-60-C + model_kwargs = dict( + levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 64, 64, 128, 256], + block=DlaBottleneck, cardinality=32, base_width=4, **kwargs) + return _create_dla('dla60x_c', pretrained, **model_kwargs) + + +@register_model +def dla60(pretrained=False, **kwargs): # DLA-60 + model_kwargs = dict( + levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, **kwargs) + return _create_dla('dla60', pretrained, **model_kwargs) + + +@register_model +def dla60x(pretrained=False, **kwargs): # DLA-X-60 + model_kwargs = dict( + levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, cardinality=32, base_width=4, **kwargs) + return _create_dla('dla60x', pretrained, **model_kwargs) + + +@register_model +def dla102(pretrained=False, **kwargs): # DLA-102 + model_kwargs = dict( + levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, shortcut_root=True, **kwargs) + return _create_dla('dla102', pretrained, **model_kwargs) + + +@register_model +def dla102x(pretrained=False, **kwargs): # DLA-X-102 + model_kwargs = dict( + levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, cardinality=32, base_width=4, shortcut_root=True, **kwargs) + return _create_dla('dla102x', pretrained, **model_kwargs) + + +@register_model +def dla102x2(pretrained=False, **kwargs): # DLA-X-102 64 + model_kwargs = dict( + levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, cardinality=64, base_width=4, shortcut_root=True, **kwargs) + return _create_dla('dla102x2', pretrained, **model_kwargs) + + +@register_model +def dla169(pretrained=False, **kwargs): # DLA-169 + model_kwargs = dict( + levels=[1, 1, 2, 3, 5, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, shortcut_root=True, **kwargs) + return _create_dla('dla169', pretrained, **model_kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/dpn.py b/testbed/huggingface__pytorch-image-models/timm/models/dpn.py new file mode 100644 index 0000000000000000000000000000000000000000..c4e380b1e31d63a6a6381352eb2ce1555fbbff3b --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/dpn.py @@ -0,0 +1,317 @@ +""" PyTorch implementation of DualPathNetworks +Based on original MXNet implementation https://github.com/cypw/DPNs with +many ideas from another PyTorch implementation https://github.com/oyam/pytorch-DPNs. + +This implementation is compatible with the pretrained weights from cypw's MXNet implementation. + +Hacked together by / Copyright 2020 Ross Wightman +""" +from collections import OrderedDict +from functools import partial +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DPN_MEAN, IMAGENET_DPN_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import BatchNormAct2d, ConvBnAct, create_conv2d, create_classifier +from .registry import register_model + +__all__ = ['DPN'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DPN_MEAN, 'std': IMAGENET_DPN_STD, + 'first_conv': 'features.conv1_1.conv', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'dpn68': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn68-66bebafa7.pth'), + 'dpn68b': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/dpn68b_ra-a31ca160.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'dpn92': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn92_extra-b040e4a9b.pth'), + 'dpn98': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn98-5b90dec4d.pth'), + 'dpn131': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn131-71dfe43e0.pth'), + 'dpn107': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn107_extra-1ac7121e2.pth') +} + + +class CatBnAct(nn.Module): + def __init__(self, in_chs, norm_layer=BatchNormAct2d): + super(CatBnAct, self).__init__() + self.bn = norm_layer(in_chs, eps=0.001) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (Tuple[torch.Tensor, torch.Tensor]) -> (torch.Tensor) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> (torch.Tensor) + pass + + def forward(self, x): + if isinstance(x, tuple): + x = torch.cat(x, dim=1) + return self.bn(x) + + +class BnActConv2d(nn.Module): + def __init__(self, in_chs, out_chs, kernel_size, stride, groups=1, norm_layer=BatchNormAct2d): + super(BnActConv2d, self).__init__() + self.bn = norm_layer(in_chs, eps=0.001) + self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, groups=groups) + + def forward(self, x): + return self.conv(self.bn(x)) + + +class DualPathBlock(nn.Module): + def __init__( + self, in_chs, num_1x1_a, num_3x3_b, num_1x1_c, inc, groups, block_type='normal', b=False): + super(DualPathBlock, self).__init__() + self.num_1x1_c = num_1x1_c + self.inc = inc + self.b = b + if block_type == 'proj': + self.key_stride = 1 + self.has_proj = True + elif block_type == 'down': + self.key_stride = 2 + self.has_proj = True + else: + assert block_type == 'normal' + self.key_stride = 1 + self.has_proj = False + + self.c1x1_w_s1 = None + self.c1x1_w_s2 = None + if self.has_proj: + # Using different member names here to allow easier parameter key matching for conversion + if self.key_stride == 2: + self.c1x1_w_s2 = BnActConv2d( + in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=2) + else: + self.c1x1_w_s1 = BnActConv2d( + in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=1) + + self.c1x1_a = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_a, kernel_size=1, stride=1) + self.c3x3_b = BnActConv2d( + in_chs=num_1x1_a, out_chs=num_3x3_b, kernel_size=3, stride=self.key_stride, groups=groups) + if b: + self.c1x1_c = CatBnAct(in_chs=num_3x3_b) + self.c1x1_c1 = create_conv2d(num_3x3_b, num_1x1_c, kernel_size=1) + self.c1x1_c2 = create_conv2d(num_3x3_b, inc, kernel_size=1) + else: + self.c1x1_c = BnActConv2d(in_chs=num_3x3_b, out_chs=num_1x1_c + inc, kernel_size=1, stride=1) + self.c1x1_c1 = None + self.c1x1_c2 = None + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor] + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor] + pass + + def forward(self, x) -> Tuple[torch.Tensor, torch.Tensor]: + if isinstance(x, tuple): + x_in = torch.cat(x, dim=1) + else: + x_in = x + if self.c1x1_w_s1 is None and self.c1x1_w_s2 is None: + # self.has_proj == False, torchscript requires condition on module == None + x_s1 = x[0] + x_s2 = x[1] + else: + # self.has_proj == True + if self.c1x1_w_s1 is not None: + # self.key_stride = 1 + x_s = self.c1x1_w_s1(x_in) + else: + # self.key_stride = 2 + x_s = self.c1x1_w_s2(x_in) + x_s1 = x_s[:, :self.num_1x1_c, :, :] + x_s2 = x_s[:, self.num_1x1_c:, :, :] + x_in = self.c1x1_a(x_in) + x_in = self.c3x3_b(x_in) + x_in = self.c1x1_c(x_in) + if self.c1x1_c1 is not None: + # self.b == True, using None check for torchscript compat + out1 = self.c1x1_c1(x_in) + out2 = self.c1x1_c2(x_in) + else: + out1 = x_in[:, :self.num_1x1_c, :, :] + out2 = x_in[:, self.num_1x1_c:, :, :] + resid = x_s1 + out1 + dense = torch.cat([x_s2, out2], dim=1) + return resid, dense + + +class DPN(nn.Module): + def __init__(self, small=False, num_init_features=64, k_r=96, groups=32, + b=False, k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128), output_stride=32, + num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg', fc_act=nn.ELU): + super(DPN, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.b = b + assert output_stride == 32 # FIXME look into dilation support + norm_layer = partial(BatchNormAct2d, eps=.001) + fc_norm_layer = partial(BatchNormAct2d, eps=.001, act_layer=fc_act, inplace=False) + bw_factor = 1 if small else 4 + blocks = OrderedDict() + + # conv1 + blocks['conv1_1'] = ConvBnAct( + in_chans, num_init_features, kernel_size=3 if small else 7, stride=2, norm_layer=norm_layer) + blocks['conv1_pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.feature_info = [dict(num_chs=num_init_features, reduction=2, module='features.conv1_1')] + + # conv2 + bw = 64 * bw_factor + inc = inc_sec[0] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv2_1'] = DualPathBlock(num_init_features, r, r, bw, inc, groups, 'proj', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[0] + 1): + blocks['conv2_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=4, module=f'features.conv2_{k_sec[0]}')] + + # conv3 + bw = 128 * bw_factor + inc = inc_sec[1] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv3_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[1] + 1): + blocks['conv3_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=8, module=f'features.conv3_{k_sec[1]}')] + + # conv4 + bw = 256 * bw_factor + inc = inc_sec[2] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv4_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[2] + 1): + blocks['conv4_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=16, module=f'features.conv4_{k_sec[2]}')] + + # conv5 + bw = 512 * bw_factor + inc = inc_sec[3] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv5_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[3] + 1): + blocks['conv5_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=32, module=f'features.conv5_{k_sec[3]}')] + + blocks['conv5_bn_ac'] = CatBnAct(in_chs, norm_layer=fc_norm_layer) + + self.num_features = in_chs + self.features = nn.Sequential(blocks) + + # Using 1x1 conv for the FC layer to allow the extra pooling scheme + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + def forward_features(self, x): + return self.features(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classifier(x) + x = self.flatten(x) + return x + + +def _create_dpn(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + DPN, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(feature_concat=True, flatten_sequential=True), + **kwargs) + + +@register_model +def dpn68(pretrained=False, **kwargs): + model_kwargs = dict( + small=True, num_init_features=10, k_r=128, groups=32, + k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64), **kwargs) + return _create_dpn('dpn68', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn68b(pretrained=False, **kwargs): + model_kwargs = dict( + small=True, num_init_features=10, k_r=128, groups=32, + b=True, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64), **kwargs) + return _create_dpn('dpn68b', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn92(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=64, k_r=96, groups=32, + k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128), **kwargs) + return _create_dpn('dpn92', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn98(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=96, k_r=160, groups=40, + k_sec=(3, 6, 20, 3), inc_sec=(16, 32, 32, 128), **kwargs) + return _create_dpn('dpn98', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn131(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=128, k_r=160, groups=40, + k_sec=(4, 8, 28, 3), inc_sec=(16, 32, 32, 128), **kwargs) + return _create_dpn('dpn131', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn107(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=128, k_r=200, groups=50, + k_sec=(4, 8, 20, 3), inc_sec=(20, 64, 64, 128), **kwargs) + return _create_dpn('dpn107', pretrained=pretrained, **model_kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/efficientnet.py b/testbed/huggingface__pytorch-image-models/timm/models/efficientnet.py new file mode 100644 index 0000000000000000000000000000000000000000..3d50b704cdff751b9dd397f051abfde40c34c2b1 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/efficientnet.py @@ -0,0 +1,2211 @@ +""" The EfficientNet Family in PyTorch + +An implementation of EfficienNet that covers variety of related models with efficient architectures: + +* EfficientNet-V2 + - `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + +* EfficientNet (B0-B8, L2 + Tensorflow pretrained AutoAug/RandAug/AdvProp/NoisyStudent weight ports) + - EfficientNet: Rethinking Model Scaling for CNNs - https://arxiv.org/abs/1905.11946 + - CondConv: Conditionally Parameterized Convolutions for Efficient Inference - https://arxiv.org/abs/1904.04971 + - Adversarial Examples Improve Image Recognition - https://arxiv.org/abs/1911.09665 + - Self-training with Noisy Student improves ImageNet classification - https://arxiv.org/abs/1911.04252 + +* MixNet (Small, Medium, and Large) + - MixConv: Mixed Depthwise Convolutional Kernels - https://arxiv.org/abs/1907.09595 + +* MNasNet B1, A1 (SE), Small + - MnasNet: Platform-Aware Neural Architecture Search for Mobile - https://arxiv.org/abs/1807.11626 + +* FBNet-C + - FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable NAS - https://arxiv.org/abs/1812.03443 + +* Single-Path NAS Pixel1 + - Single-Path NAS: Designing Hardware-Efficient ConvNets - https://arxiv.org/abs/1904.02877 + +* And likely more... + +The majority of the above models (EfficientNet*, MixNet, MnasNet) and original weights were made available +by Mingxing Tan, Quoc Le, and other members of their Google Brain team. Thanks for consistently releasing +the models and weights open source! + +Hacked together by / Copyright 2021 Ross Wightman +""" +from functools import partial +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .efficientnet_blocks import SqueezeExcite +from .efficientnet_builder import EfficientNetBuilder, decode_arch_def, efficientnet_init_weights,\ + round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT +from .features import FeatureInfo, FeatureHooks +from .helpers import build_model_with_cfg, default_cfg_for_features +from .layers import create_conv2d, create_classifier +from .registry import register_model + +__all__ = ['EfficientNet', 'EfficientNetFeatures'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'mnasnet_050': _cfg(url=''), + 'mnasnet_075': _cfg(url=''), + 'mnasnet_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth'), + 'mnasnet_140': _cfg(url=''), + + 'semnasnet_050': _cfg(url=''), + 'semnasnet_075': _cfg(url=''), + 'semnasnet_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth'), + 'semnasnet_140': _cfg(url=''), + 'mnasnet_small': _cfg(url=''), + + 'mobilenetv2_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth'), + 'mobilenetv2_110d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth'), + 'mobilenetv2_120d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth'), + 'mobilenetv2_140': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth'), + + 'fbnetc_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth', + interpolation='bilinear'), + 'spnasnet_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth', + interpolation='bilinear'), + + # NOTE experimenting with alternate attention + 'efficientnet_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth'), + 'efficientnet_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth', + test_input_size=(3, 256, 256), crop_pct=1.0), + 'efficientnet_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth', + input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 288, 288), crop_pct=1.0), + 'efficientnet_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth', + input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), + 'efficientnet_b4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b4_ra2_320-7eb33cd5.pth', + input_size=(3, 320, 320), pool_size=(10, 10), test_input_size=(3, 384, 384), crop_pct=1.0), + 'efficientnet_b5': _cfg( + url='', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'efficientnet_b6': _cfg( + url='', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'efficientnet_b7': _cfg( + url='', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'efficientnet_b8': _cfg( + url='', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + 'efficientnet_l2': _cfg( + url='', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.961), + + 'efficientnet_es': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth'), + 'efficientnet_em': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'efficientnet_el': _cfg( + url='https://github.com/DeGirum/pruned-models/releases/download/efficientnet_v1.0/efficientnet_el.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'efficientnet_es_pruned': _cfg( + url='https://github.com/DeGirum/pruned-models/releases/download/efficientnet_v1.0/efficientnet_es_pruned75.pth'), + 'efficientnet_el_pruned': _cfg( + url='https://github.com/DeGirum/pruned-models/releases/download/efficientnet_v1.0/efficientnet_el_pruned70.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'efficientnet_cc_b0_4e': _cfg(url=''), + 'efficientnet_cc_b0_8e': _cfg(url=''), + 'efficientnet_cc_b1_8e': _cfg(url='', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + + 'efficientnet_lite0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth'), + 'efficientnet_lite1': _cfg( + url='', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'efficientnet_lite2': _cfg( + url='', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'efficientnet_lite3': _cfg( + url='', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'efficientnet_lite4': _cfg( + url='', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + + 'efficientnet_b1_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb1_pruned_9ebb3fe6.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'efficientnet_b2_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb2_pruned_203f55bc.pth', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'efficientnet_b3_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb3_pruned_5abcc29f.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + + 'efficientnetv2_rw_t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_t_agc-3620981a.pth', + input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), + 'gc_efficientnetv2_rw_t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gc_efficientnetv2_rw_t_agc-927a0bde.pth', + input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), + 'efficientnetv2_rw_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_v2s_ra2_288-a6477665.pth', + input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), + 'efficientnetv2_rw_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_rw_m_agc-3d90cb1e.pth', + input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), + + 'efficientnetv2_s': _cfg( + url='', + input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), + 'efficientnetv2_m': _cfg( + url='', + input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), + 'efficientnetv2_l': _cfg( + url='', + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'efficientnetv2_xl': _cfg( + url='', + input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnet_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth', + input_size=(3, 224, 224)), + 'tf_efficientnet_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth', + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth', + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth', + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth', + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_b8': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth', + input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + + 'tf_efficientnet_b0_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 224, 224)), + 'tf_efficientnet_b1_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_b8_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + + 'tf_efficientnet_b0_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth', + input_size=(3, 224, 224)), + 'tf_efficientnet_b1_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth', + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth', + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth', + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth', + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_l2_ns_475': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth', + input_size=(3, 475, 475), pool_size=(15, 15), crop_pct=0.936), + 'tf_efficientnet_l2_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth', + input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.96), + + 'tf_efficientnet_es': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 224, 224), ), + 'tf_efficientnet_em': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_el': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'tf_efficientnet_cc_b0_4e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_efficientnet_cc_b0_8e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_efficientnet_cc_b1_8e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + + 'tf_efficientnet_lite0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, interpolation='bilinear'), + 'tf_efficientnet_lite4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.920, interpolation='bilinear'), + + 'tf_efficientnetv2_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s-eb54923e.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), + 'tf_efficientnetv2_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m-cc09e0cd.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l-d664b728.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnetv2_s_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21ft1k-d7dafa41.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), + 'tf_efficientnetv2_m_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21ft1k-bf41664a.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_l_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21ft1k-60127a9d.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_xl_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21ft1k-06c35c48.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnetv2_s_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21k-6337ad01.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), + 'tf_efficientnetv2_m_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21k-361418a2.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_l_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21k-91a19ec9.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_xl_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21k-fd7e8abf.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnetv2_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b0-c7cc451f.pth', + input_size=(3, 192, 192), test_input_size=(3, 224, 224), pool_size=(6, 6)), + 'tf_efficientnetv2_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b1-be6e41b0.pth', + input_size=(3, 192, 192), test_input_size=(3, 240, 240), pool_size=(6, 6), crop_pct=0.882), + 'tf_efficientnetv2_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b2-847de54e.pth', + input_size=(3, 208, 208), test_input_size=(3, 260, 260), pool_size=(7, 7), crop_pct=0.890), + 'tf_efficientnetv2_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b3-57773f13.pth', + input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904), + + 'mixnet_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth'), + 'mixnet_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth'), + 'mixnet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth'), + 'mixnet_xl': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth'), + 'mixnet_xxl': _cfg(), + + 'tf_mixnet_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth'), + 'tf_mixnet_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth'), + 'tf_mixnet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth'), +} + + +class EfficientNet(nn.Module): + """ (Generic) EfficientNet + + A flexible and performant PyTorch implementation of efficient network architectures, including: + * EfficientNet-V2 Small, Medium, Large, XL & B0-B3 + * EfficientNet B0-B8, L2 + * EfficientNet-EdgeTPU + * EfficientNet-CondConv + * MixNet S, M, L, XL + * MnasNet A1, B1, and small + * FBNet C + * Single-Path NAS Pixel1 + + """ + + def __init__(self, block_args, num_classes=1000, num_features=1280, in_chans=3, stem_size=32, fix_stem=False, + output_stride=32, pad_type='', round_chs_fn=round_channels, act_layer=None, norm_layer=None, + se_layer=None, drop_rate=0., drop_path_rate=0., global_pool='avg'): + super(EfficientNet, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + se_layer = se_layer or SqueezeExcite + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + + # Stem + if not fix_stem: + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size) + self.act1 = act_layer(inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = builder.features + head_chs = builder.in_chs + + # Head + Pooling + self.conv_head = create_conv2d(head_chs, self.num_features, 1, padding=pad_type) + self.bn2 = norm_layer(self.num_features) + self.act2 = act_layer(inplace=True) + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + efficientnet_init_weights(self) + + def as_sequential(self): + layers = [self.conv_stem, self.bn1, self.act1] + layers.extend(self.blocks) + layers.extend([self.conv_head, self.bn2, self.act2, self.global_pool]) + layers.extend([nn.Dropout(self.drop_rate), self.classifier]) + return nn.Sequential(*layers) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + x = self.blocks(x) + x = self.conv_head(x) + x = self.bn2(x) + x = self.act2(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return self.classifier(x) + + +class EfficientNetFeatures(nn.Module): + """ EfficientNet Feature Extractor + + A work-in-progress feature extraction module for EfficientNet, to use as a backbone for segmentation + and object detection models. + """ + + def __init__(self, block_args, out_indices=(0, 1, 2, 3, 4), feature_location='bottleneck', in_chans=3, + stem_size=32, fix_stem=False, output_stride=32, pad_type='', round_chs_fn=round_channels, + act_layer=None, norm_layer=None, se_layer=None, drop_rate=0., drop_path_rate=0.): + super(EfficientNetFeatures, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + se_layer = se_layer or SqueezeExcite + self.drop_rate = drop_rate + + # Stem + if not fix_stem: + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size) + self.act1 = act_layer(inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, + feature_location=feature_location) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = FeatureInfo(builder.features, out_indices) + self._stage_out_idx = {v['stage']: i for i, v in enumerate(self.feature_info) if i in out_indices} + + efficientnet_init_weights(self) + + # Register feature extraction hooks with FeatureHooks helper + self.feature_hooks = None + if feature_location != 'bottleneck': + hooks = self.feature_info.get_dicts(keys=('module', 'hook_type')) + self.feature_hooks = FeatureHooks(hooks, self.named_modules()) + + def forward(self, x) -> List[torch.Tensor]: + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + if self.feature_hooks is None: + features = [] + if 0 in self._stage_out_idx: + features.append(x) # add stem out + for i, b in enumerate(self.blocks): + x = b(x) + if i + 1 in self._stage_out_idx: + features.append(x) + return features + else: + self.blocks(x) + out = self.feature_hooks.get_output(x.device) + return list(out.values()) + + +def _create_effnet(variant, pretrained=False, **kwargs): + features_only = False + model_cls = EfficientNet + kwargs_filter = None + if kwargs.pop('features_only', False): + features_only = True + kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'global_pool') + model_cls = EfficientNetFeatures + model = build_model_with_cfg( + model_cls, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **kwargs) + if features_only: + model.default_cfg = default_cfg_for_features(model.default_cfg) + return model + + +def _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-a1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r2_k3_s2_e6_c24'], + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r4_k3_s2_e6_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-b1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r3_k3_s2_e3_c24'], + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40'], + # stage 3, 28x28 in + ['ir_r3_k5_s2_e6_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c96'], + # stage 5, 14x14in + ['ir_r4_k5_s2_e6_c192'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320_noskip'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-b1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + arch_def = [ + ['ds_r1_k3_s1_c8'], + ['ir_r1_k3_s2_e3_c16'], + ['ir_r2_k3_s2_e6_c16'], + ['ir_r4_k5_s2_e6_c32_se0.25'], + ['ir_r3_k3_s1_e6_c32_se0.25'], + ['ir_r3_k5_s2_e6_c88_se0.25'], + ['ir_r1_k3_s1_e6_c144'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=8, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mobilenet_v2( + variant, channel_multiplier=1.0, depth_multiplier=1.0, fix_stem_head=False, pretrained=False, **kwargs): + """ Generate MobileNet-V2 network + Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py + Paper: https://arxiv.org/abs/1801.04381 + """ + arch_def = [ + ['ds_r1_k3_s1_c16'], + ['ir_r2_k3_s2_e6_c24'], + ['ir_r3_k3_s2_e6_c32'], + ['ir_r4_k3_s2_e6_c64'], + ['ir_r3_k3_s1_e6_c96'], + ['ir_r3_k3_s2_e6_c160'], + ['ir_r1_k3_s1_e6_c320'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head), + num_features=1280 if fix_stem_head else round_chs_fn(1280), + stem_size=32, + fix_stem=fix_stem_head, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'relu6'), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """ FBNet-C + + Paper: https://arxiv.org/abs/1812.03443 + Ref Impl: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py + + NOTE: the impl above does not relate to the 'C' variant here, that was derived from paper, + it was used to confirm some building block details + """ + arch_def = [ + ['ir_r1_k3_s1_e1_c16'], + ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'], + ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'], + ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'], + ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'], + ['ir_r4_k5_s2_e6_c184'], + ['ir_r1_k3_s1_e6_c352'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=16, + num_features=1984, # paper suggests this, but is not 100% clear + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates the Single-Path NAS model from search targeted for Pixel1 phone. + + Paper: https://arxiv.org/abs/1904.02877 + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r3_k3_s2_e3_c24'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'], + # stage 3, 28x28 in + ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'], + # stage 4, 14x14in + ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'], + # stage 5, 14x14in + ['ir_r4_k5_s2_e6_c192'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320_noskip'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates an EfficientNet model. + + Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + Paper: https://arxiv.org/abs/1905.11946 + + EfficientNet params + name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) + 'efficientnet-b0': (1.0, 1.0, 224, 0.2), + 'efficientnet-b1': (1.0, 1.1, 240, 0.2), + 'efficientnet-b2': (1.1, 1.2, 260, 0.3), + 'efficientnet-b3': (1.2, 1.4, 300, 0.3), + 'efficientnet-b4': (1.4, 1.8, 380, 0.4), + 'efficientnet-b5': (1.6, 2.2, 456, 0.4), + 'efficientnet-b6': (1.8, 2.6, 528, 0.5), + 'efficientnet-b7': (2.0, 3.1, 600, 0.5), + 'efficientnet-b8': (2.2, 3.6, 672, 0.5), + 'efficientnet-l2': (4.3, 5.3, 800, 0.5), + + Args: + channel_multiplier: multiplier to number of channels per layer + depth_multiplier: multiplier to number of repeats per stage + + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], + ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], + ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25'], + ['ir_r4_k5_s2_e6_c192_se0.25'], + ['ir_r1_k3_s1_e6_c320_se0.25'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + act_layer=resolve_act_layer(kwargs, 'swish'), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_edge(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-EdgeTPU model + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/edgetpu + """ + + arch_def = [ + # NOTE `fc` is present to override a mismatch between stem channels and in chs not + # present in other models + ['er_r1_k3_s1_e4_c24_fc24_noskip'], + ['er_r2_k3_s2_e8_c32'], + ['er_r4_k3_s2_e8_c48'], + ['ir_r5_k5_s2_e8_c96'], + ['ir_r4_k5_s1_e8_c144'], + ['ir_r2_k5_s2_e8_c192'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'relu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_condconv( + variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs): + """Creates an EfficientNet-CondConv model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/condconv + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], + ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], + ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25_cc4'], + ['ir_r4_k5_s2_e6_c192_se0.25_cc4'], + ['ir_r1_k3_s1_e6_c320_se0.25_cc4'], + ] + # NOTE unlike official impl, this one uses `cc` option where x is the base number of experts for each stage and + # the expert_multiplier increases that on a per-model basis as with depth/channel multipliers + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'swish'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates an EfficientNet-Lite model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite + Paper: https://arxiv.org/abs/1905.11946 + + EfficientNet params + name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) + 'efficientnet-lite0': (1.0, 1.0, 224, 0.2), + 'efficientnet-lite1': (1.0, 1.1, 240, 0.2), + 'efficientnet-lite2': (1.1, 1.2, 260, 0.3), + 'efficientnet-lite3': (1.2, 1.4, 280, 0.3), + 'efficientnet-lite4': (1.4, 1.8, 300, 0.3), + + Args: + channel_multiplier: multiplier to number of channels per layer + depth_multiplier: multiplier to number of repeats per stage + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16'], + ['ir_r2_k3_s2_e6_c24'], + ['ir_r2_k5_s2_e6_c40'], + ['ir_r3_k3_s2_e6_c80'], + ['ir_r3_k5_s1_e6_c112'], + ['ir_r4_k5_s2_e6_c192'], + ['ir_r1_k3_s1_e6_c320'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True), + num_features=1280, + stem_size=32, + fix_stem=True, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + act_layer=resolve_act_layer(kwargs, 'relu6'), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_base( + variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 base model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + arch_def = [ + ['cn_r1_k3_s1_e1_c16_skip'], + ['er_r2_k3_s2_e4_c32'], + ['er_r2_k3_s2_e4_c48'], + ['ir_r3_k3_s2_e4_c96_se0.25'], + ['ir_r5_k3_s1_e6_c112_se0.25'], + ['ir_r8_k3_s2_e6_c192_se0.25'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_s( + variant, channel_multiplier=1.0, depth_multiplier=1.0, rw=False, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Small model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + + NOTE: `rw` flag sets up 'small' variant to behave like my initial v2 small model, + before ref the impl was released. + """ + arch_def = [ + ['cn_r2_k3_s1_e1_c24_skip'], + ['er_r4_k3_s2_e4_c48'], + ['er_r4_k3_s2_e4_c64'], + ['ir_r6_k3_s2_e4_c128_se0.25'], + ['ir_r9_k3_s1_e6_c160_se0.25'], + ['ir_r15_k3_s2_e6_c256_se0.25'], + ] + num_features = 1280 + if rw: + # my original variant, based on paper figure differs from the official release + arch_def[0] = ['er_r2_k3_s1_e1_c24'] + arch_def[-1] = ['ir_r15_k3_s2_e6_c272_se0.25'] + num_features = 1792 + + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_chs_fn(num_features), + stem_size=24, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Medium model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + + arch_def = [ + ['cn_r3_k3_s1_e1_c24_skip'], + ['er_r5_k3_s2_e4_c48'], + ['er_r5_k3_s2_e4_c80'], + ['ir_r7_k3_s2_e4_c160_se0.25'], + ['ir_r14_k3_s1_e6_c176_se0.25'], + ['ir_r18_k3_s2_e6_c304_se0.25'], + ['ir_r5_k3_s1_e6_c512_se0.25'], + ] + + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=1280, + stem_size=24, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_l(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Large model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + + arch_def = [ + ['cn_r4_k3_s1_e1_c32_skip'], + ['er_r7_k3_s2_e4_c64'], + ['er_r7_k3_s2_e4_c96'], + ['ir_r10_k3_s2_e4_c192_se0.25'], + ['ir_r19_k3_s1_e6_c224_se0.25'], + ['ir_r25_k3_s2_e6_c384_se0.25'], + ['ir_r7_k3_s1_e6_c640_se0.25'], + ] + + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=1280, + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_xl(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Xtra-Large model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + + arch_def = [ + ['cn_r4_k3_s1_e1_c32_skip'], + ['er_r8_k3_s2_e4_c64'], + ['er_r8_k3_s2_e4_c96'], + ['ir_r16_k3_s2_e4_c192_se0.25'], + ['ir_r24_k3_s1_e6_c256_se0.25'], + ['ir_r32_k3_s2_e6_c512_se0.25'], + ['ir_r8_k3_s1_e6_c640_se0.25'], + ] + + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=1280, + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MixNet Small model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet + Paper: https://arxiv.org/abs/1907.09595 + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], # relu + # stage 2, 56x56 in + ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish + # stage 3, 28x28 in + ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], # swish + # stage 4, 14x14in + ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish + # stage 5, 14x14in + ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish + # 7x7 + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=1536, + stem_size=16, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MixNet Medium-Large model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet + Paper: https://arxiv.org/abs/1907.09595 + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c24'], # relu + # stage 1, 112x112 in + ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], # relu + # stage 2, 56x56 in + ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish + # stage 3, 28x28 in + ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], # swish + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish + # stage 5, 14x14in + ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish + # 7x7 + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), + num_features=1536, + stem_size=24, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +@register_model +def mnasnet_050(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 0.5. """ + model = _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_075(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 0.75. """ + model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_100(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.0. """ + model = _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_b1(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.0. """ + return mnasnet_100(pretrained, **kwargs) + + +@register_model +def mnasnet_140(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.4 """ + model = _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_050(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 0.5 """ + model = _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_075(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 0.75. """ + model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_100(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ + model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_a1(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ + return semnasnet_100(pretrained, **kwargs) + + +@register_model +def semnasnet_140(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.4. """ + model = _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_small(pretrained=False, **kwargs): + """ MNASNet Small, depth multiplier of 1.0. """ + model = _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_100(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.0 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_140(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.4 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_110d(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.1 channel, 1.2 depth multipliers""" + model = _gen_mobilenet_v2( + 'mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_120d(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.2 channel, 1.4 depth multipliers """ + model = _gen_mobilenet_v2( + 'mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetc_100(pretrained=False, **kwargs): + """ FBNet-C """ + if pretrained: + # pretrained model trained with non-default BN epsilon + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + model = _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def spnasnet_100(pretrained=False, **kwargs): + """ Single-Path NAS Pixel1""" + model = _gen_spnasnet('spnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b0(pretrained=False, **kwargs): + """ EfficientNet-B0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b1(pretrained=False, **kwargs): + """ EfficientNet-B1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b2(pretrained=False, **kwargs): + """ EfficientNet-B2 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b2a(pretrained=False, **kwargs): + """ EfficientNet-B2 @ 288x288 w/ 1.0 test crop""" + # WARN this model def is deprecated, different train/test res + test crop handled by default_cfg now + return efficientnet_b2(pretrained=pretrained, **kwargs) + + +@register_model +def efficientnet_b3(pretrained=False, **kwargs): + """ EfficientNet-B3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b3a(pretrained=False, **kwargs): + """ EfficientNet-B3 @ 320x320 w/ 1.0 test crop-pct """ + # WARN this model def is deprecated, different train/test res + test crop handled by default_cfg now + return efficientnet_b3(pretrained=pretrained, **kwargs) + + +@register_model +def efficientnet_b4(pretrained=False, **kwargs): + """ EfficientNet-B4 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b5(pretrained=False, **kwargs): + """ EfficientNet-B5 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b6(pretrained=False, **kwargs): + """ EfficientNet-B6 """ + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b7(pretrained=False, **kwargs): + """ EfficientNet-B7 """ + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b8(pretrained=False, **kwargs): + """ EfficientNet-B8 """ + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_l2(pretrained=False, **kwargs): + """ EfficientNet-L2.""" + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_es(pretrained=False, **kwargs): + """ EfficientNet-Edge Small. """ + model = _gen_efficientnet_edge( + 'efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + +@register_model +def efficientnet_es_pruned(pretrained=False, **kwargs): + """ EfficientNet-Edge Small Pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0""" + model = _gen_efficientnet_edge( + 'efficientnet_es_pruned', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + +@register_model +def efficientnet_em(pretrained=False, **kwargs): + """ EfficientNet-Edge-Medium. """ + model = _gen_efficientnet_edge( + 'efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_el(pretrained=False, **kwargs): + """ EfficientNet-Edge-Large. """ + model = _gen_efficientnet_edge( + 'efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + +@register_model +def efficientnet_el_pruned(pretrained=False, **kwargs): + """ EfficientNet-Edge-Large pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0""" + model = _gen_efficientnet_edge( + 'efficientnet_el_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + +@register_model +def efficientnet_cc_b0_4e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 8 Experts """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_condconv( + 'efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_cc_b0_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 8 Experts """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_condconv( + 'efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_cc_b1_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B1 w/ 8 Experts """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_condconv( + 'efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite0(pretrained=False, **kwargs): + """ EfficientNet-Lite0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite1(pretrained=False, **kwargs): + """ EfficientNet-Lite1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite2(pretrained=False, **kwargs): + """ EfficientNet-Lite2 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite3(pretrained=False, **kwargs): + """ EfficientNet-Lite3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite4(pretrained=False, **kwargs): + """ EfficientNet-Lite4 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b1_pruned(pretrained=False, **kwargs): + """ EfficientNet-B1 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + variant = 'efficientnet_b1_pruned' + model = _gen_efficientnet( + variant, channel_multiplier=1.0, depth_multiplier=1.1, pruned=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b2_pruned(pretrained=False, **kwargs): + """ EfficientNet-B2 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'efficientnet_b2_pruned', channel_multiplier=1.1, depth_multiplier=1.2, pruned=True, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b3_pruned(pretrained=False, **kwargs): + """ EfficientNet-B3 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'efficientnet_b3_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pruned=True, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_rw_t(pretrained=False, **kwargs): + """ EfficientNet-V2 Tiny (Custom variant, tiny not in paper). """ + model = _gen_efficientnetv2_s( + 'efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, rw=False, pretrained=pretrained, **kwargs) + return model + + +@register_model +def gc_efficientnetv2_rw_t(pretrained=False, **kwargs): + """ EfficientNet-V2 Tiny w/ Global Context Attn (Custom variant, tiny not in paper). """ + model = _gen_efficientnetv2_s( + 'gc_efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, + rw=False, se_layer='gc', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_rw_s(pretrained=False, **kwargs): + """ EfficientNet-V2 Small (RW variant). + NOTE: This is my initial (pre official code release) w/ some differences. + See efficientnetv2_s and tf_efficientnetv2_s for versions that match the official w/ PyTorch vs TF padding + """ + model = _gen_efficientnetv2_s('efficientnetv2_rw_s', rw=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_rw_m(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium (RW variant). + """ + model = _gen_efficientnetv2_s( + 'efficientnetv2_rw_m', channel_multiplier=1.2, depth_multiplier=(1.2,) * 4 + (1.6,) * 2, rw=True, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_s(pretrained=False, **kwargs): + """ EfficientNet-V2 Small. """ + model = _gen_efficientnetv2_s('efficientnetv2_s', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_m(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium. """ + model = _gen_efficientnetv2_m('efficientnetv2_m', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_l(pretrained=False, **kwargs): + """ EfficientNet-V2 Large. """ + model = _gen_efficientnetv2_l('efficientnetv2_l', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_xl(pretrained=False, **kwargs): + """ EfficientNet-V2 Xtra-Large. """ + model = _gen_efficientnetv2_xl('efficientnetv2_xl', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b0(pretrained=False, **kwargs): + """ EfficientNet-B0. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b1(pretrained=False, **kwargs): + """ EfficientNet-B1. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b2(pretrained=False, **kwargs): + """ EfficientNet-B2. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b3(pretrained=False, **kwargs): + """ EfficientNet-B3. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b4(pretrained=False, **kwargs): + """ EfficientNet-B4. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b5(pretrained=False, **kwargs): + """ EfficientNet-B5. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b6(pretrained=False, **kwargs): + """ EfficientNet-B6. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b7(pretrained=False, **kwargs): + """ EfficientNet-B7. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b8(pretrained=False, **kwargs): + """ EfficientNet-B8. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b0_ap(pretrained=False, **kwargs): + """ EfficientNet-B0 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0_ap', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b1_ap(pretrained=False, **kwargs): + """ EfficientNet-B1 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1_ap', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b2_ap(pretrained=False, **kwargs): + """ EfficientNet-B2 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2_ap', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b3_ap(pretrained=False, **kwargs): + """ EfficientNet-B3 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3_ap', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b4_ap(pretrained=False, **kwargs): + """ EfficientNet-B4 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4_ap', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b5_ap(pretrained=False, **kwargs): + """ EfficientNet-B5 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5_ap', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b6_ap(pretrained=False, **kwargs): + """ EfficientNet-B6 AdvProp. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6_ap', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b7_ap(pretrained=False, **kwargs): + """ EfficientNet-B7 AdvProp. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7_ap', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b8_ap(pretrained=False, **kwargs): + """ EfficientNet-B8 AdvProp. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b8_ap', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b0_ns(pretrained=False, **kwargs): + """ EfficientNet-B0 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0_ns', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b1_ns(pretrained=False, **kwargs): + """ EfficientNet-B1 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1_ns', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b2_ns(pretrained=False, **kwargs): + """ EfficientNet-B2 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2_ns', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b3_ns(pretrained=False, **kwargs): + """ EfficientNet-B3 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3_ns', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b4_ns(pretrained=False, **kwargs): + """ EfficientNet-B4 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4_ns', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b5_ns(pretrained=False, **kwargs): + """ EfficientNet-B5 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5_ns', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b6_ns(pretrained=False, **kwargs): + """ EfficientNet-B6 NoisyStudent. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6_ns', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b7_ns(pretrained=False, **kwargs): + """ EfficientNet-B7 NoisyStudent. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7_ns', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_l2_ns_475(pretrained=False, **kwargs): + """ EfficientNet-L2 NoisyStudent @ 475x475. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_l2_ns_475', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_l2_ns(pretrained=False, **kwargs): + """ EfficientNet-L2 NoisyStudent. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_l2_ns', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_es(pretrained=False, **kwargs): + """ EfficientNet-Edge Small. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_em(pretrained=False, **kwargs): + """ EfficientNet-Edge-Medium. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_el(pretrained=False, **kwargs): + """ EfficientNet-Edge-Large. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 4 Experts. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 8 Experts. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B1 w/ 8 Experts. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite0(pretrained=False, **kwargs): + """ EfficientNet-Lite0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite1(pretrained=False, **kwargs): + """ EfficientNet-Lite1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite2(pretrained=False, **kwargs): + """ EfficientNet-Lite2 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite3(pretrained=False, **kwargs): + """ EfficientNet-Lite3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite4(pretrained=False, **kwargs): + """ EfficientNet-Lite4 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + + +@register_model +def tf_efficientnetv2_s(pretrained=False, **kwargs): + """ EfficientNet-V2 Small. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_s('tf_efficientnetv2_s', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_m(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_m('tf_efficientnetv2_m', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_l(pretrained=False, **kwargs): + """ EfficientNet-V2 Large. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_l('tf_efficientnetv2_l', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_s_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Small. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_s('tf_efficientnetv2_s_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_m_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_m('tf_efficientnetv2_m_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_l_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Large. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_l('tf_efficientnetv2_l_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_xl_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Xtra-Large. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_xl('tf_efficientnetv2_xl_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_s_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Small w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_s('tf_efficientnetv2_s_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_m_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_m('tf_efficientnetv2_m_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_l_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Large w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_l('tf_efficientnetv2_l_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_xl_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Xtra-Large w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_xl('tf_efficientnetv2_xl_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b0(pretrained=False, **kwargs): + """ EfficientNet-V2-B0. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base('tf_efficientnetv2_b0', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b1(pretrained=False, **kwargs): + """ EfficientNet-V2-B1. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base( + 'tf_efficientnetv2_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b2(pretrained=False, **kwargs): + """ EfficientNet-V2-B2. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base( + 'tf_efficientnetv2_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b3(pretrained=False, **kwargs): + """ EfficientNet-V2-B3. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base( + 'tf_efficientnetv2_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_s(pretrained=False, **kwargs): + """Creates a MixNet Small model. + """ + model = _gen_mixnet_s( + 'mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_m(pretrained=False, **kwargs): + """Creates a MixNet Medium model. + """ + model = _gen_mixnet_m( + 'mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_l(pretrained=False, **kwargs): + """Creates a MixNet Large model. + """ + model = _gen_mixnet_m( + 'mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_xl(pretrained=False, **kwargs): + """Creates a MixNet Extra-Large model. + Not a paper spec, experimental def by RW w/ depth scaling. + """ + model = _gen_mixnet_m( + 'mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_xxl(pretrained=False, **kwargs): + """Creates a MixNet Double Extra Large model. + Not a paper spec, experimental def by RW w/ depth scaling. + """ + model = _gen_mixnet_m( + 'mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mixnet_s(pretrained=False, **kwargs): + """Creates a MixNet Small model. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_s( + 'tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mixnet_m(pretrained=False, **kwargs): + """Creates a MixNet Medium model. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_m( + 'tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mixnet_l(pretrained=False, **kwargs): + """Creates a MixNet Large model. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_m( + 'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) + return model diff --git a/testbed/huggingface__pytorch-image-models/timm/models/efficientnet_blocks.py b/testbed/huggingface__pytorch-image-models/timm/models/efficientnet_blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..b43f38f5868cd78a9ba821154c7c6247b7572a0d --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/efficientnet_blocks.py @@ -0,0 +1,324 @@ +""" EfficientNet, MobileNetV3, etc Blocks + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from .layers import create_conv2d, drop_path, make_divisible, create_act_layer +from .layers.activations import sigmoid + +__all__ = [ + 'SqueezeExcite', 'ConvBnAct', 'DepthwiseSeparableConv', 'InvertedResidual', 'CondConvResidual', 'EdgeResidual'] + + +class SqueezeExcite(nn.Module): + """ Squeeze-and-Excitation w/ specific features for EfficientNet/MobileNet family + + Args: + in_chs (int): input channels to layer + rd_ratio (float): ratio of squeeze reduction + act_layer (nn.Module): activation layer of containing block + gate_layer (Callable): attention gate function + force_act_layer (nn.Module): override block's activation fn if this is set/bound + rd_round_fn (Callable): specify a fn to calculate rounding of reduced chs + """ + + def __init__( + self, in_chs, rd_ratio=0.25, rd_channels=None, act_layer=nn.ReLU, + gate_layer=nn.Sigmoid, force_act_layer=None, rd_round_fn=None): + super(SqueezeExcite, self).__init__() + if rd_channels is None: + rd_round_fn = rd_round_fn or round + rd_channels = rd_round_fn(in_chs * rd_ratio) + act_layer = force_act_layer or act_layer + self.conv_reduce = nn.Conv2d(in_chs, rd_channels, 1, bias=True) + self.act1 = create_act_layer(act_layer, inplace=True) + self.conv_expand = nn.Conv2d(rd_channels, in_chs, 1, bias=True) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + x_se = self.conv_reduce(x_se) + x_se = self.act1(x_se) + x_se = self.conv_expand(x_se) + return x * self.gate(x_se) + + +class ConvBnAct(nn.Module): + """ Conv + Norm Layer + Activation w/ optional skip connection + """ + def __init__( + self, in_chs, out_chs, kernel_size, stride=1, dilation=1, pad_type='', + skip=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_path_rate=0.): + super(ConvBnAct, self).__init__() + self.has_residual = skip and stride == 1 and in_chs == out_chs + self.drop_path_rate = drop_path_rate + self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type) + self.bn1 = norm_layer(out_chs) + self.act1 = act_layer(inplace=True) + + def feature_info(self, location): + if location == 'expansion': # output of conv after act, same as block coutput + info = dict(module='act1', hook_type='forward', num_chs=self.conv.out_channels) + else: # location == 'bottleneck', block output + info = dict(module='', hook_type='', num_chs=self.conv.out_channels) + return info + + def forward(self, x): + shortcut = x + x = self.conv(x) + x = self.bn1(x) + x = self.act1(x) + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += shortcut + return x + + +class DepthwiseSeparableConv(nn.Module): + """ DepthwiseSeparable block + Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion + (factor of 1.0). This is an alternative to having a IR with an optional first pw conv. + """ + def __init__( + self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='', + noskip=False, pw_kernel_size=1, pw_act=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + se_layer=None, drop_path_rate=0.): + super(DepthwiseSeparableConv, self).__init__() + self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip + self.has_pw_act = pw_act # activation after point-wise conv + self.drop_path_rate = drop_path_rate + + self.conv_dw = create_conv2d( + in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True) + self.bn1 = norm_layer(in_chs) + self.act1 = act_layer(inplace=True) + + # Squeeze-and-excitation + self.se = se_layer(in_chs, act_layer=act_layer) if se_layer else nn.Identity() + + self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type) + self.bn2 = norm_layer(out_chs) + self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity() + + def feature_info(self, location): + if location == 'expansion': # after SE, input to PW + info = dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels) + else: # location == 'bottleneck', block output + info = dict(module='', hook_type='', num_chs=self.conv_pw.out_channels) + return info + + def forward(self, x): + shortcut = x + + x = self.conv_dw(x) + x = self.bn1(x) + x = self.act1(x) + + x = self.se(x) + + x = self.conv_pw(x) + x = self.bn2(x) + x = self.act2(x) + + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += shortcut + return x + + +class InvertedResidual(nn.Module): + """ Inverted residual block w/ optional SE + + Originally used in MobileNet-V2 - https://arxiv.org/abs/1801.04381v4, this layer is often + referred to as 'MBConv' for (Mobile inverted bottleneck conv) and is also used in + * MNasNet - https://arxiv.org/abs/1807.11626 + * EfficientNet - https://arxiv.org/abs/1905.11946 + * MobileNet-V3 - https://arxiv.org/abs/1905.02244 + """ + + def __init__( + self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='', + noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, se_layer=None, conv_kwargs=None, drop_path_rate=0.): + super(InvertedResidual, self).__init__() + conv_kwargs = conv_kwargs or {} + mid_chs = make_divisible(in_chs * exp_ratio) + self.has_residual = (in_chs == out_chs and stride == 1) and not noskip + self.drop_path_rate = drop_path_rate + + # Point-wise expansion + self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs) + self.bn1 = norm_layer(mid_chs) + self.act1 = act_layer(inplace=True) + + # Depth-wise convolution + self.conv_dw = create_conv2d( + mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation, + padding=pad_type, depthwise=True, **conv_kwargs) + self.bn2 = norm_layer(mid_chs) + self.act2 = act_layer(inplace=True) + + # Squeeze-and-excitation + self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity() + + # Point-wise linear projection + self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs) + self.bn3 = norm_layer(out_chs) + + def feature_info(self, location): + if location == 'expansion': # after SE, input to PWL + info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels) + else: # location == 'bottleneck', block output + info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels) + return info + + def forward(self, x): + shortcut = x + + # Point-wise expansion + x = self.conv_pw(x) + x = self.bn1(x) + x = self.act1(x) + + # Depth-wise convolution + x = self.conv_dw(x) + x = self.bn2(x) + x = self.act2(x) + + # Squeeze-and-excitation + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x) + x = self.bn3(x) + + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += shortcut + + return x + + +class CondConvResidual(InvertedResidual): + """ Inverted residual block w/ CondConv routing""" + + def __init__( + self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='', + noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, se_layer=None, num_experts=0, drop_path_rate=0.): + + self.num_experts = num_experts + conv_kwargs = dict(num_experts=self.num_experts) + + super(CondConvResidual, self).__init__( + in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, dilation=dilation, pad_type=pad_type, + act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, se_layer=se_layer, norm_layer=norm_layer, conv_kwargs=conv_kwargs, + drop_path_rate=drop_path_rate) + + self.routing_fn = nn.Linear(in_chs, self.num_experts) + + def forward(self, x): + shortcut = x + + # CondConv routing + pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1) + routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs)) + + # Point-wise expansion + x = self.conv_pw(x, routing_weights) + x = self.bn1(x) + x = self.act1(x) + + # Depth-wise convolution + x = self.conv_dw(x, routing_weights) + x = self.bn2(x) + x = self.act2(x) + + # Squeeze-and-excitation + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x, routing_weights) + x = self.bn3(x) + + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += shortcut + return x + + +class EdgeResidual(nn.Module): + """ Residual block with expansion convolution followed by pointwise-linear w/ stride + + Originally introduced in `EfficientNet-EdgeTPU: Creating Accelerator-Optimized Neural Networks with AutoML` + - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html + + This layer is also called FusedMBConv in the MobileDet, EfficientNet-X, and EfficientNet-V2 papers + * MobileDet - https://arxiv.org/abs/2004.14525 + * EfficientNet-X - https://arxiv.org/abs/2102.05610 + * EfficientNet-V2 - https://arxiv.org/abs/2104.00298 + """ + + def __init__( + self, in_chs, out_chs, exp_kernel_size=3, stride=1, dilation=1, pad_type='', + force_in_chs=0, noskip=False, exp_ratio=1.0, pw_kernel_size=1, act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, se_layer=None, drop_path_rate=0.): + super(EdgeResidual, self).__init__() + if force_in_chs > 0: + mid_chs = make_divisible(force_in_chs * exp_ratio) + else: + mid_chs = make_divisible(in_chs * exp_ratio) + has_se = se_layer is not None and se_ratio > 0. + self.has_residual = (in_chs == out_chs and stride == 1) and not noskip + self.drop_path_rate = drop_path_rate + + # Expansion convolution + self.conv_exp = create_conv2d( + in_chs, mid_chs, exp_kernel_size, stride=stride, dilation=dilation, padding=pad_type) + self.bn1 = norm_layer(mid_chs) + self.act1 = act_layer(inplace=True) + + # Squeeze-and-excitation + self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity() + + # Point-wise linear projection + self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type) + self.bn2 = norm_layer(out_chs) + + def feature_info(self, location): + if location == 'expansion': # after SE, before PWL + info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels) + else: # location == 'bottleneck', block output + info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels) + return info + + def forward(self, x): + shortcut = x + + # Expansion convolution + x = self.conv_exp(x) + x = self.bn1(x) + x = self.act1(x) + + # Squeeze-and-excitation + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x) + x = self.bn2(x) + + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += shortcut + + return x diff --git a/testbed/huggingface__pytorch-image-models/timm/models/efficientnet_builder.py b/testbed/huggingface__pytorch-image-models/timm/models/efficientnet_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..a23e8273d98891b4663e7e8fa409700ef0818122 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/efficientnet_builder.py @@ -0,0 +1,463 @@ +""" EfficientNet, MobileNetV3, etc Builder + +Assembles EfficieNet and related network feature blocks from string definitions. +Handles stride, dilation calculations, and selects feature extraction points. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import logging +import math +import re +from copy import deepcopy +from functools import partial + +import torch.nn as nn + +from .efficientnet_blocks import * +from .layers import CondConv2d, get_condconv_initializer, get_act_layer, get_attn, make_divisible + +__all__ = ["EfficientNetBuilder", "decode_arch_def", "efficientnet_init_weights", + 'resolve_bn_args', 'resolve_act_layer', 'round_channels', 'BN_MOMENTUM_TF_DEFAULT', 'BN_EPS_TF_DEFAULT'] + +_logger = logging.getLogger(__name__) + + +_DEBUG_BUILDER = False + +# Defaults used for Google/Tensorflow training of mobile networks /w RMSprop as per +# papers and TF reference implementations. PT momentum equiv for TF decay is (1 - TF decay) +# NOTE: momentum varies btw .99 and .9997 depending on source +# .99 in official TF TPU impl +# .9997 (/w .999 in search space) for paper +BN_MOMENTUM_TF_DEFAULT = 1 - 0.99 +BN_EPS_TF_DEFAULT = 1e-3 +_BN_ARGS_TF = dict(momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT) + + +def get_bn_args_tf(): + return _BN_ARGS_TF.copy() + + +def resolve_bn_args(kwargs): + bn_args = get_bn_args_tf() if kwargs.pop('bn_tf', False) else {} + bn_momentum = kwargs.pop('bn_momentum', None) + if bn_momentum is not None: + bn_args['momentum'] = bn_momentum + bn_eps = kwargs.pop('bn_eps', None) + if bn_eps is not None: + bn_args['eps'] = bn_eps + return bn_args + + +def resolve_act_layer(kwargs, default='relu'): + return get_act_layer(kwargs.pop('act_layer', default)) + + +def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None, round_limit=0.9): + """Round number of filters based on depth multiplier.""" + if not multiplier: + return channels + return make_divisible(channels * multiplier, divisor, channel_min, round_limit=round_limit) + + +def _log_info_if(msg, condition): + if condition: + _logger.info(msg) + + +def _parse_ksize(ss): + if ss.isdigit(): + return int(ss) + else: + return [int(k) for k in ss.split('.')] + + +def _decode_block_str(block_str): + """ Decode block definition string + + Gets a list of block arg (dicts) through a string notation of arguments. + E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip + + All args can exist in any order with the exception of the leading string which + is assumed to indicate the block type. + + leading string - block type ( + ir = InvertedResidual, ds = DepthwiseSep, dsa = DeptwhiseSep with pw act, cn = ConvBnAct) + r - number of repeat blocks, + k - kernel size, + s - strides (1-9), + e - expansion ratio, + c - output channels, + se - squeeze/excitation ratio + n - activation fn ('re', 'r6', 'hs', or 'sw') + Args: + block_str: a string representation of block arguments. + Returns: + A list of block args (dicts) + Raises: + ValueError: if the string def not properly specified (TODO) + """ + assert isinstance(block_str, str) + ops = block_str.split('_') + block_type = ops[0] # take the block type off the front + ops = ops[1:] + options = {} + skip = None + for op in ops: + # string options being checked on individual basis, combine if they grow + if op == 'noskip': + skip = False # force no skip connection + elif op == 'skip': + skip = True # force a skip connection + elif op.startswith('n'): + # activation fn + key = op[0] + v = op[1:] + if v == 're': + value = get_act_layer('relu') + elif v == 'r6': + value = get_act_layer('relu6') + elif v == 'hs': + value = get_act_layer('hard_swish') + elif v == 'sw': + value = get_act_layer('swish') # aka SiLU + elif v == 'mi': + value = get_act_layer('mish') + else: + continue + options[key] = value + else: + # all numeric options + splits = re.split(r'(\d.*)', op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + # if act_layer is None, the model default (passed to model init) will be used + act_layer = options['n'] if 'n' in options else None + exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1 + pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1 + force_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def + + num_repeat = int(options['r']) + # each type of block has different valid arguments, fill accordingly + if block_type == 'ir': + block_args = dict( + block_type=block_type, + dw_kernel_size=_parse_ksize(options['k']), + exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + exp_ratio=float(options['e']), + se_ratio=float(options['se']) if 'se' in options else 0., + stride=int(options['s']), + act_layer=act_layer, + noskip=skip is False, + ) + if 'cc' in options: + block_args['num_experts'] = int(options['cc']) + elif block_type == 'ds' or block_type == 'dsa': + block_args = dict( + block_type=block_type, + dw_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + se_ratio=float(options['se']) if 'se' in options else 0., + stride=int(options['s']), + act_layer=act_layer, + pw_act=block_type == 'dsa', + noskip=block_type == 'dsa' or skip is False, + ) + elif block_type == 'er': + block_args = dict( + block_type=block_type, + exp_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + exp_ratio=float(options['e']), + force_in_chs=force_in_chs, + se_ratio=float(options['se']) if 'se' in options else 0., + stride=int(options['s']), + act_layer=act_layer, + noskip=skip is False, + ) + elif block_type == 'cn': + block_args = dict( + block_type=block_type, + kernel_size=int(options['k']), + out_chs=int(options['c']), + stride=int(options['s']), + act_layer=act_layer, + skip=skip is True, + ) + else: + assert False, 'Unknown block type (%s)' % block_type + + return block_args, num_repeat + + +def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): + """ Per-stage depth scaling + Scales the block repeats in each stage. This depth scaling impl maintains + compatibility with the EfficientNet scaling method, while allowing sensible + scaling for other models that may have multiple block arg definitions in each stage. + """ + + # We scale the total repeat count for each stage, there may be multiple + # block arg defs per stage so we need to sum. + num_repeat = sum(repeats) + if depth_trunc == 'round': + # Truncating to int by rounding allows stages with few repeats to remain + # proportionally smaller for longer. This is a good choice when stage definitions + # include single repeat stages that we'd prefer to keep that way as long as possible + num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) + else: + # The default for EfficientNet truncates repeats to int via 'ceil'. + # Any multiplier > 1.0 will result in an increased depth for every stage. + num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier)) + + # Proportionally distribute repeat count scaling to each block definition in the stage. + # Allocation is done in reverse as it results in the first block being less likely to be scaled. + # The first block makes less sense to repeat in most of the arch definitions. + repeats_scaled = [] + for r in repeats[::-1]: + rs = max(1, round((r / num_repeat * num_repeat_scaled))) + repeats_scaled.append(rs) + num_repeat -= r + num_repeat_scaled -= rs + repeats_scaled = repeats_scaled[::-1] + + # Apply the calculated scaling to each block arg in the stage + sa_scaled = [] + for ba, rep in zip(stack_args, repeats_scaled): + sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) + return sa_scaled + + +def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1, fix_first_last=False): + arch_args = [] + if isinstance(depth_multiplier, tuple): + assert len(depth_multiplier) == len(arch_def) + else: + depth_multiplier = (depth_multiplier,) * len(arch_def) + for stack_idx, (block_strings, multiplier) in enumerate(zip(arch_def, depth_multiplier)): + assert isinstance(block_strings, list) + stack_args = [] + repeats = [] + for block_str in block_strings: + assert isinstance(block_str, str) + ba, rep = _decode_block_str(block_str) + if ba.get('num_experts', 0) > 0 and experts_multiplier > 1: + ba['num_experts'] *= experts_multiplier + stack_args.append(ba) + repeats.append(rep) + if fix_first_last and (stack_idx == 0 or stack_idx == len(arch_def) - 1): + arch_args.append(_scale_stage_depth(stack_args, repeats, 1.0, depth_trunc)) + else: + arch_args.append(_scale_stage_depth(stack_args, repeats, multiplier, depth_trunc)) + return arch_args + + +class EfficientNetBuilder: + """ Build Trunk Blocks + + This ended up being somewhat of a cross between + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py + and + https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py + + """ + def __init__(self, output_stride=32, pad_type='', round_chs_fn=round_channels, se_from_exp=False, + act_layer=None, norm_layer=None, se_layer=None, drop_path_rate=0., feature_location=''): + self.output_stride = output_stride + self.pad_type = pad_type + self.round_chs_fn = round_chs_fn + self.se_from_exp = se_from_exp # calculate se channel reduction from expanded (mid) chs + self.act_layer = act_layer + self.norm_layer = norm_layer + self.se_layer = get_attn(se_layer) + try: + self.se_layer(8, rd_ratio=1.0) # test if attn layer accepts rd_ratio arg + self.se_has_ratio = True + except TypeError: + self.se_has_ratio = False + self.drop_path_rate = drop_path_rate + if feature_location == 'depthwise': + # old 'depthwise' mode renamed 'expansion' to match TF impl, old expansion mode didn't make sense + _logger.warning("feature_location=='depthwise' is deprecated, using 'expansion'") + feature_location = 'expansion' + self.feature_location = feature_location + assert feature_location in ('bottleneck', 'expansion', '') + self.verbose = _DEBUG_BUILDER + + # state updated during build, consumed by model + self.in_chs = None + self.features = [] + + def _make_block(self, ba, block_idx, block_count): + drop_path_rate = self.drop_path_rate * block_idx / block_count + bt = ba.pop('block_type') + ba['in_chs'] = self.in_chs + ba['out_chs'] = self.round_chs_fn(ba['out_chs']) + if 'force_in_chs' in ba and ba['force_in_chs']: + # NOTE this is a hack to work around mismatch in TF EdgeEffNet impl + ba['force_in_chs'] = self.round_chs_fn(ba['force_in_chs']) + ba['pad_type'] = self.pad_type + # block act fn overrides the model default + ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer + assert ba['act_layer'] is not None + ba['norm_layer'] = self.norm_layer + ba['drop_path_rate'] = drop_path_rate + if bt != 'cn': + se_ratio = ba.pop('se_ratio') + if se_ratio and self.se_layer is not None: + if not self.se_from_exp: + # adjust se_ratio by expansion ratio if calculating se channels from block input + se_ratio /= ba.get('exp_ratio', 1.0) + if self.se_has_ratio: + ba['se_layer'] = partial(self.se_layer, rd_ratio=se_ratio) + else: + ba['se_layer'] = self.se_layer + + if bt == 'ir': + _log_info_if(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = CondConvResidual(**ba) if ba.get('num_experts', 0) else InvertedResidual(**ba) + elif bt == 'ds' or bt == 'dsa': + _log_info_if(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = DepthwiseSeparableConv(**ba) + elif bt == 'er': + _log_info_if(' EdgeResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = EdgeResidual(**ba) + elif bt == 'cn': + _log_info_if(' ConvBnAct {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = ConvBnAct(**ba) + else: + assert False, 'Uknkown block type (%s) while building model.' % bt + + self.in_chs = ba['out_chs'] # update in_chs for arg of next block + return block + + def __call__(self, in_chs, model_block_args): + """ Build the blocks + Args: + in_chs: Number of input-channels passed to first block + model_block_args: A list of lists, outer list defines stages, inner + list contains strings defining block configuration(s) + Return: + List of block stacks (each stack wrapped in nn.Sequential) + """ + _log_info_if('Building model trunk with %d stages...' % len(model_block_args), self.verbose) + self.in_chs = in_chs + total_block_count = sum([len(x) for x in model_block_args]) + total_block_idx = 0 + current_stride = 2 + current_dilation = 1 + stages = [] + if model_block_args[0][0]['stride'] > 1: + # if the first block starts with a stride, we need to extract first level feat from stem + feature_info = dict( + module='act1', num_chs=in_chs, stage=0, reduction=current_stride, + hook_type='forward' if self.feature_location != 'bottleneck' else '') + self.features.append(feature_info) + + # outer list of block_args defines the stacks + for stack_idx, stack_args in enumerate(model_block_args): + last_stack = stack_idx + 1 == len(model_block_args) + _log_info_if('Stack: {}'.format(stack_idx), self.verbose) + assert isinstance(stack_args, list) + + blocks = [] + # each stack (stage of blocks) contains a list of block arguments + for block_idx, block_args in enumerate(stack_args): + last_block = block_idx + 1 == len(stack_args) + _log_info_if(' Block: {}'.format(block_idx), self.verbose) + + assert block_args['stride'] in (1, 2) + if block_idx >= 1: # only the first block in any stack can have a stride > 1 + block_args['stride'] = 1 + + extract_features = False + if last_block: + next_stack_idx = stack_idx + 1 + extract_features = next_stack_idx >= len(model_block_args) or \ + model_block_args[next_stack_idx][0]['stride'] > 1 + + next_dilation = current_dilation + if block_args['stride'] > 1: + next_output_stride = current_stride * block_args['stride'] + if next_output_stride > self.output_stride: + next_dilation = current_dilation * block_args['stride'] + block_args['stride'] = 1 + _log_info_if(' Converting stride to dilation to maintain output_stride=={}'.format( + self.output_stride), self.verbose) + else: + current_stride = next_output_stride + block_args['dilation'] = current_dilation + if next_dilation != current_dilation: + current_dilation = next_dilation + + # create the block + block = self._make_block(block_args, total_block_idx, total_block_count) + blocks.append(block) + + # stash feature module name and channel info for model feature extraction + if extract_features: + feature_info = dict( + stage=stack_idx + 1, reduction=current_stride, **block.feature_info(self.feature_location)) + module_name = f'blocks.{stack_idx}.{block_idx}' + leaf_name = feature_info.get('module', '') + feature_info['module'] = '.'.join([module_name, leaf_name]) if leaf_name else module_name + self.features.append(feature_info) + + total_block_idx += 1 # incr global block idx (across all stacks) + stages.append(nn.Sequential(*blocks)) + return stages + + +def _init_weight_goog(m, n='', fix_group_fanout=True): + """ Weight initialization as per Tensorflow official implementations. + + Args: + m (nn.Module): module to init + n (str): module name + fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs + + Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc: + * https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py + * https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + """ + if isinstance(m, CondConv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + init_weight_fn = get_condconv_initializer( + lambda w: nn.init.normal_(w, 0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape) + init_weight_fn(m.weight) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + nn.init.normal_(m.weight, 0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + fan_out = m.weight.size(0) # fan-out + fan_in = 0 + if 'routing_fn' in n: + fan_in = m.weight.size(1) + init_range = 1.0 / math.sqrt(fan_in + fan_out) + nn.init.uniform_(m.weight, -init_range, init_range) + nn.init.zeros_(m.bias) + + +def efficientnet_init_weights(model: nn.Module, init_fn=None): + init_fn = init_fn or _init_weight_goog + for n, m in model.named_modules(): + init_fn(m, n) + diff --git a/testbed/huggingface__pytorch-image-models/timm/models/factory.py b/testbed/huggingface__pytorch-image-models/timm/models/factory.py new file mode 100644 index 0000000000000000000000000000000000000000..d040a9ff62c0a4089536078fee0e9552ab3cdabc --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/factory.py @@ -0,0 +1,86 @@ +from .registry import is_model, is_model_in_modules, model_entrypoint +from .helpers import load_checkpoint +from .layers import set_layer_config +from .hub import load_model_config_from_hf + + +def split_model_name(model_name): + model_split = model_name.split(':', 1) + if len(model_split) == 1: + return '', model_split[0] + else: + source_name, model_name = model_split + assert source_name in ('timm', 'hf_hub') + return source_name, model_name + + +def safe_model_name(model_name, remove_source=True): + def make_safe(name): + return ''.join(c if c.isalnum() else '_' for c in name).rstrip('_') + if remove_source: + model_name = split_model_name(model_name)[-1] + return make_safe(model_name) + + +def create_model( + model_name, + pretrained=False, + checkpoint_path='', + scriptable=None, + exportable=None, + no_jit=None, + **kwargs): + """Create a model + + Args: + model_name (str): name of model to instantiate + pretrained (bool): load pretrained ImageNet-1k weights if true + checkpoint_path (str): path of checkpoint to load after model is initialized + scriptable (bool): set layer config so that model is jit scriptable (not working for all models yet) + exportable (bool): set layer config so that model is traceable / ONNX exportable (not fully impl/obeyed yet) + no_jit (bool): set layer config so that model doesn't utilize jit scripted layers (so far activations only) + + Keyword Args: + drop_rate (float): dropout rate for training (default: 0.0) + global_pool (str): global pool type (default: 'avg') + **: other kwargs are model specific + """ + source_name, model_name = split_model_name(model_name) + + # Only EfficientNet and MobileNetV3 models have support for batchnorm params or drop_connect_rate passed as args + is_efficientnet = is_model_in_modules(model_name, ['efficientnet', 'mobilenetv3']) + if not is_efficientnet: + kwargs.pop('bn_tf', None) + kwargs.pop('bn_momentum', None) + kwargs.pop('bn_eps', None) + + # handle backwards compat with drop_connect -> drop_path change + drop_connect_rate = kwargs.pop('drop_connect_rate', None) + if drop_connect_rate is not None and kwargs.get('drop_path_rate', None) is None: + print("WARNING: 'drop_connect' as an argument is deprecated, please use 'drop_path'." + " Setting drop_path to %f." % drop_connect_rate) + kwargs['drop_path_rate'] = drop_connect_rate + + # Parameters that aren't supported by all models or are intended to only override model defaults if set + # should default to None in command line args/cfg. Remove them if they are present and not set so that + # non-supporting models don't break and default args remain in effect. + kwargs = {k: v for k, v in kwargs.items() if v is not None} + + if source_name == 'hf_hub': + # For model names specified in the form `hf_hub:path/architecture_name#revision`, + # load model weights + default_cfg from Hugging Face hub. + hf_default_cfg, model_name = load_model_config_from_hf(model_name) + kwargs['external_default_cfg'] = hf_default_cfg # FIXME revamp default_cfg interface someday + + if is_model(model_name): + create_fn = model_entrypoint(model_name) + else: + raise RuntimeError('Unknown model (%s)' % model_name) + + with set_layer_config(scriptable=scriptable, exportable=exportable, no_jit=no_jit): + model = create_fn(pretrained=pretrained, **kwargs) + + if checkpoint_path: + load_checkpoint(model, checkpoint_path) + + return model diff --git a/testbed/huggingface__pytorch-image-models/timm/models/features.py b/testbed/huggingface__pytorch-image-models/timm/models/features.py new file mode 100644 index 0000000000000000000000000000000000000000..b1d6890f3ed07311c5484b4a397c3b1da555880a --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/features.py @@ -0,0 +1,284 @@ +""" PyTorch Feature Extraction Helpers + +A collection of classes, functions, modules to help extract features from models +and provide a common interface for describing them. + +The return_layers, module re-writing idea inspired by torchvision IntermediateLayerGetter +https://github.com/pytorch/vision/blob/d88d8961ae51507d0cb680329d985b1488b1b76b/torchvision/models/_utils.py + +Hacked together by / Copyright 2020 Ross Wightman +""" +from collections import OrderedDict, defaultdict +from copy import deepcopy +from functools import partial +from typing import Dict, List, Tuple + +import torch +import torch.nn as nn + + +class FeatureInfo: + + def __init__(self, feature_info: List[Dict], out_indices: Tuple[int]): + prev_reduction = 1 + for fi in feature_info: + # sanity check the mandatory fields, there may be additional fields depending on the model + assert 'num_chs' in fi and fi['num_chs'] > 0 + assert 'reduction' in fi and fi['reduction'] >= prev_reduction + prev_reduction = fi['reduction'] + assert 'module' in fi + self.out_indices = out_indices + self.info = feature_info + + def from_other(self, out_indices: Tuple[int]): + return FeatureInfo(deepcopy(self.info), out_indices) + + def get(self, key, idx=None): + """ Get value by key at specified index (indices) + if idx == None, returns value for key at each output index + if idx is an integer, return value for that feature module index (ignoring output indices) + if idx is a list/tupple, return value for each module index (ignoring output indices) + """ + if idx is None: + return [self.info[i][key] for i in self.out_indices] + if isinstance(idx, (tuple, list)): + return [self.info[i][key] for i in idx] + else: + return self.info[idx][key] + + def get_dicts(self, keys=None, idx=None): + """ return info dicts for specified keys (or all if None) at specified indices (or out_indices if None) + """ + if idx is None: + if keys is None: + return [self.info[i] for i in self.out_indices] + else: + return [{k: self.info[i][k] for k in keys} for i in self.out_indices] + if isinstance(idx, (tuple, list)): + return [self.info[i] if keys is None else {k: self.info[i][k] for k in keys} for i in idx] + else: + return self.info[idx] if keys is None else {k: self.info[idx][k] for k in keys} + + def channels(self, idx=None): + """ feature channels accessor + """ + return self.get('num_chs', idx) + + def reduction(self, idx=None): + """ feature reduction (output stride) accessor + """ + return self.get('reduction', idx) + + def module_name(self, idx=None): + """ feature module name accessor + """ + return self.get('module', idx) + + def __getitem__(self, item): + return self.info[item] + + def __len__(self): + return len(self.info) + + +class FeatureHooks: + """ Feature Hook Helper + + This module helps with the setup and extraction of hooks for extracting features from + internal nodes in a model by node name. This works quite well in eager Python but needs + redesign for torcscript. + """ + + def __init__(self, hooks, named_modules, out_map=None, default_hook_type='forward'): + # setup feature hooks + modules = {k: v for k, v in named_modules} + for i, h in enumerate(hooks): + hook_name = h['module'] + m = modules[hook_name] + hook_id = out_map[i] if out_map else hook_name + hook_fn = partial(self._collect_output_hook, hook_id) + hook_type = h['hook_type'] if 'hook_type' in h else default_hook_type + if hook_type == 'forward_pre': + m.register_forward_pre_hook(hook_fn) + elif hook_type == 'forward': + m.register_forward_hook(hook_fn) + else: + assert False, "Unsupported hook type" + self._feature_outputs = defaultdict(OrderedDict) + + def _collect_output_hook(self, hook_id, *args): + x = args[-1] # tensor we want is last argument, output for fwd, input for fwd_pre + if isinstance(x, tuple): + x = x[0] # unwrap input tuple + self._feature_outputs[x.device][hook_id] = x + + def get_output(self, device) -> Dict[str, torch.tensor]: + output = self._feature_outputs[device] + self._feature_outputs[device] = OrderedDict() # clear after reading + return output + + +def _module_list(module, flatten_sequential=False): + # a yield/iter would be better for this but wouldn't be compatible with torchscript + ml = [] + for name, module in module.named_children(): + if flatten_sequential and isinstance(module, nn.Sequential): + # first level of Sequential containers is flattened into containing model + for child_name, child_module in module.named_children(): + combined = [name, child_name] + ml.append(('_'.join(combined), '.'.join(combined), child_module)) + else: + ml.append((name, name, module)) + return ml + + +def _get_feature_info(net, out_indices): + feature_info = getattr(net, 'feature_info') + if isinstance(feature_info, FeatureInfo): + return feature_info.from_other(out_indices) + elif isinstance(feature_info, (list, tuple)): + return FeatureInfo(net.feature_info, out_indices) + else: + assert False, "Provided feature_info is not valid" + + +def _get_return_layers(feature_info, out_map): + module_names = feature_info.module_name() + return_layers = {} + for i, name in enumerate(module_names): + return_layers[name] = out_map[i] if out_map is not None else feature_info.out_indices[i] + return return_layers + + +class FeatureDictNet(nn.ModuleDict): + """ Feature extractor with OrderedDict return + + Wrap a model and extract features as specified by the out indices, the network is + partially re-built from contained modules. + + There is a strong assumption that the modules have been registered into the model in the same + order as they are used. There should be no reuse of the same nn.Module more than once, including + trivial modules like `self.relu = nn.ReLU`. + + Only submodules that are directly assigned to the model class (`model.feature1`) or at most + one Sequential container deep (`model.features.1`, with flatten_sequent=True) can be captured. + All Sequential containers that are directly assigned to the original model will have their + modules assigned to this module with the name `model.features.1` being changed to `model.features_1` + + Arguments: + model (nn.Module): model from which we will extract the features + out_indices (tuple[int]): model output indices to extract features for + out_map (sequence): list or tuple specifying desired return id for each out index, + otherwise str(index) is used + feature_concat (bool): whether to concatenate intermediate features that are lists or tuples + vs select element [0] + flatten_sequential (bool): whether to flatten sequential modules assigned to model + """ + def __init__( + self, model, + out_indices=(0, 1, 2, 3, 4), out_map=None, feature_concat=False, flatten_sequential=False): + super(FeatureDictNet, self).__init__() + self.feature_info = _get_feature_info(model, out_indices) + self.concat = feature_concat + self.return_layers = {} + return_layers = _get_return_layers(self.feature_info, out_map) + modules = _module_list(model, flatten_sequential=flatten_sequential) + remaining = set(return_layers.keys()) + layers = OrderedDict() + for new_name, old_name, module in modules: + layers[new_name] = module + if old_name in remaining: + # return id has to be consistently str type for torchscript + self.return_layers[new_name] = str(return_layers[old_name]) + remaining.remove(old_name) + if not remaining: + break + assert not remaining and len(self.return_layers) == len(return_layers), \ + f'Return layers ({remaining}) are not present in model' + self.update(layers) + + def _collect(self, x) -> (Dict[str, torch.Tensor]): + out = OrderedDict() + for name, module in self.items(): + x = module(x) + if name in self.return_layers: + out_id = self.return_layers[name] + if isinstance(x, (tuple, list)): + # If model tap is a tuple or list, concat or select first element + # FIXME this may need to be more generic / flexible for some nets + out[out_id] = torch.cat(x, 1) if self.concat else x[0] + else: + out[out_id] = x + return out + + def forward(self, x) -> Dict[str, torch.Tensor]: + return self._collect(x) + + +class FeatureListNet(FeatureDictNet): + """ Feature extractor with list return + + See docstring for FeatureDictNet above, this class exists only to appease Torchscript typing constraints. + In eager Python we could have returned List[Tensor] vs Dict[id, Tensor] based on a member bool. + """ + def __init__( + self, model, + out_indices=(0, 1, 2, 3, 4), out_map=None, feature_concat=False, flatten_sequential=False): + super(FeatureListNet, self).__init__( + model, out_indices=out_indices, out_map=out_map, feature_concat=feature_concat, + flatten_sequential=flatten_sequential) + + def forward(self, x) -> (List[torch.Tensor]): + return list(self._collect(x).values()) + + +class FeatureHookNet(nn.ModuleDict): + """ FeatureHookNet + + Wrap a model and extract features specified by the out indices using forward/forward-pre hooks. + + If `no_rewrite` is True, features are extracted via hooks without modifying the underlying + network in any way. + + If `no_rewrite` is False, the model will be re-written as in the + FeatureList/FeatureDict case by folding first to second (Sequential only) level modules into this one. + + FIXME this does not currently work with Torchscript, see FeatureHooks class + """ + def __init__( + self, model, + out_indices=(0, 1, 2, 3, 4), out_map=None, out_as_dict=False, no_rewrite=False, + feature_concat=False, flatten_sequential=False, default_hook_type='forward'): + super(FeatureHookNet, self).__init__() + assert not torch.jit.is_scripting() + self.feature_info = _get_feature_info(model, out_indices) + self.out_as_dict = out_as_dict + layers = OrderedDict() + hooks = [] + if no_rewrite: + assert not flatten_sequential + if hasattr(model, 'reset_classifier'): # make sure classifier is removed? + model.reset_classifier(0) + layers['body'] = model + hooks.extend(self.feature_info.get_dicts()) + else: + modules = _module_list(model, flatten_sequential=flatten_sequential) + remaining = {f['module']: f['hook_type'] if 'hook_type' in f else default_hook_type + for f in self.feature_info.get_dicts()} + for new_name, old_name, module in modules: + layers[new_name] = module + for fn, fm in module.named_modules(prefix=old_name): + if fn in remaining: + hooks.append(dict(module=fn, hook_type=remaining[fn])) + del remaining[fn] + if not remaining: + break + assert not remaining, f'Return layers ({remaining}) are not present in model' + self.update(layers) + self.hooks = FeatureHooks(hooks, model.named_modules(), out_map=out_map) + + def forward(self, x): + for name, module in self.items(): + x = module(x) + out = self.hooks.get_output(x.device) + return out if self.out_as_dict else list(out.values()) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/ghostnet.py b/testbed/huggingface__pytorch-image-models/timm/models/ghostnet.py new file mode 100644 index 0000000000000000000000000000000000000000..3b6f90a42fa02099f7c2d769d97ec359bd82733d --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/ghostnet.py @@ -0,0 +1,276 @@ +""" +An implementation of GhostNet Model as defined in: +GhostNet: More Features from Cheap Operations. https://arxiv.org/abs/1911.11907 +The train script of the model is similar to that of MobileNetV3 +Original model: https://github.com/huawei-noah/CV-backbones/tree/master/ghostnet_pytorch +""" +import math +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .layers import SelectAdaptivePool2d, Linear, make_divisible +from .efficientnet_blocks import SqueezeExcite, ConvBnAct +from .helpers import build_model_with_cfg +from .registry import register_model + + +__all__ = ['GhostNet'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'ghostnet_050': _cfg(url=''), + 'ghostnet_100': _cfg( + url='https://github.com/huawei-noah/CV-backbones/releases/download/ghostnet_pth/ghostnet_1x.pth'), + 'ghostnet_130': _cfg(url=''), +} + + +_SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4)) + + +class GhostModule(nn.Module): + def __init__(self, inp, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, relu=True): + super(GhostModule, self).__init__() + self.oup = oup + init_channels = math.ceil(oup / ratio) + new_channels = init_channels * (ratio - 1) + + self.primary_conv = nn.Sequential( + nn.Conv2d(inp, init_channels, kernel_size, stride, kernel_size//2, bias=False), + nn.BatchNorm2d(init_channels), + nn.ReLU(inplace=True) if relu else nn.Sequential(), + ) + + self.cheap_operation = nn.Sequential( + nn.Conv2d(init_channels, new_channels, dw_size, 1, dw_size//2, groups=init_channels, bias=False), + nn.BatchNorm2d(new_channels), + nn.ReLU(inplace=True) if relu else nn.Sequential(), + ) + + def forward(self, x): + x1 = self.primary_conv(x) + x2 = self.cheap_operation(x1) + out = torch.cat([x1, x2], dim=1) + return out[:, :self.oup, :, :] + + +class GhostBottleneck(nn.Module): + """ Ghost bottleneck w/ optional SE""" + + def __init__(self, in_chs, mid_chs, out_chs, dw_kernel_size=3, + stride=1, act_layer=nn.ReLU, se_ratio=0.): + super(GhostBottleneck, self).__init__() + has_se = se_ratio is not None and se_ratio > 0. + self.stride = stride + + # Point-wise expansion + self.ghost1 = GhostModule(in_chs, mid_chs, relu=True) + + # Depth-wise convolution + if self.stride > 1: + self.conv_dw = nn.Conv2d( + mid_chs, mid_chs, dw_kernel_size, stride=stride, + padding=(dw_kernel_size-1)//2, groups=mid_chs, bias=False) + self.bn_dw = nn.BatchNorm2d(mid_chs) + else: + self.conv_dw = None + self.bn_dw = None + + # Squeeze-and-excitation + self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None + + # Point-wise linear projection + self.ghost2 = GhostModule(mid_chs, out_chs, relu=False) + + # shortcut + if in_chs == out_chs and self.stride == 1: + self.shortcut = nn.Sequential() + else: + self.shortcut = nn.Sequential( + nn.Conv2d( + in_chs, in_chs, dw_kernel_size, stride=stride, + padding=(dw_kernel_size-1)//2, groups=in_chs, bias=False), + nn.BatchNorm2d(in_chs), + nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(out_chs), + ) + + def forward(self, x): + shortcut = x + + # 1st ghost bottleneck + x = self.ghost1(x) + + # Depth-wise convolution + if self.conv_dw is not None: + x = self.conv_dw(x) + x = self.bn_dw(x) + + # Squeeze-and-excitation + if self.se is not None: + x = self.se(x) + + # 2nd ghost bottleneck + x = self.ghost2(x) + + x += self.shortcut(shortcut) + return x + + +class GhostNet(nn.Module): + def __init__(self, cfgs, num_classes=1000, width=1.0, dropout=0.2, in_chans=3, output_stride=32, global_pool='avg'): + super(GhostNet, self).__init__() + # setting of inverted residual blocks + assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported' + self.cfgs = cfgs + self.num_classes = num_classes + self.dropout = dropout + self.feature_info = [] + + # building first layer + stem_chs = make_divisible(16 * width, 4) + self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False) + self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=f'conv_stem')) + self.bn1 = nn.BatchNorm2d(stem_chs) + self.act1 = nn.ReLU(inplace=True) + prev_chs = stem_chs + + # building inverted residual blocks + stages = nn.ModuleList([]) + block = GhostBottleneck + stage_idx = 0 + net_stride = 2 + for cfg in self.cfgs: + layers = [] + s = 1 + for k, exp_size, c, se_ratio, s in cfg: + out_chs = make_divisible(c * width, 4) + mid_chs = make_divisible(exp_size * width, 4) + layers.append(block(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio)) + prev_chs = out_chs + if s > 1: + net_stride *= 2 + self.feature_info.append(dict( + num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}')) + stages.append(nn.Sequential(*layers)) + stage_idx += 1 + + out_chs = make_divisible(exp_size * width, 4) + stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1))) + self.pool_dim = prev_chs = out_chs + + self.blocks = nn.Sequential(*stages) + + # building last several layers + self.num_features = out_chs = 1280 + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True) + self.act2 = nn.ReLU(inplace=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity() + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + # cannot meaningfully change pooling of efficient head after creation + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.pool_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + x = self.blocks(x) + x = self.global_pool(x) + x = self.conv_head(x) + x = self.act2(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.flatten(x) + if self.dropout > 0.: + x = F.dropout(x, p=self.dropout, training=self.training) + x = self.classifier(x) + return x + + +def _create_ghostnet(variant, width=1.0, pretrained=False, **kwargs): + """ + Constructs a GhostNet model + """ + cfgs = [ + # k, t, c, SE, s + # stage1 + [[3, 16, 16, 0, 1]], + # stage2 + [[3, 48, 24, 0, 2]], + [[3, 72, 24, 0, 1]], + # stage3 + [[5, 72, 40, 0.25, 2]], + [[5, 120, 40, 0.25, 1]], + # stage4 + [[3, 240, 80, 0, 2]], + [[3, 200, 80, 0, 1], + [3, 184, 80, 0, 1], + [3, 184, 80, 0, 1], + [3, 480, 112, 0.25, 1], + [3, 672, 112, 0.25, 1] + ], + # stage5 + [[5, 672, 160, 0.25, 2]], + [[5, 960, 160, 0, 1], + [5, 960, 160, 0.25, 1], + [5, 960, 160, 0, 1], + [5, 960, 160, 0.25, 1] + ] + ] + model_kwargs = dict( + cfgs=cfgs, + width=width, + **kwargs, + ) + return build_model_with_cfg( + GhostNet, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), + **model_kwargs) + + +@register_model +def ghostnet_050(pretrained=False, **kwargs): + """ GhostNet-0.5x """ + model = _create_ghostnet('ghostnet_050', width=0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def ghostnet_100(pretrained=False, **kwargs): + """ GhostNet-1.0x """ + model = _create_ghostnet('ghostnet_100', width=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def ghostnet_130(pretrained=False, **kwargs): + """ GhostNet-1.3x """ + model = _create_ghostnet('ghostnet_130', width=1.3, pretrained=pretrained, **kwargs) + return model diff --git a/testbed/huggingface__pytorch-image-models/timm/models/gluon_resnet.py b/testbed/huggingface__pytorch-image-models/timm/models/gluon_resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..027a10b534b50e94775d463304235256f1cfc16f --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/gluon_resnet.py @@ -0,0 +1,248 @@ +"""Pytorch impl of MxNet Gluon ResNet/(SE)ResNeXt variants +This file evolved from https://github.com/pytorch/vision 'resnet.py' with (SE)-ResNeXt additions +and ports of Gluon variations (https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/resnet.py) +by Ross Wightman +""" + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import SEModule +from .registry import register_model +from .resnet import ResNet, Bottleneck, BasicBlock + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'gluon_resnet18_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet18_v1b-0757602b.pth'), + 'gluon_resnet34_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet34_v1b-c6d82d59.pth'), + 'gluon_resnet50_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1b-0ebe02e2.pth'), + 'gluon_resnet101_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1b-3b017079.pth'), + 'gluon_resnet152_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1b-c1edb0dd.pth'), + 'gluon_resnet50_v1c': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1c-48092f55.pth', + first_conv='conv1.0'), + 'gluon_resnet101_v1c': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1c-1f26822a.pth', + first_conv='conv1.0'), + 'gluon_resnet152_v1c': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1c-a3bb0b98.pth', + first_conv='conv1.0'), + 'gluon_resnet50_v1d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1d-818a1b1b.pth', + first_conv='conv1.0'), + 'gluon_resnet101_v1d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1d-0f9c8644.pth', + first_conv='conv1.0'), + 'gluon_resnet152_v1d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1d-bd354e12.pth', + first_conv='conv1.0'), + 'gluon_resnet50_v1s': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1s-1762acc0.pth', + first_conv='conv1.0'), + 'gluon_resnet101_v1s': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1s-60fe0cc1.pth', + first_conv='conv1.0'), + 'gluon_resnet152_v1s': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1s-dcc41b81.pth', + first_conv='conv1.0'), + 'gluon_resnext50_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext50_32x4d-e6a097c1.pth'), + 'gluon_resnext101_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_32x4d-b253c8c4.pth'), + 'gluon_resnext101_64x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_64x4d-f9a8e184.pth'), + 'gluon_seresnext50_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext50_32x4d-90cf2d6e.pth'), + 'gluon_seresnext101_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_32x4d-cf52900d.pth'), + 'gluon_seresnext101_64x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_64x4d-f9926f93.pth'), + 'gluon_senet154': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_senet154-70a1a3c0.pth', + first_conv='conv1.0'), +} + + +def _create_resnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def gluon_resnet18_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('gluon_resnet18_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet34_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-34 model. + """ + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('gluon_resnet34_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('gluon_resnet50_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet101_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs) + return _create_resnet('gluon_resnet101_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs) + return _create_resnet('gluon_resnet152_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1c(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet50_v1c', pretrained, **model_args) + + +@register_model +def gluon_resnet101_v1c(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet101_v1c', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1c(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet152_v1c', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1d(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('gluon_resnet50_v1d', pretrained, **model_args) + + +@register_model +def gluon_resnet101_v1d(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('gluon_resnet101_v1d', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1d(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('gluon_resnet152_v1d', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1s(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=64, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet50_v1s', pretrained, **model_args) + + + +@register_model +def gluon_resnet101_v1s(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=64, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet101_v1s', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1s(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=64, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet152_v1s', pretrained, **model_args) + + + +@register_model +def gluon_resnext50_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50-32x4d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('gluon_resnext50_32x4d', pretrained, **model_args) + + +@register_model +def gluon_resnext101_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('gluon_resnext101_32x4d', pretrained, **model_args) + + +@register_model +def gluon_resnext101_64x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, **kwargs) + return _create_resnet('gluon_resnext101_64x4d', pretrained, **model_args) + + +@register_model +def gluon_seresnext50_32x4d(pretrained=False, **kwargs): + """Constructs a SEResNeXt50-32x4d model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_seresnext50_32x4d', pretrained, **model_args) + + +@register_model +def gluon_seresnext101_32x4d(pretrained=False, **kwargs): + """Constructs a SEResNeXt-101-32x4d model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_seresnext101_32x4d', pretrained, **model_args) + + +@register_model +def gluon_seresnext101_64x4d(pretrained=False, **kwargs): + """Constructs a SEResNeXt-101-64x4d model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, + block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_seresnext101_64x4d', pretrained, **model_args) + + +@register_model +def gluon_senet154(pretrained=False, **kwargs): + """Constructs an SENet-154 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], cardinality=64, base_width=4, stem_type='deep', + down_kernel_size=3, block_reduce_first=2, block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_senet154', pretrained, **model_args) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/gluon_xception.py b/testbed/huggingface__pytorch-image-models/timm/models/gluon_xception.py new file mode 100644 index 0000000000000000000000000000000000000000..fbd668a585e676726a7a6f8bd43642e57e4566e2 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/gluon_xception.py @@ -0,0 +1,246 @@ +"""Pytorch impl of Gluon Xception +This is a port of the Gluon Xception code and weights, itself ported from a PyTorch DeepLab impl. + +Gluon model: (https://gluon-cv.mxnet.io/_modules/gluoncv/model_zoo/xception.html) +Original PyTorch DeepLab impl: https://github.com/jfzhang95/pytorch-deeplab-xception + +Hacked together by / Copyright 2020 Ross Wightman +""" +from collections import OrderedDict + +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier, get_padding +from .registry import register_model + +__all__ = ['Xception65'] + +default_cfgs = { + 'gluon_xception65': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_xception-7015a15c.pth', + 'input_size': (3, 299, 299), + 'crop_pct': 0.903, + 'pool_size': (10, 10), + 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, + 'std': IMAGENET_DEFAULT_STD, + 'num_classes': 1000, + 'first_conv': 'conv1', + 'classifier': 'fc' + # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 + }, +} + +""" PADDING NOTES +The original PyTorch and Gluon impl of these models dutifully reproduced the +aligned padding added to Tensorflow models for Deeplab. This padding was compensating +for Tensorflow 'SAME' padding. PyTorch symmetric padding behaves the way we'd want it to. +""" + + +class SeparableConv2d(nn.Module): + def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, norm_layer=None): + super(SeparableConv2d, self).__init__() + self.kernel_size = kernel_size + self.dilation = dilation + + # depthwise convolution + padding = get_padding(kernel_size, stride, dilation) + self.conv_dw = nn.Conv2d( + inplanes, inplanes, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=inplanes, bias=bias) + self.bn = norm_layer(num_features=inplanes) + # pointwise convolution + self.conv_pw = nn.Conv2d(inplanes, planes, kernel_size=1, bias=bias) + + def forward(self, x): + x = self.conv_dw(x) + x = self.bn(x) + x = self.conv_pw(x) + return x + + +class Block(nn.Module): + def __init__(self, inplanes, planes, stride=1, dilation=1, start_with_relu=True, norm_layer=None): + super(Block, self).__init__() + if isinstance(planes, (list, tuple)): + assert len(planes) == 3 + else: + planes = (planes,) * 3 + outplanes = planes[-1] + + if outplanes != inplanes or stride != 1: + self.skip = nn.Sequential() + self.skip.add_module('conv1', nn.Conv2d( + inplanes, outplanes, 1, stride=stride, bias=False)), + self.skip.add_module('bn1', norm_layer(num_features=outplanes)) + else: + self.skip = None + + rep = OrderedDict() + for i in range(3): + rep['act%d' % (i + 1)] = nn.ReLU(inplace=True) + rep['conv%d' % (i + 1)] = SeparableConv2d( + inplanes, planes[i], 3, stride=stride if i == 2 else 1, dilation=dilation, norm_layer=norm_layer) + rep['bn%d' % (i + 1)] = norm_layer(planes[i]) + inplanes = planes[i] + + if not start_with_relu: + del rep['act1'] + else: + rep['act1'] = nn.ReLU(inplace=False) + self.rep = nn.Sequential(rep) + + def forward(self, x): + skip = x + if self.skip is not None: + skip = self.skip(skip) + x = self.rep(x) + skip + return x + + +class Xception65(nn.Module): + """Modified Aligned Xception. + + NOTE: only the 65 layer version is included here, the 71 layer variant + was not correct and had no pretrained weights + """ + + def __init__(self, num_classes=1000, in_chans=3, output_stride=32, norm_layer=nn.BatchNorm2d, + drop_rate=0., global_pool='avg'): + super(Xception65, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + if output_stride == 32: + entry_block3_stride = 2 + exit_block20_stride = 2 + middle_dilation = 1 + exit_dilation = (1, 1) + elif output_stride == 16: + entry_block3_stride = 2 + exit_block20_stride = 1 + middle_dilation = 1 + exit_dilation = (1, 2) + elif output_stride == 8: + entry_block3_stride = 1 + exit_block20_stride = 1 + middle_dilation = 2 + exit_dilation = (2, 4) + else: + raise NotImplementedError + + # Entry flow + self.conv1 = nn.Conv2d(in_chans, 32, kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = norm_layer(num_features=32) + self.act1 = nn.ReLU(inplace=True) + + self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False) + self.bn2 = norm_layer(num_features=64) + self.act2 = nn.ReLU(inplace=True) + + self.block1 = Block(64, 128, stride=2, start_with_relu=False, norm_layer=norm_layer) + self.block1_act = nn.ReLU(inplace=True) + self.block2 = Block(128, 256, stride=2, start_with_relu=False, norm_layer=norm_layer) + self.block3 = Block(256, 728, stride=entry_block3_stride, norm_layer=norm_layer) + + # Middle flow + self.mid = nn.Sequential(OrderedDict([('block%d' % i, Block( + 728, 728, stride=1, dilation=middle_dilation, norm_layer=norm_layer)) for i in range(4, 20)])) + + # Exit flow + self.block20 = Block( + 728, (728, 1024, 1024), stride=exit_block20_stride, dilation=exit_dilation[0], norm_layer=norm_layer) + self.block20_act = nn.ReLU(inplace=True) + + self.conv3 = SeparableConv2d(1024, 1536, 3, stride=1, dilation=exit_dilation[1], norm_layer=norm_layer) + self.bn3 = norm_layer(num_features=1536) + self.act3 = nn.ReLU(inplace=True) + + self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=exit_dilation[1], norm_layer=norm_layer) + self.bn4 = norm_layer(num_features=1536) + self.act4 = nn.ReLU(inplace=True) + + self.num_features = 2048 + self.conv5 = SeparableConv2d( + 1536, self.num_features, 3, stride=1, dilation=exit_dilation[1], norm_layer=norm_layer) + self.bn5 = norm_layer(num_features=self.num_features) + self.act5 = nn.ReLU(inplace=True) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='act2'), + dict(num_chs=128, reduction=4, module='block1_act'), + dict(num_chs=256, reduction=8, module='block3.rep.act1'), + dict(num_chs=728, reduction=16, module='block20.rep.act1'), + dict(num_chs=2048, reduction=32, module='act5'), + ] + + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + # Entry flow + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + + x = self.block1(x) + x = self.block1_act(x) + # c1 = x + x = self.block2(x) + # c2 = x + x = self.block3(x) + + # Middle flow + x = self.mid(x) + # c3 = x + + # Exit flow + x = self.block20(x) + x = self.block20_act(x) + x = self.conv3(x) + x = self.bn3(x) + x = self.act3(x) + + x = self.conv4(x) + x = self.bn4(x) + x = self.act4(x) + + x = self.conv5(x) + x = self.bn5(x) + x = self.act5(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate: + F.dropout(x, self.drop_rate, training=self.training) + x = self.fc(x) + return x + + +def _create_gluon_xception(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + Xception65, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(feature_cls='hook'), + **kwargs) + + +@register_model +def gluon_xception65(pretrained=False, **kwargs): + """ Modified Aligned Xception-65 + """ + return _create_gluon_xception('gluon_xception65', pretrained, **kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/hardcorenas.py b/testbed/huggingface__pytorch-image-models/timm/models/hardcorenas.py new file mode 100644 index 0000000000000000000000000000000000000000..9988a0444558d9e7f4b640ff468cc63b1dc1d7f4 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/hardcorenas.py @@ -0,0 +1,152 @@ +from functools import partial + +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .efficientnet_blocks import SqueezeExcite +from .efficientnet_builder import decode_arch_def, resolve_act_layer, resolve_bn_args, round_channels +from .helpers import build_model_with_cfg, default_cfg_for_features +from .layers import get_act_fn +from .mobilenetv3 import MobileNetV3, MobileNetV3Features +from .registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'hardcorenas_a': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_A_Green_38ms_75.9_23474aeb.pth'), + 'hardcorenas_b': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_B_Green_40ms_76.5_1f882d1e.pth'), + 'hardcorenas_c': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_C_Green_44ms_77.1_d4148c9e.pth'), + 'hardcorenas_d': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_D_Green_50ms_77.4_23e3cdde.pth'), + 'hardcorenas_e': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_E_Green_55ms_77.9_90f20e8a.pth'), + 'hardcorenas_f': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_F_Green_60ms_78.1_2855edf1.pth'), +} + + +def _gen_hardcorenas(pretrained, variant, arch_def, **kwargs): + """Creates a hardcorenas model + + Ref impl: https://github.com/Alibaba-MIIL/HardCoReNAS + Paper: https://arxiv.org/abs/2102.11646 + + """ + num_features = 1280 + se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels) + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=num_features, + stem_size=32, + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'hard_swish'), + se_layer=se_layer, + **kwargs, + ) + + features_only = False + model_cls = MobileNetV3 + kwargs_filter = None + if model_kwargs.pop('features_only', False): + features_only = True + kwargs_filter = ('num_classes', 'num_features', 'global_pool', 'head_conv', 'head_bias', 'global_pool') + model_cls = MobileNetV3Features + model = build_model_with_cfg( + model_cls, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **model_kwargs) + if features_only: + model.default_cfg = default_cfg_for_features(model.default_cfg) + return model + + +@register_model +def hardcorenas_a(pretrained=False, **kwargs): + """ hardcorenas_A """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e6_c40_nre_se0.25'], + ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25'], + ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_a', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_b(pretrained=False, **kwargs): + """ hardcorenas_B """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], + ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25', 'ir_r1_k3_s1_e3_c24_nre'], + ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'], + ['ir_r1_k5_s2_e3_c80', 'ir_r1_k5_s1_e3_c80', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], + ['ir_r1_k5_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], + ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_b', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_c(pretrained=False, **kwargs): + """ hardcorenas_C """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', + 'ir_r1_k5_s1_e3_c40_nre'], + ['ir_r1_k5_s2_e4_c80', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], + ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], + ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_c', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_d(pretrained=False, **kwargs): + """ hardcorenas_D """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e3_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'], + ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', + 'ir_r1_k3_s1_e3_c80_se0.25'], + ['ir_r1_k3_s1_e4_c112_se0.25', 'ir_r1_k5_s1_e4_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25', + 'ir_r1_k5_s1_e3_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', + 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_d', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_e(pretrained=False, **kwargs): + """ hardcorenas_E """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', + 'ir_r1_k3_s1_e3_c40_nre_se0.25'], ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e6_c80_se0.25'], + ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', + 'ir_r1_k5_s1_e3_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', + 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_e', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_f(pretrained=False, **kwargs): + """ hardcorenas_F """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e6_c40_nre_se0.25'], + ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', + 'ir_r1_k3_s1_e3_c80_se0.25'], + ['ir_r1_k3_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', + 'ir_r1_k3_s1_e3_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25', + 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_f', arch_def=arch_def, **kwargs) + return model diff --git a/testbed/huggingface__pytorch-image-models/timm/models/helpers.py b/testbed/huggingface__pytorch-image-models/timm/models/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..662a7a483b1e40f9f00d931e84762878c612c0c6 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/helpers.py @@ -0,0 +1,508 @@ +""" Model creation / weight loading / state_dict helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +import logging +import os +import math +from collections import OrderedDict +from copy import deepcopy +from typing import Any, Callable, Optional, Tuple + +import torch +import torch.nn as nn + + +from .features import FeatureListNet, FeatureDictNet, FeatureHookNet +from .hub import has_hf_hub, download_cached_file, load_state_dict_from_hf, load_state_dict_from_url +from .layers import Conv2dSame, Linear + + +_logger = logging.getLogger(__name__) + + +def load_state_dict(checkpoint_path, use_ema=False): + if checkpoint_path and os.path.isfile(checkpoint_path): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + state_dict_key = 'state_dict' + if isinstance(checkpoint, dict): + if use_ema and 'state_dict_ema' in checkpoint: + state_dict_key = 'state_dict_ema' + if state_dict_key and state_dict_key in checkpoint: + new_state_dict = OrderedDict() + for k, v in checkpoint[state_dict_key].items(): + # strip `module.` prefix + name = k[7:] if k.startswith('module') else k + new_state_dict[name] = v + state_dict = new_state_dict + else: + state_dict = checkpoint + _logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path)) + return state_dict + else: + _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) + raise FileNotFoundError() + + +def load_checkpoint(model, checkpoint_path, use_ema=False, strict=True): + if os.path.splitext(checkpoint_path)[-1].lower() in ('.npz', '.npy'): + # numpy checkpoint, try to load via model specific load_pretrained fn + if hasattr(model, 'load_pretrained'): + model.load_pretrained(checkpoint_path) + else: + raise NotImplementedError('Model cannot load numpy checkpoint') + return + state_dict = load_state_dict(checkpoint_path, use_ema) + model.load_state_dict(state_dict, strict=strict) + + +def resume_checkpoint(model, checkpoint_path, optimizer=None, loss_scaler=None, log_info=True): + resume_epoch = None + if os.path.isfile(checkpoint_path): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + if isinstance(checkpoint, dict) and 'state_dict' in checkpoint: + if log_info: + _logger.info('Restoring model state from checkpoint...') + new_state_dict = OrderedDict() + for k, v in checkpoint['state_dict'].items(): + name = k[7:] if k.startswith('module') else k + new_state_dict[name] = v + model.load_state_dict(new_state_dict) + + if optimizer is not None and 'optimizer' in checkpoint: + if log_info: + _logger.info('Restoring optimizer state from checkpoint...') + optimizer.load_state_dict(checkpoint['optimizer']) + + if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint: + if log_info: + _logger.info('Restoring AMP loss scaler state from checkpoint...') + loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key]) + + if 'epoch' in checkpoint: + resume_epoch = checkpoint['epoch'] + if 'version' in checkpoint and checkpoint['version'] > 1: + resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save + + if log_info: + _logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch'])) + else: + model.load_state_dict(checkpoint) + if log_info: + _logger.info("Loaded checkpoint '{}'".format(checkpoint_path)) + return resume_epoch + else: + _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) + raise FileNotFoundError() + + +def load_custom_pretrained(model, default_cfg=None, load_fn=None, progress=False, check_hash=False): + r"""Loads a custom (read non .pth) weight file + + Downloads checkpoint file into cache-dir like torch.hub based loaders, but calls + a passed in custom load fun, or the `load_pretrained` model member fn. + + If the object is already present in `model_dir`, it's deserialized and returned. + The default value of `model_dir` is ``/checkpoints`` where + `hub_dir` is the directory returned by :func:`~torch.hub.get_dir`. + + Args: + model: The instantiated model to load weights into + default_cfg (dict): Default pretrained model cfg + load_fn: An external stand alone fn that loads weights into provided model, otherwise a fn named + 'laod_pretrained' on the model will be called if it exists + progress (bool, optional): whether or not to display a progress bar to stderr. Default: False + check_hash(bool, optional): If True, the filename part of the URL should follow the naming convention + ``filename-.ext`` where ```` is the first eight or more + digits of the SHA256 hash of the contents of the file. The hash is used to + ensure unique names and to verify the contents of the file. Default: False + """ + default_cfg = default_cfg or getattr(model, 'default_cfg', None) or {} + pretrained_url = default_cfg.get('url', None) + if not pretrained_url: + _logger.warning("No pretrained weights exist for this model. Using random initialization.") + return + cached_file = download_cached_file(default_cfg['url'], check_hash=check_hash, progress=progress) + + if load_fn is not None: + load_fn(model, cached_file) + elif hasattr(model, 'load_pretrained'): + model.load_pretrained(cached_file) + else: + _logger.warning("Valid function to load pretrained weights is not available, using random initialization.") + + +def adapt_input_conv(in_chans, conv_weight): + conv_type = conv_weight.dtype + conv_weight = conv_weight.float() # Some weights are in torch.half, ensure it's float for sum on CPU + O, I, J, K = conv_weight.shape + if in_chans == 1: + if I > 3: + assert conv_weight.shape[1] % 3 == 0 + # For models with space2depth stems + conv_weight = conv_weight.reshape(O, I // 3, 3, J, K) + conv_weight = conv_weight.sum(dim=2, keepdim=False) + else: + conv_weight = conv_weight.sum(dim=1, keepdim=True) + elif in_chans != 3: + if I != 3: + raise NotImplementedError('Weight format not supported by conversion.') + else: + # NOTE this strategy should be better than random init, but there could be other combinations of + # the original RGB input layer weights that'd work better for specific cases. + repeat = int(math.ceil(in_chans / 3)) + conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :] + conv_weight *= (3 / float(in_chans)) + conv_weight = conv_weight.to(conv_type) + return conv_weight + + +def load_pretrained(model, default_cfg=None, num_classes=1000, in_chans=3, filter_fn=None, strict=True, progress=False): + """ Load pretrained checkpoint + + Args: + model (nn.Module) : PyTorch model module + default_cfg (Optional[Dict]): default configuration for pretrained weights / target dataset + num_classes (int): num_classes for model + in_chans (int): in_chans for model + filter_fn (Optional[Callable]): state_dict filter fn for load (takes state_dict, model as args) + strict (bool): strict load of checkpoint + progress (bool): enable progress bar for weight download + + """ + default_cfg = default_cfg or getattr(model, 'default_cfg', None) or {} + pretrained_url = default_cfg.get('url', None) + hf_hub_id = default_cfg.get('hf_hub', None) + if not pretrained_url and not hf_hub_id: + _logger.warning("No pretrained weights exist for this model. Using random initialization.") + return + if hf_hub_id and has_hf_hub(necessary=not pretrained_url): + _logger.info(f'Loading pretrained weights from Hugging Face hub ({hf_hub_id})') + state_dict = load_state_dict_from_hf(hf_hub_id) + else: + _logger.info(f'Loading pretrained weights from url ({pretrained_url})') + state_dict = load_state_dict_from_url(pretrained_url, progress=progress, map_location='cpu') + if filter_fn is not None: + # for backwards compat with filter fn that take one arg, try one first, the two + try: + state_dict = filter_fn(state_dict) + except TypeError: + state_dict = filter_fn(state_dict, model) + + input_convs = default_cfg.get('first_conv', None) + if input_convs is not None and in_chans != 3: + if isinstance(input_convs, str): + input_convs = (input_convs,) + for input_conv_name in input_convs: + weight_name = input_conv_name + '.weight' + try: + state_dict[weight_name] = adapt_input_conv(in_chans, state_dict[weight_name]) + _logger.info( + f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)') + except NotImplementedError as e: + del state_dict[weight_name] + strict = False + _logger.warning( + f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.') + + classifiers = default_cfg.get('classifier', None) + label_offset = default_cfg.get('label_offset', 0) + if classifiers is not None: + if isinstance(classifiers, str): + classifiers = (classifiers,) + if num_classes != default_cfg['num_classes']: + for classifier_name in classifiers: + # completely discard fully connected if model num_classes doesn't match pretrained weights + del state_dict[classifier_name + '.weight'] + del state_dict[classifier_name + '.bias'] + strict = False + elif label_offset > 0: + for classifier_name in classifiers: + # special case for pretrained weights with an extra background class in pretrained weights + classifier_weight = state_dict[classifier_name + '.weight'] + state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:] + classifier_bias = state_dict[classifier_name + '.bias'] + state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:] + + model.load_state_dict(state_dict, strict=strict) + + +def extract_layer(model, layer): + layer = layer.split('.') + module = model + if hasattr(model, 'module') and layer[0] != 'module': + module = model.module + if not hasattr(model, 'module') and layer[0] == 'module': + layer = layer[1:] + for l in layer: + if hasattr(module, l): + if not l.isdigit(): + module = getattr(module, l) + else: + module = module[int(l)] + else: + return module + return module + + +def set_layer(model, layer, val): + layer = layer.split('.') + module = model + if hasattr(model, 'module') and layer[0] != 'module': + module = model.module + lst_index = 0 + module2 = module + for l in layer: + if hasattr(module2, l): + if not l.isdigit(): + module2 = getattr(module2, l) + else: + module2 = module2[int(l)] + lst_index += 1 + lst_index -= 1 + for l in layer[:lst_index]: + if not l.isdigit(): + module = getattr(module, l) + else: + module = module[int(l)] + l = layer[lst_index] + setattr(module, l, val) + + +def adapt_model_from_string(parent_module, model_string): + separator = '***' + state_dict = {} + lst_shape = model_string.split(separator) + for k in lst_shape: + k = k.split(':') + key = k[0] + shape = k[1][1:-1].split(',') + if shape[0] != '': + state_dict[key] = [int(i) for i in shape] + + new_module = deepcopy(parent_module) + for n, m in parent_module.named_modules(): + old_module = extract_layer(parent_module, n) + if isinstance(old_module, nn.Conv2d) or isinstance(old_module, Conv2dSame): + if isinstance(old_module, Conv2dSame): + conv = Conv2dSame + else: + conv = nn.Conv2d + s = state_dict[n + '.weight'] + in_channels = s[1] + out_channels = s[0] + g = 1 + if old_module.groups > 1: + in_channels = out_channels + g = in_channels + new_conv = conv( + in_channels=in_channels, out_channels=out_channels, kernel_size=old_module.kernel_size, + bias=old_module.bias is not None, padding=old_module.padding, dilation=old_module.dilation, + groups=g, stride=old_module.stride) + set_layer(new_module, n, new_conv) + if isinstance(old_module, nn.BatchNorm2d): + new_bn = nn.BatchNorm2d( + num_features=state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum, + affine=old_module.affine, track_running_stats=True) + set_layer(new_module, n, new_bn) + if isinstance(old_module, nn.Linear): + # FIXME extra checks to ensure this is actually the FC classifier layer and not a diff Linear layer? + num_features = state_dict[n + '.weight'][1] + new_fc = Linear( + in_features=num_features, out_features=old_module.out_features, bias=old_module.bias is not None) + set_layer(new_module, n, new_fc) + if hasattr(new_module, 'num_features'): + new_module.num_features = num_features + new_module.eval() + parent_module.eval() + + return new_module + + +def adapt_model_from_file(parent_module, model_variant): + adapt_file = os.path.join(os.path.dirname(__file__), 'pruned', model_variant + '.txt') + with open(adapt_file, 'r') as f: + return adapt_model_from_string(parent_module, f.read().strip()) + + +def default_cfg_for_features(default_cfg): + default_cfg = deepcopy(default_cfg) + # remove default pretrained cfg fields that don't have much relevance for feature backbone + to_remove = ('num_classes', 'crop_pct', 'classifier', 'global_pool') # add default final pool size? + for tr in to_remove: + default_cfg.pop(tr, None) + return default_cfg + + +def overlay_external_default_cfg(default_cfg, kwargs): + """ Overlay 'external_default_cfg' in kwargs on top of default_cfg arg. + """ + external_default_cfg = kwargs.pop('external_default_cfg', None) + if external_default_cfg: + default_cfg.pop('url', None) # url should come from external cfg + default_cfg.pop('hf_hub', None) # hf hub id should come from external cfg + default_cfg.update(external_default_cfg) + + +def set_default_kwargs(kwargs, names, default_cfg): + for n in names: + # for legacy reasons, model __init__args uses img_size + in_chans as separate args while + # default_cfg has one input_size=(C, H ,W) entry + if n == 'img_size': + input_size = default_cfg.get('input_size', None) + if input_size is not None: + assert len(input_size) == 3 + kwargs.setdefault(n, input_size[-2:]) + elif n == 'in_chans': + input_size = default_cfg.get('input_size', None) + if input_size is not None: + assert len(input_size) == 3 + kwargs.setdefault(n, input_size[0]) + else: + default_val = default_cfg.get(n, None) + if default_val is not None: + kwargs.setdefault(n, default_cfg[n]) + + +def filter_kwargs(kwargs, names): + if not kwargs or not names: + return + for n in names: + kwargs.pop(n, None) + + +def update_default_cfg_and_kwargs(default_cfg, kwargs, kwargs_filter): + """ Update the default_cfg and kwargs before passing to model + + FIXME this sequence of overlay default_cfg, set default kwargs, filter kwargs + could/should be replaced by an improved configuration mechanism + + Args: + default_cfg: input default_cfg (updated in-place) + kwargs: keyword args passed to model build fn (updated in-place) + kwargs_filter: keyword arg keys that must be removed before model __init__ + """ + # Overlay default cfg values from `external_default_cfg` if it exists in kwargs + overlay_external_default_cfg(default_cfg, kwargs) + # Set model __init__ args that can be determined by default_cfg (if not already passed as kwargs) + default_kwarg_names = ('num_classes', 'global_pool', 'in_chans') + if default_cfg.get('fixed_input_size', False): + # if fixed_input_size exists and is True, model takes an img_size arg that fixes its input size + default_kwarg_names += ('img_size',) + set_default_kwargs(kwargs, names=default_kwarg_names, default_cfg=default_cfg) + # Filter keyword args for task specific model variants (some 'features only' models, etc.) + filter_kwargs(kwargs, names=kwargs_filter) + + +def build_model_with_cfg( + model_cls: Callable, + variant: str, + pretrained: bool, + default_cfg: dict, + model_cfg: Optional[Any] = None, + feature_cfg: Optional[dict] = None, + pretrained_strict: bool = True, + pretrained_filter_fn: Optional[Callable] = None, + pretrained_custom_load: bool = False, + kwargs_filter: Optional[Tuple[str]] = None, + **kwargs): + """ Build model with specified default_cfg and optional model_cfg + + This helper fn aids in the construction of a model including: + * handling default_cfg and associated pretained weight loading + * passing through optional model_cfg for models with config based arch spec + * features_only model adaptation + * pruning config / model adaptation + + Args: + model_cls (nn.Module): model class + variant (str): model variant name + pretrained (bool): load pretrained weights + default_cfg (dict): model's default pretrained/task config + model_cfg (Optional[Dict]): model's architecture config + feature_cfg (Optional[Dict]: feature extraction adapter config + pretrained_strict (bool): load pretrained weights strictly + pretrained_filter_fn (Optional[Callable]): filter callable for pretrained weights + pretrained_custom_load (bool): use custom load fn, to load numpy or other non PyTorch weights + kwargs_filter (Optional[Tuple]): kwargs to filter before passing to model + **kwargs: model args passed through to model __init__ + """ + pruned = kwargs.pop('pruned', False) + features = False + feature_cfg = feature_cfg or {} + default_cfg = deepcopy(default_cfg) if default_cfg else {} + update_default_cfg_and_kwargs(default_cfg, kwargs, kwargs_filter) + default_cfg.setdefault('architecture', variant) + + # Setup for feature extraction wrapper done at end of this fn + if kwargs.pop('features_only', False): + features = True + feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4)) + if 'out_indices' in kwargs: + feature_cfg['out_indices'] = kwargs.pop('out_indices') + + # Build the model + model = model_cls(**kwargs) if model_cfg is None else model_cls(cfg=model_cfg, **kwargs) + model.default_cfg = default_cfg + + if pruned: + model = adapt_model_from_file(model, variant) + + # For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats + num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000)) + if pretrained: + if pretrained_custom_load: + load_custom_pretrained(model) + else: + load_pretrained( + model, + num_classes=num_classes_pretrained, + in_chans=kwargs.get('in_chans', 3), + filter_fn=pretrained_filter_fn, + strict=pretrained_strict) + + # Wrap the model in a feature extraction module if enabled + if features: + feature_cls = FeatureListNet + if 'feature_cls' in feature_cfg: + feature_cls = feature_cfg.pop('feature_cls') + if isinstance(feature_cls, str): + feature_cls = feature_cls.lower() + if 'hook' in feature_cls: + feature_cls = FeatureHookNet + else: + assert False, f'Unknown feature class {feature_cls}' + model = feature_cls(model, **feature_cfg) + model.default_cfg = default_cfg_for_features(default_cfg) # add back default_cfg + + return model + + +def model_parameters(model, exclude_head=False): + if exclude_head: + # FIXME this a bit of a quick and dirty hack to skip classifier head params based on ordering + return [p for p in model.parameters()][:-2] + else: + return model.parameters() + + +def named_apply(fn: Callable, module: nn.Module, name='', depth_first=True, include_root=False) -> nn.Module: + if not depth_first and include_root: + fn(module=module, name=name) + for child_name, child_module in module.named_children(): + child_name = '.'.join((name, child_name)) if name else child_name + named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True) + if depth_first and include_root: + fn(module=module, name=name) + return module + + +def named_modules(module: nn.Module, name='', depth_first=True, include_root=False): + if not depth_first and include_root: + yield name, module + for child_name, child_module in module.named_children(): + child_name = '.'.join((name, child_name)) if name else child_name + yield from named_modules( + module=child_module, name=child_name, depth_first=depth_first, include_root=True) + if depth_first and include_root: + yield name, module diff --git a/testbed/huggingface__pytorch-image-models/timm/models/hrnet.py b/testbed/huggingface__pytorch-image-models/timm/models/hrnet.py new file mode 100644 index 0000000000000000000000000000000000000000..c56964f64feec08f10b02ad368987eecd46db618 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/hrnet.py @@ -0,0 +1,836 @@ +""" HRNet + +Copied from https://github.com/HRNet/HRNet-Image-Classification + +Original header: + Copyright (c) Microsoft + Licensed under the MIT License. + Written by Bin Xiao (Bin.Xiao@microsoft.com) + Modified by Ke Sun (sunk@mail.ustc.edu.cn) +""" +import logging +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .features import FeatureInfo +from .helpers import build_model_with_cfg, default_cfg_for_features +from .layers import create_classifier +from .registry import register_model +from .resnet import BasicBlock, Bottleneck # leveraging ResNet blocks w/ additional features like SE + +_BN_MOMENTUM = 0.1 +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'hrnet_w18_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnet_w18_small_v1-f460c6bc.pth'), + 'hrnet_w18_small_v2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnet_w18_small_v2-4c50a8cb.pth'), + 'hrnet_w18': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w18-8cb57bb9.pth'), + 'hrnet_w30': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w30-8d7f8dab.pth'), + 'hrnet_w32': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w32-90d8c5fb.pth'), + 'hrnet_w40': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w40-7cd397a4.pth'), + 'hrnet_w44': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w44-c9ac8c18.pth'), + 'hrnet_w48': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w48-abd2e6ab.pth'), + 'hrnet_w64': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w64-b47cc881.pth'), +} + +cfg_cls = dict( + hrnet_w18_small=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(1,), + NUM_CHANNELS=(32,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2), + NUM_CHANNELS=(16, 32), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=1, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2), + NUM_CHANNELS=(16, 32, 64), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=1, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2, 2), + NUM_CHANNELS=(16, 32, 64, 128), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w18_small_v2=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(2,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2), + NUM_CHANNELS=(18, 36), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=3, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2), + NUM_CHANNELS=(18, 36, 72), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=2, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2, 2), + NUM_CHANNELS=(18, 36, 72, 144), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w18=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(18, 36), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(18, 36, 72), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(18, 36, 72, 144), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w30=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(30, 60), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(30, 60, 120), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(30, 60, 120, 240), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w32=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(32, 64), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(32, 64, 128), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(32, 64, 128, 256), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w40=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(40, 80), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(40, 80, 160), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(40, 80, 160, 320), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w44=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(44, 88), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(44, 88, 176), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(44, 88, 176, 352), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w48=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(48, 96), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(48, 96, 192), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(48, 96, 192, 384), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w64=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(64, 128), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(64, 128, 256), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(64, 128, 256, 512), + FUSE_METHOD='SUM', + ), + ) +) + + +class HighResolutionModule(nn.Module): + def __init__(self, num_branches, blocks, num_blocks, num_inchannels, + num_channels, fuse_method, multi_scale_output=True): + super(HighResolutionModule, self).__init__() + self._check_branches( + num_branches, blocks, num_blocks, num_inchannels, num_channels) + + self.num_inchannels = num_inchannels + self.fuse_method = fuse_method + self.num_branches = num_branches + + self.multi_scale_output = multi_scale_output + + self.branches = self._make_branches( + num_branches, blocks, num_blocks, num_channels) + self.fuse_layers = self._make_fuse_layers() + self.fuse_act = nn.ReLU(False) + + def _check_branches(self, num_branches, blocks, num_blocks, num_inchannels, num_channels): + error_msg = '' + if num_branches != len(num_blocks): + error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(num_branches, len(num_blocks)) + elif num_branches != len(num_channels): + error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(num_branches, len(num_channels)) + elif num_branches != len(num_inchannels): + error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(num_branches, len(num_inchannels)) + if error_msg: + _logger.error(error_msg) + raise ValueError(error_msg) + + def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1): + downsample = None + if stride != 1 or self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.num_inchannels[branch_index], num_channels[branch_index] * block.expansion, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(num_channels[branch_index] * block.expansion, momentum=_BN_MOMENTUM), + ) + + layers = [block(self.num_inchannels[branch_index], num_channels[branch_index], stride, downsample)] + self.num_inchannels[branch_index] = num_channels[branch_index] * block.expansion + for i in range(1, num_blocks[branch_index]): + layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index])) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + branches = [] + for i in range(num_branches): + branches.append(self._make_one_branch(i, block, num_blocks, num_channels)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self): + if self.num_branches == 1: + return nn.Identity() + + num_branches = self.num_branches + num_inchannels = self.num_inchannels + fuse_layers = [] + for i in range(num_branches if self.multi_scale_output else 1): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_inchannels[i], 1, 1, 0, bias=False), + nn.BatchNorm2d(num_inchannels[i], momentum=_BN_MOMENTUM), + nn.Upsample(scale_factor=2 ** (j - i), mode='nearest'))) + elif j == i: + fuse_layer.append(nn.Identity()) + else: + conv3x3s = [] + for k in range(i - j): + if k == i - j - 1: + num_outchannels_conv3x3 = num_inchannels[i] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), + nn.BatchNorm2d(num_outchannels_conv3x3, momentum=_BN_MOMENTUM))) + else: + num_outchannels_conv3x3 = num_inchannels[j] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), + nn.BatchNorm2d(num_outchannels_conv3x3, momentum=_BN_MOMENTUM), + nn.ReLU(False))) + fuse_layer.append(nn.Sequential(*conv3x3s)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def get_num_inchannels(self): + return self.num_inchannels + + def forward(self, x: List[torch.Tensor]): + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i, branch in enumerate(self.branches): + x[i] = branch(x[i]) + + x_fuse = [] + for i, fuse_outer in enumerate(self.fuse_layers): + y = x[0] if i == 0 else fuse_outer[0](x[0]) + for j in range(1, self.num_branches): + if i == j: + y = y + x[j] + else: + y = y + fuse_outer[j](x[j]) + x_fuse.append(self.fuse_act(y)) + + return x_fuse + + +blocks_dict = { + 'BASIC': BasicBlock, + 'BOTTLENECK': Bottleneck +} + + +class HighResolutionNet(nn.Module): + + def __init__(self, cfg, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0.0, head='classification'): + super(HighResolutionNet, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + + stem_width = cfg['STEM_WIDTH'] + self.conv1 = nn.Conv2d(in_chans, stem_width, kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(stem_width, momentum=_BN_MOMENTUM) + self.act1 = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d(stem_width, 64, kernel_size=3, stride=2, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(64, momentum=_BN_MOMENTUM) + self.act2 = nn.ReLU(inplace=True) + + self.stage1_cfg = cfg['STAGE1'] + num_channels = self.stage1_cfg['NUM_CHANNELS'][0] + block = blocks_dict[self.stage1_cfg['BLOCK']] + num_blocks = self.stage1_cfg['NUM_BLOCKS'][0] + self.layer1 = self._make_layer(block, 64, num_channels, num_blocks) + stage1_out_channel = block.expansion * num_channels + + self.stage2_cfg = cfg['STAGE2'] + num_channels = self.stage2_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage2_cfg['BLOCK']] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels) + self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels) + + self.stage3_cfg = cfg['STAGE3'] + num_channels = self.stage3_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage3_cfg['BLOCK']] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels) + self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels) + + self.stage4_cfg = cfg['STAGE4'] + num_channels = self.stage4_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage4_cfg['BLOCK']] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels) + self.stage4, pre_stage_channels = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True) + + self.head = head + self.head_channels = None # set if _make_head called + if head == 'classification': + # Classification Head + self.num_features = 2048 + self.incre_modules, self.downsamp_modules, self.final_layer = self._make_head(pre_stage_channels) + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + elif head == 'incre': + self.num_features = 2048 + self.incre_modules, _, _ = self._make_head(pre_stage_channels, True) + else: + self.incre_modules = None + self.num_features = 256 + + curr_stride = 2 + # module names aren't actually valid here, hook or FeatureNet based extraction would not work + self.feature_info = [dict(num_chs=64, reduction=curr_stride, module='stem')] + for i, c in enumerate(self.head_channels if self.head_channels else num_channels): + curr_stride *= 2 + c = c * 4 if self.head_channels else c # head block expansion factor of 4 + self.feature_info += [dict(num_chs=c, reduction=curr_stride, module=f'stage{i + 1}')] + + self.init_weights() + + def _make_head(self, pre_stage_channels, incre_only=False): + head_block = Bottleneck + self.head_channels = [32, 64, 128, 256] + + # Increasing the #channels on each resolution + # from C, 2C, 4C, 8C to 128, 256, 512, 1024 + incre_modules = [] + for i, channels in enumerate(pre_stage_channels): + incre_modules.append(self._make_layer(head_block, channels, self.head_channels[i], 1, stride=1)) + incre_modules = nn.ModuleList(incre_modules) + if incre_only: + return incre_modules, None, None + + # downsampling modules + downsamp_modules = [] + for i in range(len(pre_stage_channels) - 1): + in_channels = self.head_channels[i] * head_block.expansion + out_channels = self.head_channels[i + 1] * head_block.expansion + downsamp_module = nn.Sequential( + nn.Conv2d( + in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1), + nn.BatchNorm2d(out_channels, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True) + ) + downsamp_modules.append(downsamp_module) + downsamp_modules = nn.ModuleList(downsamp_modules) + + final_layer = nn.Sequential( + nn.Conv2d( + in_channels=self.head_channels[3] * head_block.expansion, + out_channels=self.num_features, kernel_size=1, stride=1, padding=0 + ), + nn.BatchNorm2d(self.num_features, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True) + ) + + return incre_modules, downsamp_modules, final_layer + + def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer): + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append(nn.Sequential( + nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), + nn.BatchNorm2d(num_channels_cur_layer[i], momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True))) + else: + transition_layers.append(nn.Identity()) + else: + conv3x3s = [] + for j in range(i + 1 - num_branches_pre): + inchannels = num_channels_pre_layer[-1] + outchannels = num_channels_cur_layer[i] if j == i - num_branches_pre else inchannels + conv3x3s.append(nn.Sequential( + nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False), + nn.BatchNorm2d(outchannels, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv3x3s)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block, inplanes, planes, blocks, stride=1): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(planes * block.expansion, momentum=_BN_MOMENTUM), + ) + + layers = [block(inplanes, planes, stride, downsample)] + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(inplanes, planes)) + + return nn.Sequential(*layers) + + def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True): + num_modules = layer_config['NUM_MODULES'] + num_branches = layer_config['NUM_BRANCHES'] + num_blocks = layer_config['NUM_BLOCKS'] + num_channels = layer_config['NUM_CHANNELS'] + block = blocks_dict[layer_config['BLOCK']] + fuse_method = layer_config['FUSE_METHOD'] + + modules = [] + for i in range(num_modules): + # multi_scale_output is only used last module + reset_multi_scale_output = multi_scale_output or i < num_modules - 1 + modules.append(HighResolutionModule( + num_branches, block, num_blocks, num_inchannels, num_channels, fuse_method, reset_multi_scale_output) + ) + num_inchannels = modules[-1].get_num_inchannels() + + return nn.Sequential(*modules), num_inchannels + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def stages(self, x) -> List[torch.Tensor]: + x = self.layer1(x) + + xl = [t(x) for i, t in enumerate(self.transition1)] + yl = self.stage2(xl) + + xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition2)] + yl = self.stage3(xl) + + xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition3)] + yl = self.stage4(xl) + return yl + + def forward_features(self, x): + # Stem + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + + # Stages + yl = self.stages(x) + + # Classification Head + y = self.incre_modules[0](yl[0]) + for i, down in enumerate(self.downsamp_modules): + y = self.incre_modules[i + 1](yl[i + 1]) + down(y) + y = self.final_layer(y) + return y + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classifier(x) + return x + + +class HighResolutionNetFeatures(HighResolutionNet): + """HighResolutionNet feature extraction + + The design of HRNet makes it easy to grab feature maps, this class provides a simple wrapper to do so. + It would be more complicated to use the FeatureNet helpers. + + The `feature_location=incre` allows grabbing increased channel count features using part of the + classification head. If `feature_location=''` the default HRNet features are returned. First stem + conv is used for stride 2 features. + """ + + def __init__(self, cfg, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0.0, + feature_location='incre', out_indices=(0, 1, 2, 3, 4)): + assert feature_location in ('incre', '') + super(HighResolutionNetFeatures, self).__init__( + cfg, in_chans=in_chans, num_classes=num_classes, global_pool=global_pool, + drop_rate=drop_rate, head=feature_location) + self.feature_info = FeatureInfo(self.feature_info, out_indices) + self._out_idx = {i for i in out_indices} + + def forward_features(self, x): + assert False, 'Not supported' + + def forward(self, x) -> List[torch.tensor]: + out = [] + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + if 0 in self._out_idx: + out.append(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + x = self.stages(x) + if self.incre_modules is not None: + x = [incre(f) for f, incre in zip(x, self.incre_modules)] + for i, f in enumerate(x): + if i + 1 in self._out_idx: + out.append(f) + return out + + +def _create_hrnet(variant, pretrained, **model_kwargs): + model_cls = HighResolutionNet + features_only = False + kwargs_filter = None + if model_kwargs.pop('features_only', False): + model_cls = HighResolutionNetFeatures + kwargs_filter = ('num_classes', 'global_pool') + features_only = True + model = build_model_with_cfg( + model_cls, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=cfg_cls[variant], + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **model_kwargs) + if features_only: + model.default_cfg = default_cfg_for_features(model.default_cfg) + return model + + +@register_model +def hrnet_w18_small(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w18_small', pretrained, **kwargs) + + +@register_model +def hrnet_w18_small_v2(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w18_small_v2', pretrained, **kwargs) + + +@register_model +def hrnet_w18(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w18', pretrained, **kwargs) + + +@register_model +def hrnet_w30(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w30', pretrained, **kwargs) + + +@register_model +def hrnet_w32(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w32', pretrained, **kwargs) + + +@register_model +def hrnet_w40(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w40', pretrained, **kwargs) + + +@register_model +def hrnet_w44(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w44', pretrained, **kwargs) + + +@register_model +def hrnet_w48(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w48', pretrained, **kwargs) + + +@register_model +def hrnet_w64(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w64', pretrained, **kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/hub.py b/testbed/huggingface__pytorch-image-models/timm/models/hub.py new file mode 100644 index 0000000000000000000000000000000000000000..9a9b553031fb9d1846338990cd3b6f77228174c6 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/hub.py @@ -0,0 +1,96 @@ +import json +import logging +import os +from functools import partial +from typing import Union, Optional + +import torch +from torch.hub import load_state_dict_from_url, download_url_to_file, urlparse, HASH_REGEX +try: + from torch.hub import get_dir +except ImportError: + from torch.hub import _get_torch_home as get_dir + +from timm import __version__ +try: + from huggingface_hub import hf_hub_url + from huggingface_hub import cached_download + cached_download = partial(cached_download, library_name="timm", library_version=__version__) +except ImportError: + hf_hub_url = None + cached_download = None + +_logger = logging.getLogger(__name__) + + +def get_cache_dir(child_dir=''): + """ + Returns the location of the directory where models are cached (and creates it if necessary). + """ + # Issue warning to move data if old env is set + if os.getenv('TORCH_MODEL_ZOO'): + _logger.warning('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead') + + hub_dir = get_dir() + child_dir = () if not child_dir else (child_dir,) + model_dir = os.path.join(hub_dir, 'checkpoints', *child_dir) + os.makedirs(model_dir, exist_ok=True) + return model_dir + + +def download_cached_file(url, check_hash=True, progress=False): + parts = urlparse(url) + filename = os.path.basename(parts.path) + cached_file = os.path.join(get_cache_dir(), filename) + if not os.path.exists(cached_file): + _logger.info('Downloading: "{}" to {}\n'.format(url, cached_file)) + hash_prefix = None + if check_hash: + r = HASH_REGEX.search(filename) # r is Optional[Match[str]] + hash_prefix = r.group(1) if r else None + download_url_to_file(url, cached_file, hash_prefix, progress=progress) + return cached_file + + +def has_hf_hub(necessary=False): + if hf_hub_url is None and necessary: + # if no HF Hub module installed and it is necessary to continue, raise error + raise RuntimeError( + 'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.') + return hf_hub_url is not None + + +def hf_split(hf_id): + rev_split = hf_id.split('@') + assert 0 < len(rev_split) <= 2, 'hf_hub id should only contain one @ character to identify revision.' + hf_model_id = rev_split[0] + hf_revision = rev_split[-1] if len(rev_split) > 1 else None + return hf_model_id, hf_revision + + +def load_cfg_from_json(json_file: Union[str, os.PathLike]): + with open(json_file, "r", encoding="utf-8") as reader: + text = reader.read() + return json.loads(text) + + +def _download_from_hf(model_id: str, filename: str): + hf_model_id, hf_revision = hf_split(model_id) + url = hf_hub_url(hf_model_id, filename, revision=hf_revision) + return cached_download(url, cache_dir=get_cache_dir('hf')) + + +def load_model_config_from_hf(model_id: str): + assert has_hf_hub(True) + cached_file = _download_from_hf(model_id, 'config.json') + default_cfg = load_cfg_from_json(cached_file) + default_cfg['hf_hub'] = model_id # insert hf_hub id for pretrained weight load during model creation + model_name = default_cfg.get('architecture') + return default_cfg, model_name + + +def load_state_dict_from_hf(model_id: str): + assert has_hf_hub(True) + cached_file = _download_from_hf(model_id, 'pytorch_model.bin') + state_dict = torch.load(cached_file, map_location='cpu') + return state_dict diff --git a/testbed/huggingface__pytorch-image-models/timm/models/inception_resnet_v2.py b/testbed/huggingface__pytorch-image-models/timm/models/inception_resnet_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..716728495a668ae4a11257d32780059b51b28763 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/inception_resnet_v2.py @@ -0,0 +1,358 @@ +""" Pytorch Inception-Resnet-V2 implementation +Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is +based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License) +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['InceptionResnetV2'] + +default_cfgs = { + # ported from http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz + 'inception_resnet_v2': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/inception_resnet_v2-940b1cd6.pth', + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.8975, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', + 'label_offset': 1, # 1001 classes in pretrained weights + }, + # ported from http://download.tensorflow.org/models/ens_adv_inception_resnet_v2_2017_08_18.tar.gz + 'ens_adv_inception_resnet_v2': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ens_adv_inception_resnet_v2-2592a550.pth', + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.8975, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', + 'label_offset': 1, # 1001 classes in pretrained weights + } +} + + +class BasicConv2d(nn.Module): + def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0): + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d( + in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) + self.bn = nn.BatchNorm2d(out_planes, eps=.001) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class Mixed_5b(nn.Module): + def __init__(self): + super(Mixed_5b, self).__init__() + + self.branch0 = BasicConv2d(192, 96, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(192, 48, kernel_size=1, stride=1), + BasicConv2d(48, 64, kernel_size=5, stride=1, padding=2) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(192, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1), + BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(192, 64, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class Block35(nn.Module): + def __init__(self, scale=1.0): + super(Block35, self).__init__() + + self.scale = scale + + self.branch0 = BasicConv2d(320, 32, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(320, 32, kernel_size=1, stride=1), + BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(320, 32, kernel_size=1, stride=1), + BasicConv2d(32, 48, kernel_size=3, stride=1, padding=1), + BasicConv2d(48, 64, kernel_size=3, stride=1, padding=1) + ) + + self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + out = self.conv2d(out) + out = out * self.scale + x + out = self.relu(out) + return out + + +class Mixed_6a(nn.Module): + def __init__(self): + super(Mixed_6a, self).__init__() + + self.branch0 = BasicConv2d(320, 384, kernel_size=3, stride=2) + + self.branch1 = nn.Sequential( + BasicConv2d(320, 256, kernel_size=1, stride=1), + BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1), + BasicConv2d(256, 384, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class Block17(nn.Module): + def __init__(self, scale=1.0): + super(Block17, self).__init__() + + self.scale = scale + + self.branch0 = BasicConv2d(1088, 192, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(1088, 128, kernel_size=1, stride=1), + BasicConv2d(128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)) + ) + + self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + out = self.conv2d(out) + out = out * self.scale + x + out = self.relu(out) + return out + + +class Mixed_7a(nn.Module): + def __init__(self): + super(Mixed_7a, self).__init__() + + self.branch0 = nn.Sequential( + BasicConv2d(1088, 256, kernel_size=1, stride=1), + BasicConv2d(256, 384, kernel_size=3, stride=2) + ) + + self.branch1 = nn.Sequential( + BasicConv2d(1088, 256, kernel_size=1, stride=1), + BasicConv2d(256, 288, kernel_size=3, stride=2) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(1088, 256, kernel_size=1, stride=1), + BasicConv2d(256, 288, kernel_size=3, stride=1, padding=1), + BasicConv2d(288, 320, kernel_size=3, stride=2) + ) + + self.branch3 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class Block8(nn.Module): + + def __init__(self, scale=1.0, no_relu=False): + super(Block8, self).__init__() + + self.scale = scale + + self.branch0 = BasicConv2d(2080, 192, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(2080, 192, kernel_size=1, stride=1), + BasicConv2d(192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)), + BasicConv2d(224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) + ) + + self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1) + self.relu = None if no_relu else nn.ReLU(inplace=False) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + out = self.conv2d(out) + out = out * self.scale + x + if self.relu is not None: + out = self.relu(out) + return out + + +class InceptionResnetV2(nn.Module): + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., output_stride=32, global_pool='avg'): + super(InceptionResnetV2, self).__init__() + self.drop_rate = drop_rate + self.num_classes = num_classes + self.num_features = 1536 + assert output_stride == 32 + + self.conv2d_1a = BasicConv2d(in_chans, 32, kernel_size=3, stride=2) + self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1) + self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1) + self.feature_info = [dict(num_chs=64, reduction=2, module='conv2d_2b')] + + self.maxpool_3a = nn.MaxPool2d(3, stride=2) + self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1) + self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1) + self.feature_info += [dict(num_chs=192, reduction=4, module='conv2d_4a')] + + self.maxpool_5a = nn.MaxPool2d(3, stride=2) + self.mixed_5b = Mixed_5b() + self.repeat = nn.Sequential( + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17) + ) + self.feature_info += [dict(num_chs=320, reduction=8, module='repeat')] + + self.mixed_6a = Mixed_6a() + self.repeat_1 = nn.Sequential( + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10) + ) + self.feature_info += [dict(num_chs=1088, reduction=16, module='repeat_1')] + + self.mixed_7a = Mixed_7a() + self.repeat_2 = nn.Sequential( + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20) + ) + self.block8 = Block8(no_relu=True) + self.conv2d_7b = BasicConv2d(2080, self.num_features, kernel_size=1, stride=1) + self.feature_info += [dict(num_chs=self.num_features, reduction=32, module='conv2d_7b')] + + self.global_pool, self.classif = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def get_classifier(self): + return self.classif + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classif = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv2d_1a(x) + x = self.conv2d_2a(x) + x = self.conv2d_2b(x) + x = self.maxpool_3a(x) + x = self.conv2d_3b(x) + x = self.conv2d_4a(x) + x = self.maxpool_5a(x) + x = self.mixed_5b(x) + x = self.repeat(x) + x = self.mixed_6a(x) + x = self.repeat_1(x) + x = self.mixed_7a(x) + x = self.repeat_2(x) + x = self.block8(x) + x = self.conv2d_7b(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classif(x) + return x + + +def _create_inception_resnet_v2(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + InceptionResnetV2, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def inception_resnet_v2(pretrained=False, **kwargs): + r"""InceptionResnetV2 model architecture from the + `"InceptionV4, Inception-ResNet..." ` paper. + """ + return _create_inception_resnet_v2('inception_resnet_v2', pretrained=pretrained, **kwargs) + + +@register_model +def ens_adv_inception_resnet_v2(pretrained=False, **kwargs): + r""" Ensemble Adversarially trained InceptionResnetV2 model architecture + As per https://arxiv.org/abs/1705.07204 and + https://github.com/tensorflow/models/tree/master/research/adv_imagenet_models. + """ + return _create_inception_resnet_v2('ens_adv_inception_resnet_v2', pretrained=pretrained, **kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/inception_v3.py b/testbed/huggingface__pytorch-image-models/timm/models/inception_v3.py new file mode 100644 index 0000000000000000000000000000000000000000..cbb1107b39b18418769f7cf775490cec4e95bb5b --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/inception_v3.py @@ -0,0 +1,470 @@ +""" Inception-V3 + +Originally from torchvision Inception3 model +Licensed BSD-Clause 3 https://github.com/pytorch/vision/blob/master/LICENSE +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg +from .registry import register_model +from .layers import trunc_normal_, create_classifier, Linear + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'Conv2d_1a_3x3.conv', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + # original PyTorch weights, ported from Tensorflow but modified + 'inception_v3': _cfg( + url='https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth', + has_aux=True), # checkpoint has aux logit layer weights + # my port of Tensorflow SLIM weights (http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz) + 'tf_inception_v3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_inception_v3-e0069de4.pth', + num_classes=1000, has_aux=False, label_offset=1), + # my port of Tensorflow adversarially trained Inception V3 from + # http://download.tensorflow.org/models/adv_inception_v3_2017_08_18.tar.gz + 'adv_inception_v3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/adv_inception_v3-9e27bd63.pth', + num_classes=1000, has_aux=False, label_offset=1), + # from gluon pretrained models, best performing in terms of accuracy/loss metrics + # https://gluon-cv.mxnet.io/model_zoo/classification.html + 'gluon_inception_v3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_inception_v3-9f746940.pth', + mean=IMAGENET_DEFAULT_MEAN, # also works well with inception defaults + std=IMAGENET_DEFAULT_STD, # also works well with inception defaults + has_aux=False, + ) +} + + +class InceptionA(nn.Module): + + def __init__(self, in_channels, pool_features, conv_block=None): + super(InceptionA, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 64, kernel_size=1) + + self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1) + self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2) + + self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1) + + self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch5x5 = self.branch5x5_1(x) + branch5x5 = self.branch5x5_2(branch5x5) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionB(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionB, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2) + + self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2) + + def _forward(self, x): + branch3x3 = self.branch3x3(x) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + + outputs = [branch3x3, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionC(nn.Module): + + def __init__(self, in_channels, channels_7x7, conv_block=None): + super(InceptionC, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 192, kernel_size=1) + + c7 = channels_7x7 + self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1) + self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0)) + + self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1) + self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3)) + + self.branch_pool = conv_block(in_channels, 192, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch7x7 = self.branch7x7_1(x) + branch7x7 = self.branch7x7_2(branch7x7) + branch7x7 = self.branch7x7_3(branch7x7) + + branch7x7dbl = self.branch7x7dbl_1(x) + branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionD(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionD, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1) + self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2) + + self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1) + self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2) + + def _forward(self, x): + branch3x3 = self.branch3x3_1(x) + branch3x3 = self.branch3x3_2(branch3x3) + + branch7x7x3 = self.branch7x7x3_1(x) + branch7x7x3 = self.branch7x7x3_2(branch7x7x3) + branch7x7x3 = self.branch7x7x3_3(branch7x7x3) + branch7x7x3 = self.branch7x7x3_4(branch7x7x3) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + outputs = [branch3x3, branch7x7x3, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionE(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionE, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 320, kernel_size=1) + + self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1) + self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) + self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) + + self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1) + self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1) + self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) + self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) + + self.branch_pool = conv_block(in_channels, 192, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch3x3 = self.branch3x3_1(x) + branch3x3 = [ + self.branch3x3_2a(branch3x3), + self.branch3x3_2b(branch3x3), + ] + branch3x3 = torch.cat(branch3x3, 1) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = [ + self.branch3x3dbl_3a(branch3x3dbl), + self.branch3x3dbl_3b(branch3x3dbl), + ] + branch3x3dbl = torch.cat(branch3x3dbl, 1) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionAux(nn.Module): + + def __init__(self, in_channels, num_classes, conv_block=None): + super(InceptionAux, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.conv0 = conv_block(in_channels, 128, kernel_size=1) + self.conv1 = conv_block(128, 768, kernel_size=5) + self.conv1.stddev = 0.01 + self.fc = Linear(768, num_classes) + self.fc.stddev = 0.001 + + def forward(self, x): + # N x 768 x 17 x 17 + x = F.avg_pool2d(x, kernel_size=5, stride=3) + # N x 768 x 5 x 5 + x = self.conv0(x) + # N x 128 x 5 x 5 + x = self.conv1(x) + # N x 768 x 1 x 1 + # Adaptive average pooling + x = F.adaptive_avg_pool2d(x, (1, 1)) + # N x 768 x 1 x 1 + x = torch.flatten(x, 1) + # N x 768 + x = self.fc(x) + # N x 1000 + return x + + +class BasicConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, **kwargs): + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return F.relu(x, inplace=True) + + +class InceptionV3(nn.Module): + """Inception-V3 with no AuxLogits + FIXME two class defs are redundant, but less screwing around with torchsript fussyness and inconsistent returns + """ + + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg', aux_logits=False): + super(InceptionV3, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.aux_logits = aux_logits + + self.Conv2d_1a_3x3 = BasicConv2d(in_chans, 32, kernel_size=3, stride=2) + self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3) + self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1) + self.Pool1 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1) + self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3) + self.Pool2 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Mixed_5b = InceptionA(192, pool_features=32) + self.Mixed_5c = InceptionA(256, pool_features=64) + self.Mixed_5d = InceptionA(288, pool_features=64) + self.Mixed_6a = InceptionB(288) + self.Mixed_6b = InceptionC(768, channels_7x7=128) + self.Mixed_6c = InceptionC(768, channels_7x7=160) + self.Mixed_6d = InceptionC(768, channels_7x7=160) + self.Mixed_6e = InceptionC(768, channels_7x7=192) + if aux_logits: + self.AuxLogits = InceptionAux(768, num_classes) + else: + self.AuxLogits = None + self.Mixed_7a = InceptionD(768) + self.Mixed_7b = InceptionE(1280) + self.Mixed_7c = InceptionE(2048) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='Conv2d_2b_3x3'), + dict(num_chs=192, reduction=4, module='Conv2d_4a_3x3'), + dict(num_chs=288, reduction=8, module='Mixed_5d'), + dict(num_chs=768, reduction=16, module='Mixed_6e'), + dict(num_chs=2048, reduction=32, module='Mixed_7c'), + ] + + self.num_features = 2048 + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): + stddev = m.stddev if hasattr(m, 'stddev') else 0.1 + trunc_normal_(m.weight, std=stddev) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def forward_preaux(self, x): + # N x 3 x 299 x 299 + x = self.Conv2d_1a_3x3(x) + # N x 32 x 149 x 149 + x = self.Conv2d_2a_3x3(x) + # N x 32 x 147 x 147 + x = self.Conv2d_2b_3x3(x) + # N x 64 x 147 x 147 + x = self.Pool1(x) + # N x 64 x 73 x 73 + x = self.Conv2d_3b_1x1(x) + # N x 80 x 73 x 73 + x = self.Conv2d_4a_3x3(x) + # N x 192 x 71 x 71 + x = self.Pool2(x) + # N x 192 x 35 x 35 + x = self.Mixed_5b(x) + # N x 256 x 35 x 35 + x = self.Mixed_5c(x) + # N x 288 x 35 x 35 + x = self.Mixed_5d(x) + # N x 288 x 35 x 35 + x = self.Mixed_6a(x) + # N x 768 x 17 x 17 + x = self.Mixed_6b(x) + # N x 768 x 17 x 17 + x = self.Mixed_6c(x) + # N x 768 x 17 x 17 + x = self.Mixed_6d(x) + # N x 768 x 17 x 17 + x = self.Mixed_6e(x) + # N x 768 x 17 x 17 + return x + + def forward_postaux(self, x): + x = self.Mixed_7a(x) + # N x 1280 x 8 x 8 + x = self.Mixed_7b(x) + # N x 2048 x 8 x 8 + x = self.Mixed_7c(x) + # N x 2048 x 8 x 8 + return x + + def forward_features(self, x): + x = self.forward_preaux(x) + x = self.forward_postaux(x) + return x + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.fc(x) + return x + + +class InceptionV3Aux(InceptionV3): + """InceptionV3 with AuxLogits + """ + + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg', aux_logits=True): + super(InceptionV3Aux, self).__init__( + num_classes, in_chans, drop_rate, global_pool, aux_logits) + + def forward_features(self, x): + x = self.forward_preaux(x) + aux = self.AuxLogits(x) if self.training else None + x = self.forward_postaux(x) + return x, aux + + def forward(self, x): + x, aux = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.fc(x) + return x, aux + + +def _create_inception_v3(variant, pretrained=False, **kwargs): + default_cfg = default_cfgs[variant] + aux_logits = kwargs.pop('aux_logits', False) + if aux_logits: + assert not kwargs.pop('features_only', False) + model_cls = InceptionV3Aux + load_strict = default_cfg['has_aux'] + else: + model_cls = InceptionV3 + load_strict = not default_cfg['has_aux'] + return build_model_with_cfg( + model_cls, variant, pretrained, + default_cfg=default_cfg, + pretrained_strict=load_strict, + **kwargs) + + +@register_model +def inception_v3(pretrained=False, **kwargs): + # original PyTorch weights, ported from Tensorflow but modified + model = _create_inception_v3('inception_v3', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_inception_v3(pretrained=False, **kwargs): + # my port of Tensorflow SLIM weights (http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz) + model = _create_inception_v3('tf_inception_v3', pretrained=pretrained, **kwargs) + return model + + +@register_model +def adv_inception_v3(pretrained=False, **kwargs): + # my port of Tensorflow adversarially trained Inception V3 from + # http://download.tensorflow.org/models/adv_inception_v3_2017_08_18.tar.gz + model = _create_inception_v3('adv_inception_v3', pretrained=pretrained, **kwargs) + return model + + +@register_model +def gluon_inception_v3(pretrained=False, **kwargs): + # from gluon pretrained models, best performing in terms of accuracy/loss metrics + # https://gluon-cv.mxnet.io/model_zoo/classification.html + model = _create_inception_v3('gluon_inception_v3', pretrained=pretrained, **kwargs) + return model diff --git a/testbed/huggingface__pytorch-image-models/timm/models/inception_v4.py b/testbed/huggingface__pytorch-image-models/timm/models/inception_v4.py new file mode 100644 index 0000000000000000000000000000000000000000..cc899e15daf8087ae6acb17017079c292a1e3aa7 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/inception_v4.py @@ -0,0 +1,316 @@ +""" Pytorch Inception-V4 implementation +Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is +based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License) +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['InceptionV4'] + +default_cfgs = { + 'inception_v4': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/inceptionv4-8e4777a0.pth', + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'features.0.conv', 'classifier': 'last_linear', + 'label_offset': 1, # 1001 classes in pretrained weights + } +} + + +class BasicConv2d(nn.Module): + def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0): + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d( + in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) + self.bn = nn.BatchNorm2d(out_planes, eps=0.001) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class Mixed3a(nn.Module): + def __init__(self): + super(Mixed3a, self).__init__() + self.maxpool = nn.MaxPool2d(3, stride=2) + self.conv = BasicConv2d(64, 96, kernel_size=3, stride=2) + + def forward(self, x): + x0 = self.maxpool(x) + x1 = self.conv(x) + out = torch.cat((x0, x1), 1) + return out + + +class Mixed4a(nn.Module): + def __init__(self): + super(Mixed4a, self).__init__() + + self.branch0 = nn.Sequential( + BasicConv2d(160, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1) + ) + + self.branch1 = nn.Sequential( + BasicConv2d(160, 64, kernel_size=1, stride=1), + BasicConv2d(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)), + BasicConv2d(64, 96, kernel_size=(3, 3), stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + return out + + +class Mixed5a(nn.Module): + def __init__(self): + super(Mixed5a, self).__init__() + self.conv = BasicConv2d(192, 192, kernel_size=3, stride=2) + self.maxpool = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.conv(x) + x1 = self.maxpool(x) + out = torch.cat((x0, x1), 1) + return out + + +class InceptionA(nn.Module): + def __init__(self): + super(InceptionA, self).__init__() + self.branch0 = BasicConv2d(384, 96, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(384, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(384, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1), + BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(384, 96, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class ReductionA(nn.Module): + def __init__(self): + super(ReductionA, self).__init__() + self.branch0 = BasicConv2d(384, 384, kernel_size=3, stride=2) + + self.branch1 = nn.Sequential( + BasicConv2d(384, 192, kernel_size=1, stride=1), + BasicConv2d(192, 224, kernel_size=3, stride=1, padding=1), + BasicConv2d(224, 256, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class InceptionB(nn.Module): + def __init__(self): + super(InceptionB, self).__init__() + self.branch0 = BasicConv2d(1024, 384, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(1024, 192, kernel_size=1, stride=1), + BasicConv2d(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0)) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(1024, 192, kernel_size=1, stride=1), + BasicConv2d(192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)), + BasicConv2d(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0)), + BasicConv2d(224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(1024, 128, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class ReductionB(nn.Module): + def __init__(self): + super(ReductionB, self).__init__() + + self.branch0 = nn.Sequential( + BasicConv2d(1024, 192, kernel_size=1, stride=1), + BasicConv2d(192, 192, kernel_size=3, stride=2) + ) + + self.branch1 = nn.Sequential( + BasicConv2d(1024, 256, kernel_size=1, stride=1), + BasicConv2d(256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0)), + BasicConv2d(320, 320, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class InceptionC(nn.Module): + def __init__(self): + super(InceptionC, self).__init__() + + self.branch0 = BasicConv2d(1536, 256, kernel_size=1, stride=1) + + self.branch1_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1) + self.branch1_1a = BasicConv2d(384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1)) + self.branch1_1b = BasicConv2d(384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) + + self.branch2_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1) + self.branch2_1 = BasicConv2d(384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0)) + self.branch2_2 = BasicConv2d(448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1)) + self.branch2_3a = BasicConv2d(512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1)) + self.branch2_3b = BasicConv2d(512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(1536, 256, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + + x1_0 = self.branch1_0(x) + x1_1a = self.branch1_1a(x1_0) + x1_1b = self.branch1_1b(x1_0) + x1 = torch.cat((x1_1a, x1_1b), 1) + + x2_0 = self.branch2_0(x) + x2_1 = self.branch2_1(x2_0) + x2_2 = self.branch2_2(x2_1) + x2_3a = self.branch2_3a(x2_2) + x2_3b = self.branch2_3b(x2_2) + x2 = torch.cat((x2_3a, x2_3b), 1) + + x3 = self.branch3(x) + + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class InceptionV4(nn.Module): + def __init__(self, num_classes=1000, in_chans=3, output_stride=32, drop_rate=0., global_pool='avg'): + super(InceptionV4, self).__init__() + assert output_stride == 32 + self.drop_rate = drop_rate + self.num_classes = num_classes + self.num_features = 1536 + + self.features = nn.Sequential( + BasicConv2d(in_chans, 32, kernel_size=3, stride=2), + BasicConv2d(32, 32, kernel_size=3, stride=1), + BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1), + Mixed3a(), + Mixed4a(), + Mixed5a(), + InceptionA(), + InceptionA(), + InceptionA(), + InceptionA(), + ReductionA(), # Mixed6a + InceptionB(), + InceptionB(), + InceptionB(), + InceptionB(), + InceptionB(), + InceptionB(), + InceptionB(), + ReductionB(), # Mixed7a + InceptionC(), + InceptionC(), + InceptionC(), + ) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='features.2'), + dict(num_chs=160, reduction=4, module='features.3'), + dict(num_chs=384, reduction=8, module='features.9'), + dict(num_chs=1024, reduction=16, module='features.17'), + dict(num_chs=1536, reduction=32, module='features.21'), + ] + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def get_classifier(self): + return self.last_linear + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + return self.features(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.last_linear(x) + return x + + +def _create_inception_v4(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + InceptionV4, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) + + +@register_model +def inception_v4(pretrained=False, **kwargs): + return _create_inception_v4('inception_v4', pretrained, **kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/__init__.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e9a5f18fddece08d99e47e8f19d0bc0f852c5bcf --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/__init__.py @@ -0,0 +1,39 @@ +from .activations import * +from .adaptive_avgmax_pool import \ + adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d +from .blur_pool import BlurPool2d +from .classifier import ClassifierHead, create_classifier +from .cond_conv2d import CondConv2d, get_condconv_initializer +from .config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\ + set_layer_config +from .conv2d_same import Conv2dSame, conv2d_same +from .conv_bn_act import ConvBnAct +from .create_act import create_act_layer, get_act_layer, get_act_fn +from .create_attn import get_attn, create_attn +from .create_conv2d import create_conv2d +from .create_norm_act import get_norm_act_layer, create_norm_act, convert_norm_act +from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path +from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn +from .evo_norm import EvoNormBatch2d, EvoNormSample2d +from .gather_excite import GatherExcite +from .global_context import GlobalContext +from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible +from .inplace_abn import InplaceAbn +from .linear import Linear +from .mixed_conv2d import MixedConv2d +from .mlp import Mlp, GluMlp, GatedMlp +from .non_local_attn import NonLocalAttn, BatNonLocalAttn +from .norm import GroupNorm, LayerNorm2d +from .norm_act import BatchNormAct2d, GroupNormAct +from .padding import get_padding, get_same_padding, pad_same +from .patch_embed import PatchEmbed +from .pool2d_same import AvgPool2dSame, create_pool2d +from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite +from .selective_kernel import SelectiveKernel +from .separable_conv import SeparableConv2d, SeparableConvBnAct +from .space_to_depth import SpaceToDepthModule +from .split_attn import SplitAttn +from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model +from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame +from .test_time_pool import TestTimePoolHead, apply_test_time_pool +from .weight_init import trunc_normal_, variance_scaling_, lecun_normal_ diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/activations.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/activations.py new file mode 100644 index 0000000000000000000000000000000000000000..e16b3bd3a1898365530c1ffc5154a0a4746a136e --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/activations.py @@ -0,0 +1,145 @@ +""" Activations + +A collection of activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + + +def swish(x, inplace: bool = False): + """Swish - Described in: https://arxiv.org/abs/1710.05941 + """ + return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid()) + + +class Swish(nn.Module): + def __init__(self, inplace: bool = False): + super(Swish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return swish(x, self.inplace) + + +def mish(x, inplace: bool = False): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + NOTE: I don't have a working inplace variant + """ + return x.mul(F.softplus(x).tanh()) + + +class Mish(nn.Module): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + """ + def __init__(self, inplace: bool = False): + super(Mish, self).__init__() + + def forward(self, x): + return mish(x) + + +def sigmoid(x, inplace: bool = False): + return x.sigmoid_() if inplace else x.sigmoid() + + +# PyTorch has this, but not with a consistent inplace argmument interface +class Sigmoid(nn.Module): + def __init__(self, inplace: bool = False): + super(Sigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x.sigmoid_() if self.inplace else x.sigmoid() + + +def tanh(x, inplace: bool = False): + return x.tanh_() if inplace else x.tanh() + + +# PyTorch has this, but not with a consistent inplace argmument interface +class Tanh(nn.Module): + def __init__(self, inplace: bool = False): + super(Tanh, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x.tanh_() if self.inplace else x.tanh() + + +def hard_swish(x, inplace: bool = False): + inner = F.relu6(x + 3.).div_(6.) + return x.mul_(inner) if inplace else x.mul(inner) + + +class HardSwish(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_swish(x, self.inplace) + + +def hard_sigmoid(x, inplace: bool = False): + if inplace: + return x.add_(3.).clamp_(0., 6.).div_(6.) + else: + return F.relu6(x + 3.) / 6. + + +class HardSigmoid(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_sigmoid(x, self.inplace) + + +def hard_mish(x, inplace: bool = False): + """ Hard Mish + Experimental, based on notes by Mish author Diganta Misra at + https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md + """ + if inplace: + return x.mul_(0.5 * (x + 2).clamp(min=0, max=2)) + else: + return 0.5 * x * (x + 2).clamp(min=0, max=2) + + +class HardMish(nn.Module): + def __init__(self, inplace: bool = False): + super(HardMish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_mish(x, self.inplace) + + +class PReLU(nn.PReLU): + """Applies PReLU (w/ dummy inplace arg) + """ + def __init__(self, num_parameters: int = 1, init: float = 0.25, inplace: bool = False) -> None: + super(PReLU, self).__init__(num_parameters=num_parameters, init=init) + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return F.prelu(input, self.weight) + + +def gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor: + return F.gelu(x) + + +class GELU(nn.Module): + """Applies the Gaussian Error Linear Units function (w/ dummy inplace arg) + """ + def __init__(self, inplace: bool = False): + super(GELU, self).__init__() + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return F.gelu(input) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/activations_jit.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/activations_jit.py new file mode 100644 index 0000000000000000000000000000000000000000..b4a516530ad0abf41f720ac83d02791179bb7b67 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/activations_jit.py @@ -0,0 +1,90 @@ +""" Activations + +A collection of jit-scripted activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. + +All jit scripted activations are lacking in-place variations on purpose, scripted kernel fusion does not +currently work across in-place op boundaries, thus performance is equal to or less than the non-scripted +versions if they contain in-place ops. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + + +@torch.jit.script +def swish_jit(x, inplace: bool = False): + """Swish - Described in: https://arxiv.org/abs/1710.05941 + """ + return x.mul(x.sigmoid()) + + +@torch.jit.script +def mish_jit(x, _inplace: bool = False): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + """ + return x.mul(F.softplus(x).tanh()) + + +class SwishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(SwishJit, self).__init__() + + def forward(self, x): + return swish_jit(x) + + +class MishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(MishJit, self).__init__() + + def forward(self, x): + return mish_jit(x) + + +@torch.jit.script +def hard_sigmoid_jit(x, inplace: bool = False): + # return F.relu6(x + 3.) / 6. + return (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? + + +class HardSigmoidJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoidJit, self).__init__() + + def forward(self, x): + return hard_sigmoid_jit(x) + + +@torch.jit.script +def hard_swish_jit(x, inplace: bool = False): + # return x * (F.relu6(x + 3.) / 6) + return x * (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? + + +class HardSwishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwishJit, self).__init__() + + def forward(self, x): + return hard_swish_jit(x) + + +@torch.jit.script +def hard_mish_jit(x, inplace: bool = False): + """ Hard Mish + Experimental, based on notes by Mish author Diganta Misra at + https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md + """ + return 0.5 * x * (x + 2).clamp(min=0, max=2) + + +class HardMishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardMishJit, self).__init__() + + def forward(self, x): + return hard_mish_jit(x) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/activations_me.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/activations_me.py new file mode 100644 index 0000000000000000000000000000000000000000..9a12bb7ebbfef02c508801742d38da6b48dd1bb6 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/activations_me.py @@ -0,0 +1,218 @@ +""" Activations (memory-efficient w/ custom autograd) + +A collection of activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. + +These activations are not compatible with jit scripting or ONNX export of the model, please use either +the JIT or basic versions of the activations. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + + +@torch.jit.script +def swish_jit_fwd(x): + return x.mul(torch.sigmoid(x)) + + +@torch.jit.script +def swish_jit_bwd(x, grad_output): + x_sigmoid = torch.sigmoid(x) + return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid))) + + +class SwishJitAutoFn(torch.autograd.Function): + """ torch.jit.script optimised Swish w/ memory-efficient checkpoint + Inspired by conversation btw Jeremy Howard & Adam Pazske + https://twitter.com/jeremyphoward/status/1188251041835315200 + """ + @staticmethod + def symbolic(g, x): + return g.op("Mul", x, g.op("Sigmoid", x)) + + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return swish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return swish_jit_bwd(x, grad_output) + + +def swish_me(x, inplace=False): + return SwishJitAutoFn.apply(x) + + +class SwishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(SwishMe, self).__init__() + + def forward(self, x): + return SwishJitAutoFn.apply(x) + + +@torch.jit.script +def mish_jit_fwd(x): + return x.mul(torch.tanh(F.softplus(x))) + + +@torch.jit.script +def mish_jit_bwd(x, grad_output): + x_sigmoid = torch.sigmoid(x) + x_tanh_sp = F.softplus(x).tanh() + return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp)) + + +class MishJitAutoFn(torch.autograd.Function): + """ Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + A memory efficient, jit scripted variant of Mish + """ + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return mish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return mish_jit_bwd(x, grad_output) + + +def mish_me(x, inplace=False): + return MishJitAutoFn.apply(x) + + +class MishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(MishMe, self).__init__() + + def forward(self, x): + return MishJitAutoFn.apply(x) + + +@torch.jit.script +def hard_sigmoid_jit_fwd(x, inplace: bool = False): + return (x + 3).clamp(min=0, max=6).div(6.) + + +@torch.jit.script +def hard_sigmoid_jit_bwd(x, grad_output): + m = torch.ones_like(x) * ((x >= -3.) & (x <= 3.)) / 6. + return grad_output * m + + +class HardSigmoidJitAutoFn(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_sigmoid_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_sigmoid_jit_bwd(x, grad_output) + + +def hard_sigmoid_me(x, inplace: bool = False): + return HardSigmoidJitAutoFn.apply(x) + + +class HardSigmoidMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoidMe, self).__init__() + + def forward(self, x): + return HardSigmoidJitAutoFn.apply(x) + + +@torch.jit.script +def hard_swish_jit_fwd(x): + return x * (x + 3).clamp(min=0, max=6).div(6.) + + +@torch.jit.script +def hard_swish_jit_bwd(x, grad_output): + m = torch.ones_like(x) * (x >= 3.) + m = torch.where((x >= -3.) & (x <= 3.), x / 3. + .5, m) + return grad_output * m + + +class HardSwishJitAutoFn(torch.autograd.Function): + """A memory efficient, jit-scripted HardSwish activation""" + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_swish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_swish_jit_bwd(x, grad_output) + + @staticmethod + def symbolic(g, self): + input = g.op("Add", self, g.op('Constant', value_t=torch.tensor(3, dtype=torch.float))) + hardtanh_ = g.op("Clip", input, g.op('Constant', value_t=torch.tensor(0, dtype=torch.float)), g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) + hardtanh_ = g.op("Div", hardtanh_, g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) + return g.op("Mul", self, hardtanh_) + + +def hard_swish_me(x, inplace=False): + return HardSwishJitAutoFn.apply(x) + + +class HardSwishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwishMe, self).__init__() + + def forward(self, x): + return HardSwishJitAutoFn.apply(x) + + +@torch.jit.script +def hard_mish_jit_fwd(x): + return 0.5 * x * (x + 2).clamp(min=0, max=2) + + +@torch.jit.script +def hard_mish_jit_bwd(x, grad_output): + m = torch.ones_like(x) * (x >= -2.) + m = torch.where((x >= -2.) & (x <= 0.), x + 1., m) + return grad_output * m + + +class HardMishJitAutoFn(torch.autograd.Function): + """ A memory efficient, jit scripted variant of Hard Mish + Experimental, based on notes by Mish author Diganta Misra at + https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md + """ + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_mish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_mish_jit_bwd(x, grad_output) + + +def hard_mish_me(x, inplace: bool = False): + return HardMishJitAutoFn.apply(x) + + +class HardMishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardMishMe, self).__init__() + + def forward(self, x): + return HardMishJitAutoFn.apply(x) + + + diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/adaptive_avgmax_pool.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/adaptive_avgmax_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..ebc6ada8c5b28c7eac5785b0cc2933eb01a15d46 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/adaptive_avgmax_pool.py @@ -0,0 +1,118 @@ +""" PyTorch selectable adaptive pooling +Adaptive pooling with the ability to select the type of pooling from: + * 'avg' - Average pooling + * 'max' - Max pooling + * 'avgmax' - Sum of average and max pooling re-scaled by 0.5 + * 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim + +Both a functional and a nn.Module version of the pooling is provided. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def adaptive_pool_feat_mult(pool_type='avg'): + if pool_type == 'catavgmax': + return 2 + else: + return 1 + + +def adaptive_avgmax_pool2d(x, output_size=1): + x_avg = F.adaptive_avg_pool2d(x, output_size) + x_max = F.adaptive_max_pool2d(x, output_size) + return 0.5 * (x_avg + x_max) + + +def adaptive_catavgmax_pool2d(x, output_size=1): + x_avg = F.adaptive_avg_pool2d(x, output_size) + x_max = F.adaptive_max_pool2d(x, output_size) + return torch.cat((x_avg, x_max), 1) + + +def select_adaptive_pool2d(x, pool_type='avg', output_size=1): + """Selectable global pooling function with dynamic input kernel size + """ + if pool_type == 'avg': + x = F.adaptive_avg_pool2d(x, output_size) + elif pool_type == 'avgmax': + x = adaptive_avgmax_pool2d(x, output_size) + elif pool_type == 'catavgmax': + x = adaptive_catavgmax_pool2d(x, output_size) + elif pool_type == 'max': + x = F.adaptive_max_pool2d(x, output_size) + else: + assert False, 'Invalid pool type: %s' % pool_type + return x + + +class FastAdaptiveAvgPool2d(nn.Module): + def __init__(self, flatten=False): + super(FastAdaptiveAvgPool2d, self).__init__() + self.flatten = flatten + + def forward(self, x): + return x.mean((2, 3), keepdim=not self.flatten) + + +class AdaptiveAvgMaxPool2d(nn.Module): + def __init__(self, output_size=1): + super(AdaptiveAvgMaxPool2d, self).__init__() + self.output_size = output_size + + def forward(self, x): + return adaptive_avgmax_pool2d(x, self.output_size) + + +class AdaptiveCatAvgMaxPool2d(nn.Module): + def __init__(self, output_size=1): + super(AdaptiveCatAvgMaxPool2d, self).__init__() + self.output_size = output_size + + def forward(self, x): + return adaptive_catavgmax_pool2d(x, self.output_size) + + +class SelectAdaptivePool2d(nn.Module): + """Selectable global pooling layer with dynamic input kernel size + """ + def __init__(self, output_size=1, pool_type='fast', flatten=False): + super(SelectAdaptivePool2d, self).__init__() + self.pool_type = pool_type or '' # convert other falsy values to empty string for consistent TS typing + self.flatten = nn.Flatten(1) if flatten else nn.Identity() + if pool_type == '': + self.pool = nn.Identity() # pass through + elif pool_type == 'fast': + assert output_size == 1 + self.pool = FastAdaptiveAvgPool2d(flatten) + self.flatten = nn.Identity() + elif pool_type == 'avg': + self.pool = nn.AdaptiveAvgPool2d(output_size) + elif pool_type == 'avgmax': + self.pool = AdaptiveAvgMaxPool2d(output_size) + elif pool_type == 'catavgmax': + self.pool = AdaptiveCatAvgMaxPool2d(output_size) + elif pool_type == 'max': + self.pool = nn.AdaptiveMaxPool2d(output_size) + else: + assert False, 'Invalid pool type: %s' % pool_type + + def is_identity(self): + return not self.pool_type + + def forward(self, x): + x = self.pool(x) + x = self.flatten(x) + return x + + def feat_mult(self): + return adaptive_pool_feat_mult(self.pool_type) + + def __repr__(self): + return self.__class__.__name__ + ' (' \ + + 'pool_type=' + self.pool_type \ + + ', flatten=' + str(self.flatten) + ')' + diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/attention_pool2d.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/attention_pool2d.py new file mode 100644 index 0000000000000000000000000000000000000000..66e49b8a93d7455077866f74421e0027ca73d95a --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/attention_pool2d.py @@ -0,0 +1,182 @@ +""" Attention Pool 2D + +Implementations of 2D spatial feature pooling using multi-head attention instead of average pool. + +Based on idea in CLIP by OpenAI, licensed Apache 2.0 +https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py + +Hacked together by / Copyright 2021 Ross Wightman +""" +import math +from typing import List, Union, Tuple + +import torch +import torch.nn as nn + +from .helpers import to_2tuple +from .weight_init import trunc_normal_ + + +def rot(x): + return torch.stack([-x[..., 1::2], x[..., ::2]], -1).reshape(x.shape) + + +def apply_rot_embed(x: torch.Tensor, sin_emb, cos_emb): + return x * cos_emb + rot(x) * sin_emb + + +def apply_rot_embed_list(x: List[torch.Tensor], sin_emb, cos_emb): + if isinstance(x, torch.Tensor): + x = [x] + return [t * cos_emb + rot(t) * sin_emb for t in x] + + +class RotaryEmbedding(nn.Module): + """ Rotary position embedding + + NOTE: This is my initial attempt at impl rotary embedding for spatial use, it has not + been well tested, and will likely change. It will be moved to its own file. + + The following impl/resources were referenced for this impl: + * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py + * https://blog.eleuther.ai/rotary-embeddings/ + """ + def __init__(self, dim, max_freq=4): + super().__init__() + self.dim = dim + self.register_buffer('bands', 2 ** torch.linspace(0., max_freq - 1, self.dim // 4), persistent=False) + + def get_embed(self, shape: torch.Size, device: torch.device = None, dtype: torch.dtype = None): + """ + NOTE: shape arg should include spatial dim only + """ + device = device or self.bands.device + dtype = dtype or self.bands.dtype + if not isinstance(shape, torch.Size): + shape = torch.Size(shape) + N = shape.numel() + grid = torch.stack(torch.meshgrid( + [torch.linspace(-1., 1., steps=s, device=device, dtype=dtype) for s in shape]), dim=-1).unsqueeze(-1) + emb = grid * math.pi * self.bands + sin = emb.sin().reshape(N, -1).repeat_interleave(2, -1) + cos = emb.cos().reshape(N, -1).repeat_interleave(2, -1) + return sin, cos + + def forward(self, x): + # assuming channel-first tensor where spatial dim are >= 2 + sin_emb, cos_emb = self.get_embed(x.shape[2:]) + return apply_rot_embed(x, sin_emb, cos_emb) + + +class RotAttentionPool2d(nn.Module): + """ Attention based 2D feature pooling w/ rotary (relative) pos embedding. + This is a multi-head attention based replacement for (spatial) average pooling in NN architectures. + + Adapted from the AttentionPool2d in CLIP w/ rotary embedding instead of learned embed. + https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py + + NOTE: While this impl does not require a fixed feature size, performance at differeing resolutions from + train varies widely and falls off dramatically. I'm not sure if there is a way around this... -RW + """ + def __init__( + self, + in_features: int, + out_features: int = None, + embed_dim: int = None, + num_heads: int = 4, + qkv_bias: bool = True, + ): + super().__init__() + embed_dim = embed_dim or in_features + out_features = out_features or in_features + self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) + self.proj = nn.Linear(embed_dim, out_features) + self.num_heads = num_heads + assert embed_dim % num_heads == 0 + self.head_dim = embed_dim // num_heads + self.scale = self.head_dim ** -0.5 + self.pos_embed = RotaryEmbedding(self.head_dim) + + trunc_normal_(self.qkv.weight, std=in_features ** -0.5) + nn.init.zeros_(self.qkv.bias) + + def forward(self, x): + B, _, H, W = x.shape + N = H * W + sin_emb, cos_emb = self.pos_embed.get_embed(x.shape[2:]) + x = x.reshape(B, -1, N).permute(0, 2, 1) + + x = torch.cat([x.mean(1, keepdim=True), x], dim=1) + + x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = x[0], x[1], x[2] + + qc, q = q[:, :, :1], q[:, :, 1:] + q = apply_rot_embed(q, sin_emb, cos_emb) + q = torch.cat([qc, q], dim=2) + + kc, k = k[:, :, :1], k[:, :, 1:] + k = apply_rot_embed(k, sin_emb, cos_emb) + k = torch.cat([kc, k], dim=2) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1) + x = self.proj(x) + return x[:, 0] + + +class AttentionPool2d(nn.Module): + """ Attention based 2D feature pooling w/ learned (absolute) pos embedding. + This is a multi-head attention based replacement for (spatial) average pooling in NN architectures. + + It was based on impl in CLIP by OpenAI + https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py + + NOTE: This requires feature size upon construction and well prevent adaptive sizing of the network. + """ + def __init__( + self, + in_features: int, + feat_size: Union[int, Tuple[int, int]], + out_features: int = None, + embed_dim: int = None, + num_heads: int = 4, + qkv_bias: bool = True, + ): + super().__init__() + + embed_dim = embed_dim or in_features + out_features = out_features or in_features + assert embed_dim % num_heads == 0 + self.feat_size = to_2tuple(feat_size) + self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) + self.proj = nn.Linear(embed_dim, out_features) + self.num_heads = num_heads + self.head_dim = embed_dim // num_heads + self.scale = self.head_dim ** -0.5 + + spatial_dim = self.feat_size[0] * self.feat_size[1] + self.pos_embed = nn.Parameter(torch.zeros(spatial_dim + 1, in_features)) + trunc_normal_(self.pos_embed, std=in_features ** -0.5) + trunc_normal_(self.qkv.weight, std=in_features ** -0.5) + nn.init.zeros_(self.qkv.bias) + + def forward(self, x): + B, _, H, W = x.shape + N = H * W + assert self.feat_size[0] == H + assert self.feat_size[1] == W + x = x.reshape(B, -1, N).permute(0, 2, 1) + x = torch.cat([x.mean(1, keepdim=True), x], dim=1) + x = x + self.pos_embed.unsqueeze(0).to(x.dtype) + + x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = x[0], x[1], x[2] + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1) + x = self.proj(x) + return x[:, 0] diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/blur_pool.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/blur_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..ca4ce756e434d577c38a20e2e8de2909777862d4 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/blur_pool.py @@ -0,0 +1,42 @@ +""" +BlurPool layer inspired by + - Kornia's Max_BlurPool2d + - Making Convolutional Networks Shift-Invariant Again :cite:`zhang2019shiftinvar` + +Hacked together by Chris Ha and Ross Wightman +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from .padding import get_padding + + +class BlurPool2d(nn.Module): + r"""Creates a module that computes blurs and downsample a given feature map. + See :cite:`zhang2019shiftinvar` for more details. + Corresponds to the Downsample class, which does blurring and subsampling + + Args: + channels = Number of input channels + filt_size (int): binomial filter size for blurring. currently supports 3 (default) and 5. + stride (int): downsampling filter stride + + Returns: + torch.Tensor: the transformed tensor. + """ + def __init__(self, channels, filt_size=3, stride=2) -> None: + super(BlurPool2d, self).__init__() + assert filt_size > 1 + self.channels = channels + self.filt_size = filt_size + self.stride = stride + self.padding = [get_padding(filt_size, stride, dilation=1)] * 4 + coeffs = torch.tensor((np.poly1d((0.5, 0.5)) ** (self.filt_size - 1)).coeffs.astype(np.float32)) + blur_filter = (coeffs[:, None] * coeffs[None, :])[None, None, :, :].repeat(self.channels, 1, 1, 1) + self.register_buffer('filt', blur_filter, persistent=False) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = F.pad(x, self.padding, 'reflect') + return F.conv2d(x, self.filt, stride=self.stride, groups=x.shape[1]) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/bottleneck_attn.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/bottleneck_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..c0c619ccdbeb3a2d51bcafaa0abec78e74ad3c12 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/bottleneck_attn.py @@ -0,0 +1,129 @@ +""" Bottleneck Self Attention (Bottleneck Transformers) + +Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 + +@misc{2101.11605, +Author = {Aravind Srinivas and Tsung-Yi Lin and Niki Parmar and Jonathon Shlens and Pieter Abbeel and Ashish Vaswani}, +Title = {Bottleneck Transformers for Visual Recognition}, +Year = {2021}, +} + +Based on ref gist at: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + +This impl is a WIP but given that it is based on the ref gist likely not too far off. + +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import to_2tuple +from .weight_init import trunc_normal_ + + +def rel_logits_1d(q, rel_k, permute_mask: List[int]): + """ Compute relative logits along one dimension + + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + Args: + q: (batch, heads, height, width, dim) + rel_k: (2 * width - 1, dim) + permute_mask: permute output dim according to this + """ + B, H, W, dim = q.shape + x = (q @ rel_k.transpose(-1, -2)) + x = x.reshape(-1, W, 2 * W -1) + + # pad to shift from relative to absolute indexing + x_pad = F.pad(x, [0, 1]).flatten(1) + x_pad = F.pad(x_pad, [0, W - 1]) + + # reshape and slice out the padded elements + x_pad = x_pad.reshape(-1, W + 1, 2 * W - 1) + x = x_pad[:, :W, W - 1:] + + # reshape and tile + x = x.reshape(B, H, 1, W, W).expand(-1, -1, H, -1, -1) + return x.permute(permute_mask) + + +class PosEmbedRel(nn.Module): + """ Relative Position Embedding + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + """ + def __init__(self, feat_size, dim_head, scale): + super().__init__() + self.height, self.width = to_2tuple(feat_size) + self.dim_head = dim_head + self.scale = scale + self.height_rel = nn.Parameter(torch.randn(self.height * 2 - 1, dim_head) * self.scale) + self.width_rel = nn.Parameter(torch.randn(self.width * 2 - 1, dim_head) * self.scale) + + def forward(self, q): + B, num_heads, HW, _ = q.shape + + # relative logits in width dimension. + q = q.reshape(B * num_heads, self.height, self.width, -1) + rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) + + # relative logits in height dimension. + q = q.transpose(1, 2) + rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) + + rel_logits = rel_logits_h + rel_logits_w + rel_logits = rel_logits.reshape(B, num_heads, HW, HW) + return rel_logits + + +class BottleneckAttn(nn.Module): + """ Bottleneck Attention + Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 + """ + def __init__(self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, qkv_bias=False): + super().__init__() + assert feat_size is not None, 'A concrete feature size matching expected input (H, W) is required' + dim_out = dim_out or dim + assert dim_out % num_heads == 0 + self.num_heads = num_heads + self.dim_out = dim_out + self.dim_head = dim_out // num_heads + self.scale = self.dim_head ** -0.5 + + self.qkv = nn.Conv2d(dim, self.dim_out * 3, 1, bias=qkv_bias) + + # NOTE I'm only supporting relative pos embedding for now + self.pos_embed = PosEmbedRel(feat_size, dim_head=self.dim_head, scale=self.scale) + + self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) + trunc_normal_(self.pos_embed.height_rel, std=self.scale) + trunc_normal_(self.pos_embed.width_rel, std=self.scale) + + def forward(self, x): + B, C, H, W = x.shape + assert H == self.pos_embed.height + assert W == self.pos_embed.width + + x = self.qkv(x) # B, 3 * num_heads * dim_head, H, W + x = x.reshape(B, -1, self.dim_head, H * W).transpose(-1, -2) + q, k, v = torch.split(x, self.num_heads, dim=1) + + attn_logits = (q @ k.transpose(-1, -2)) * self.scale + attn_logits = attn_logits + self.pos_embed(q) # B, num_heads, H * W, H * W + + attn_out = attn_logits.softmax(dim=-1) + attn_out = (attn_out @ v).transpose(1, 2).reshape(B, self.dim_out, H, W) # B, dim_out, H, W + attn_out = self.pool(attn_out) + return attn_out + + diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/cbam.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/cbam.py new file mode 100644 index 0000000000000000000000000000000000000000..bacf5cf07b695ce6c5fd87facc79f6a5773e6ecf --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/cbam.py @@ -0,0 +1,112 @@ +""" CBAM (sort-of) Attention + +Experimental impl of CBAM: Convolutional Block Attention Module: https://arxiv.org/abs/1807.06521 + +WARNING: Results with these attention layers have been mixed. They can significantly reduce performance on +some tasks, especially fine-grained it seems. I may end up removing this impl. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch import nn as nn +import torch.nn.functional as F + +from .conv_bn_act import ConvBnAct +from .create_act import create_act_layer, get_act_layer +from .helpers import make_divisible + + +class ChannelAttn(nn.Module): + """ Original CBAM channel attention module, currently avg + max pool variant only. + """ + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(ChannelAttn, self).__init__() + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) + self.fc1 = nn.Conv2d(channels, rd_channels, 1, bias=mlp_bias) + self.act = act_layer(inplace=True) + self.fc2 = nn.Conv2d(rd_channels, channels, 1, bias=mlp_bias) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_avg = self.fc2(self.act(self.fc1(x.mean((2, 3), keepdim=True)))) + x_max = self.fc2(self.act(self.fc1(x.amax((2, 3), keepdim=True)))) + return x * self.gate(x_avg + x_max) + + +class LightChannelAttn(ChannelAttn): + """An experimental 'lightweight' that sums avg + max pool first + """ + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(LightChannelAttn, self).__init__( + channels, rd_ratio, rd_channels, rd_divisor, act_layer, gate_layer, mlp_bias) + + def forward(self, x): + x_pool = 0.5 * x.mean((2, 3), keepdim=True) + 0.5 * x.amax((2, 3), keepdim=True) + x_attn = self.fc2(self.act(self.fc1(x_pool))) + return x * F.sigmoid(x_attn) + + +class SpatialAttn(nn.Module): + """ Original CBAM spatial attention module + """ + def __init__(self, kernel_size=7, gate_layer='sigmoid'): + super(SpatialAttn, self).__init__() + self.conv = ConvBnAct(2, 1, kernel_size, act_layer=None) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_attn = torch.cat([x.mean(dim=1, keepdim=True), x.amax(dim=1, keepdim=True)], dim=1) + x_attn = self.conv(x_attn) + return x * self.gate(x_attn) + + +class LightSpatialAttn(nn.Module): + """An experimental 'lightweight' variant that sums avg_pool and max_pool results. + """ + def __init__(self, kernel_size=7, gate_layer='sigmoid'): + super(LightSpatialAttn, self).__init__() + self.conv = ConvBnAct(1, 1, kernel_size, act_layer=None) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_attn = 0.5 * x.mean(dim=1, keepdim=True) + 0.5 * x.amax(dim=1, keepdim=True) + x_attn = self.conv(x_attn) + return x * self.gate(x_attn) + + +class CbamModule(nn.Module): + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(CbamModule, self).__init__() + self.channel = ChannelAttn( + channels, rd_ratio=rd_ratio, rd_channels=rd_channels, + rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) + self.spatial = SpatialAttn(spatial_kernel_size, gate_layer=gate_layer) + + def forward(self, x): + x = self.channel(x) + x = self.spatial(x) + return x + + +class LightCbamModule(nn.Module): + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(LightCbamModule, self).__init__() + self.channel = LightChannelAttn( + channels, rd_ratio=rd_ratio, rd_channels=rd_channels, + rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) + self.spatial = LightSpatialAttn(spatial_kernel_size) + + def forward(self, x): + x = self.channel(x) + x = self.spatial(x) + return x + diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/classifier.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/classifier.py new file mode 100644 index 0000000000000000000000000000000000000000..2b74541341ad24bfb97f7ea90ac6470b83a73aa3 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/classifier.py @@ -0,0 +1,56 @@ +""" Classifier head and layer factory + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn +from torch.nn import functional as F + +from .adaptive_avgmax_pool import SelectAdaptivePool2d +from .linear import Linear + + +def _create_pool(num_features, num_classes, pool_type='avg', use_conv=False): + flatten_in_pool = not use_conv # flatten when we use a Linear layer after pooling + if not pool_type: + assert num_classes == 0 or use_conv,\ + 'Pooling can only be disabled if classifier is also removed or conv classifier is used' + flatten_in_pool = False # disable flattening if pooling is pass-through (no pooling) + global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=flatten_in_pool) + num_pooled_features = num_features * global_pool.feat_mult() + return global_pool, num_pooled_features + + +def _create_fc(num_features, num_classes, use_conv=False): + if num_classes <= 0: + fc = nn.Identity() # pass-through (no classifier) + elif use_conv: + fc = nn.Conv2d(num_features, num_classes, 1, bias=True) + else: + # NOTE: using my Linear wrapper that fixes AMP + torchscript casting issue + fc = Linear(num_features, num_classes, bias=True) + return fc + + +def create_classifier(num_features, num_classes, pool_type='avg', use_conv=False): + global_pool, num_pooled_features = _create_pool(num_features, num_classes, pool_type, use_conv=use_conv) + fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv) + return global_pool, fc + + +class ClassifierHead(nn.Module): + """Classifier head w/ configurable global pooling and dropout.""" + + def __init__(self, in_chs, num_classes, pool_type='avg', drop_rate=0., use_conv=False): + super(ClassifierHead, self).__init__() + self.drop_rate = drop_rate + self.global_pool, num_pooled_features = _create_pool(in_chs, num_classes, pool_type, use_conv=use_conv) + self.fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv) + self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity() + + def forward(self, x): + x = self.global_pool(x) + if self.drop_rate: + x = F.dropout(x, p=float(self.drop_rate), training=self.training) + x = self.fc(x) + x = self.flatten(x) + return x diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/cond_conv2d.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/cond_conv2d.py new file mode 100644 index 0000000000000000000000000000000000000000..8b4bbca84d6f12e0fb875b4edb435b976fc649d6 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/cond_conv2d.py @@ -0,0 +1,122 @@ +""" PyTorch Conditionally Parameterized Convolution (CondConv) + +Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference +(https://arxiv.org/abs/1904.04971) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import math +from functools import partial +import numpy as np +import torch +from torch import nn as nn +from torch.nn import functional as F + +from .helpers import to_2tuple +from .conv2d_same import conv2d_same +from .padding import get_padding_value + + +def get_condconv_initializer(initializer, num_experts, expert_shape): + def condconv_initializer(weight): + """CondConv initializer function.""" + num_params = np.prod(expert_shape) + if (len(weight.shape) != 2 or weight.shape[0] != num_experts or + weight.shape[1] != num_params): + raise (ValueError( + 'CondConv variables must have shape [num_experts, num_params]')) + for i in range(num_experts): + initializer(weight[i].view(expert_shape)) + return condconv_initializer + + +class CondConv2d(nn.Module): + """ Conditionally Parameterized Convolution + Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py + + Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion: + https://github.com/pytorch/pytorch/issues/17983 + """ + __constants__ = ['in_channels', 'out_channels', 'dynamic_padding'] + + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): + super(CondConv2d, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = to_2tuple(kernel_size) + self.stride = to_2tuple(stride) + padding_val, is_padding_dynamic = get_padding_value( + padding, kernel_size, stride=stride, dilation=dilation) + self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript + self.padding = to_2tuple(padding_val) + self.dilation = to_2tuple(dilation) + self.groups = groups + self.num_experts = num_experts + + self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight_num_param = 1 + for wd in self.weight_shape: + weight_num_param *= wd + self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) + + if bias: + self.bias_shape = (self.out_channels,) + self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) + else: + self.register_parameter('bias', None) + + self.reset_parameters() + + def reset_parameters(self): + init_weight = get_condconv_initializer( + partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) + init_weight(self.weight) + if self.bias is not None: + fan_in = np.prod(self.weight_shape[1:]) + bound = 1 / math.sqrt(fan_in) + init_bias = get_condconv_initializer( + partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape) + init_bias(self.bias) + + def forward(self, x, routing_weights): + B, C, H, W = x.shape + weight = torch.matmul(routing_weights, self.weight) + new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight = weight.view(new_weight_shape) + bias = None + if self.bias is not None: + bias = torch.matmul(routing_weights, self.bias) + bias = bias.view(B * self.out_channels) + # move batch elements with channels so each batch element can be efficiently convolved with separate kernel + x = x.view(1, B * C, H, W) + if self.dynamic_padding: + out = conv2d_same( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + else: + out = F.conv2d( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1]) + + # Literal port (from TF definition) + # x = torch.split(x, 1, 0) + # weight = torch.split(weight, 1, 0) + # if self.bias is not None: + # bias = torch.matmul(routing_weights, self.bias) + # bias = torch.split(bias, 1, 0) + # else: + # bias = [None] * B + # out = [] + # for xi, wi, bi in zip(x, weight, bias): + # wi = wi.view(*self.weight_shape) + # if bi is not None: + # bi = bi.view(*self.bias_shape) + # out.append(self.conv_fn( + # xi, wi, bi, stride=self.stride, padding=self.padding, + # dilation=self.dilation, groups=self.groups)) + # out = torch.cat(out, 0) + return out diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/config.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f07b9d782ba0597c174dee81097c28280335fdba --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/config.py @@ -0,0 +1,115 @@ +""" Model / Layer Config singleton state +""" +from typing import Any, Optional + +__all__ = [ + 'is_exportable', 'is_scriptable', 'is_no_jit', + 'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config' +] + +# Set to True if prefer to have layers with no jit optimization (includes activations) +_NO_JIT = False + +# Set to True if prefer to have activation layers with no jit optimization +# NOTE not currently used as no difference between no_jit and no_activation jit as only layers obeying +# the jit flags so far are activations. This will change as more layers are updated and/or added. +_NO_ACTIVATION_JIT = False + +# Set to True if exporting a model with Same padding via ONNX +_EXPORTABLE = False + +# Set to True if wanting to use torch.jit.script on a model +_SCRIPTABLE = False + + +def is_no_jit(): + return _NO_JIT + + +class set_no_jit: + def __init__(self, mode: bool) -> None: + global _NO_JIT + self.prev = _NO_JIT + _NO_JIT = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _NO_JIT + _NO_JIT = self.prev + return False + + +def is_exportable(): + return _EXPORTABLE + + +class set_exportable: + def __init__(self, mode: bool) -> None: + global _EXPORTABLE + self.prev = _EXPORTABLE + _EXPORTABLE = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _EXPORTABLE + _EXPORTABLE = self.prev + return False + + +def is_scriptable(): + return _SCRIPTABLE + + +class set_scriptable: + def __init__(self, mode: bool) -> None: + global _SCRIPTABLE + self.prev = _SCRIPTABLE + _SCRIPTABLE = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _SCRIPTABLE + _SCRIPTABLE = self.prev + return False + + +class set_layer_config: + """ Layer config context manager that allows setting all layer config flags at once. + If a flag arg is None, it will not change the current value. + """ + def __init__( + self, + scriptable: Optional[bool] = None, + exportable: Optional[bool] = None, + no_jit: Optional[bool] = None, + no_activation_jit: Optional[bool] = None): + global _SCRIPTABLE + global _EXPORTABLE + global _NO_JIT + global _NO_ACTIVATION_JIT + self.prev = _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT + if scriptable is not None: + _SCRIPTABLE = scriptable + if exportable is not None: + _EXPORTABLE = exportable + if no_jit is not None: + _NO_JIT = no_jit + if no_activation_jit is not None: + _NO_ACTIVATION_JIT = no_activation_jit + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _SCRIPTABLE + global _EXPORTABLE + global _NO_JIT + global _NO_ACTIVATION_JIT + _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev + return False diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/conv2d_same.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/conv2d_same.py new file mode 100644 index 0000000000000000000000000000000000000000..75f0f98d4ec1e3f4a0dc004b977815afaa25e7fc --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/conv2d_same.py @@ -0,0 +1,42 @@ +""" Conv2d w/ Same Padding + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Tuple, Optional + +from .padding import pad_same, get_padding_value + + +def conv2d_same( + x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1), + padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1): + x = pad_same(x, weight.shape[-2:], stride, dilation) + return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) + + +class Conv2dSame(nn.Conv2d): + """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions + """ + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True): + super(Conv2dSame, self).__init__( + in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) + + def forward(self, x): + return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + +def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): + padding = kwargs.pop('padding', '') + kwargs.setdefault('bias', False) + padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs) + if is_dynamic: + return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs) + else: + return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) + + diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/conv_bn_act.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/conv_bn_act.py new file mode 100644 index 0000000000000000000000000000000000000000..33005c37b752bd995aeb983ad8480c36b94d0a0c --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/conv_bn_act.py @@ -0,0 +1,40 @@ +""" Conv2d + BN + Act + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .create_conv2d import create_conv2d +from .create_norm_act import convert_norm_act + + +class ConvBnAct(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, + bias=False, apply_act=True, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, aa_layer=None, + drop_block=None): + super(ConvBnAct, self).__init__() + use_aa = aa_layer is not None + + self.conv = create_conv2d( + in_channels, out_channels, kernel_size, stride=1 if use_aa else stride, + padding=padding, dilation=dilation, groups=groups, bias=bias) + + # NOTE for backwards compatibility with models that use separate norm and act layer definitions + norm_act_layer = convert_norm_act(norm_layer, act_layer) + self.bn = norm_act_layer(out_channels, apply_act=apply_act, drop_block=drop_block) + self.aa = aa_layer(channels=out_channels) if stride == 2 and use_aa else None + + @property + def in_channels(self): + return self.conv.in_channels + + @property + def out_channels(self): + return self.conv.out_channels + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.aa is not None: + x = self.aa(x) + return x diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/create_act.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/create_act.py new file mode 100644 index 0000000000000000000000000000000000000000..aa557692accff431fe1f9cfb7a5c6d94314b14f6 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/create_act.py @@ -0,0 +1,153 @@ +""" Activation Factory +Hacked together by / Copyright 2020 Ross Wightman +""" +from typing import Union, Callable, Type + +from .activations import * +from .activations_jit import * +from .activations_me import * +from .config import is_exportable, is_scriptable, is_no_jit + +# PyTorch has an optimized, native 'silu' (aka 'swish') operator as of PyTorch 1.7. +# Also hardsigmoid, hardswish, and soon mish. This code will use native version if present. +# Eventually, the custom SiLU, Mish, Hard*, layers will be removed and only native variants will be used. +_has_silu = 'silu' in dir(torch.nn.functional) +_has_hardswish = 'hardswish' in dir(torch.nn.functional) +_has_hardsigmoid = 'hardsigmoid' in dir(torch.nn.functional) +_has_mish = 'mish' in dir(torch.nn.functional) + + +_ACT_FN_DEFAULT = dict( + silu=F.silu if _has_silu else swish, + swish=F.silu if _has_silu else swish, + mish=F.mish if _has_mish else mish, + relu=F.relu, + relu6=F.relu6, + leaky_relu=F.leaky_relu, + elu=F.elu, + celu=F.celu, + selu=F.selu, + gelu=gelu, + sigmoid=sigmoid, + tanh=tanh, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid, + hard_swish=F.hardswish if _has_hardswish else hard_swish, + hard_mish=hard_mish, +) + +_ACT_FN_JIT = dict( + silu=F.silu if _has_silu else swish_jit, + swish=F.silu if _has_silu else swish_jit, + mish=F.mish if _has_mish else mish_jit, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_jit, + hard_swish=F.hardswish if _has_hardswish else hard_swish_jit, + hard_mish=hard_mish_jit +) + +_ACT_FN_ME = dict( + silu=F.silu if _has_silu else swish_me, + swish=F.silu if _has_silu else swish_me, + mish=F.mish if _has_mish else mish_me, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_me, + hard_swish=F.hardswish if _has_hardswish else hard_swish_me, + hard_mish=hard_mish_me, +) + +_ACT_FNS = (_ACT_FN_ME, _ACT_FN_JIT, _ACT_FN_DEFAULT) +for a in _ACT_FNS: + a.setdefault('hardsigmoid', a.get('hard_sigmoid')) + a.setdefault('hardswish', a.get('hard_swish')) + + +_ACT_LAYER_DEFAULT = dict( + silu=nn.SiLU if _has_silu else Swish, + swish=nn.SiLU if _has_silu else Swish, + mish=nn.Mish if _has_mish else Mish, + relu=nn.ReLU, + relu6=nn.ReLU6, + leaky_relu=nn.LeakyReLU, + elu=nn.ELU, + prelu=PReLU, + celu=nn.CELU, + selu=nn.SELU, + gelu=GELU, + sigmoid=Sigmoid, + tanh=Tanh, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoid, + hard_swish=nn.Hardswish if _has_hardswish else HardSwish, + hard_mish=HardMish, +) + +_ACT_LAYER_JIT = dict( + silu=nn.SiLU if _has_silu else SwishJit, + swish=nn.SiLU if _has_silu else SwishJit, + mish=nn.Mish if _has_mish else MishJit, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidJit, + hard_swish=nn.Hardswish if _has_hardswish else HardSwishJit, + hard_mish=HardMishJit +) + +_ACT_LAYER_ME = dict( + silu=nn.SiLU if _has_silu else SwishMe, + swish=nn.SiLU if _has_silu else SwishMe, + mish=nn.Mish if _has_mish else MishMe, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidMe, + hard_swish=nn.Hardswish if _has_hardswish else HardSwishMe, + hard_mish=HardMishMe, +) + +_ACT_LAYERS = (_ACT_LAYER_ME, _ACT_LAYER_JIT, _ACT_LAYER_DEFAULT) +for a in _ACT_LAYERS: + a.setdefault('hardsigmoid', a.get('hard_sigmoid')) + a.setdefault('hardswish', a.get('hard_swish')) + + +def get_act_fn(name: Union[Callable, str] = 'relu'): + """ Activation Function Factory + Fetching activation fns by name with this function allows export or torch script friendly + functions to be returned dynamically based on current config. + """ + if not name: + return None + if isinstance(name, Callable): + return name + if not (is_no_jit() or is_exportable() or is_scriptable()): + # If not exporting or scripting the model, first look for a memory-efficient version with + # custom autograd, then fallback + if name in _ACT_FN_ME: + return _ACT_FN_ME[name] + if is_exportable() and name in ('silu', 'swish'): + # FIXME PyTorch SiLU doesn't ONNX export, this is a temp hack + return swish + if not (is_no_jit() or is_exportable()): + if name in _ACT_FN_JIT: + return _ACT_FN_JIT[name] + return _ACT_FN_DEFAULT[name] + + +def get_act_layer(name: Union[Type[nn.Module], str] = 'relu'): + """ Activation Layer Factory + Fetching activation layers by name with this function allows export or torch script friendly + functions to be returned dynamically based on current config. + """ + if not name: + return None + if isinstance(name, type): + return name + if not (is_no_jit() or is_exportable() or is_scriptable()): + if name in _ACT_LAYER_ME: + return _ACT_LAYER_ME[name] + if is_exportable() and name in ('silu', 'swish'): + # FIXME PyTorch SiLU doesn't ONNX export, this is a temp hack + return Swish + if not (is_no_jit() or is_exportable()): + if name in _ACT_LAYER_JIT: + return _ACT_LAYER_JIT[name] + return _ACT_LAYER_DEFAULT[name] + + +def create_act_layer(name: Union[nn.Module, str], inplace=None, **kwargs): + act_layer = get_act_layer(name) + if act_layer is None: + return None + return act_layer(**kwargs) if inplace is None else act_layer(inplace=inplace, **kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/create_attn.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/create_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..028c0f75960d513294d997e3c6c180566e97536a --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/create_attn.py @@ -0,0 +1,89 @@ +""" Attention Factory + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch +from functools import partial + +from .bottleneck_attn import BottleneckAttn +from .cbam import CbamModule, LightCbamModule +from .eca import EcaModule, CecaModule +from .gather_excite import GatherExcite +from .global_context import GlobalContext +from .halo_attn import HaloAttn +from .lambda_layer import LambdaLayer +from .non_local_attn import NonLocalAttn, BatNonLocalAttn +from .selective_kernel import SelectiveKernel +from .split_attn import SplitAttn +from .squeeze_excite import SEModule, EffectiveSEModule + + +def get_attn(attn_type): + if isinstance(attn_type, torch.nn.Module): + return attn_type + module_cls = None + if attn_type is not None: + if isinstance(attn_type, str): + attn_type = attn_type.lower() + # Lightweight attention modules (channel and/or coarse spatial). + # Typically added to existing network architecture blocks in addition to existing convolutions. + if attn_type == 'se': + module_cls = SEModule + elif attn_type == 'ese': + module_cls = EffectiveSEModule + elif attn_type == 'eca': + module_cls = EcaModule + elif attn_type == 'ecam': + module_cls = partial(EcaModule, use_mlp=True) + elif attn_type == 'ceca': + module_cls = CecaModule + elif attn_type == 'ge': + module_cls = GatherExcite + elif attn_type == 'gc': + module_cls = GlobalContext + elif attn_type == 'gca': + module_cls = partial(GlobalContext, fuse_add=True, fuse_scale=False) + elif attn_type == 'cbam': + module_cls = CbamModule + elif attn_type == 'lcbam': + module_cls = LightCbamModule + + # Attention / attention-like modules w/ significant params + # Typically replace some of the existing workhorse convs in a network architecture. + # All of these accept a stride argument and can spatially downsample the input. + elif attn_type == 'sk': + module_cls = SelectiveKernel + elif attn_type == 'splat': + module_cls = SplitAttn + + # Self-attention / attention-like modules w/ significant compute and/or params + # Typically replace some of the existing workhorse convs in a network architecture. + # All of these accept a stride argument and can spatially downsample the input. + elif attn_type == 'lambda': + return LambdaLayer + elif attn_type == 'bottleneck': + return BottleneckAttn + elif attn_type == 'halo': + return HaloAttn + elif attn_type == 'nl': + module_cls = NonLocalAttn + elif attn_type == 'bat': + module_cls = BatNonLocalAttn + + # Woops! + else: + assert False, "Invalid attn module (%s)" % attn_type + elif isinstance(attn_type, bool): + if attn_type: + module_cls = SEModule + else: + module_cls = attn_type + return module_cls + + +def create_attn(attn_type, channels, **kwargs): + module_cls = get_attn(attn_type) + if module_cls is not None: + # NOTE: it's expected the first (positional) argument of all attention layers is the # input channels + return module_cls(channels, **kwargs) + return None diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/create_conv2d.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/create_conv2d.py new file mode 100644 index 0000000000000000000000000000000000000000..3a0cc03a5c8c23fe047d1d3c24782700422e2e6e --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/create_conv2d.py @@ -0,0 +1,31 @@ +""" Create Conv2d Factory Method + +Hacked together by / Copyright 2020 Ross Wightman +""" + +from .mixed_conv2d import MixedConv2d +from .cond_conv2d import CondConv2d +from .conv2d_same import create_conv2d_pad + + +def create_conv2d(in_channels, out_channels, kernel_size, **kwargs): + """ Select a 2d convolution implementation based on arguments + Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d. + + Used extensively by EfficientNet, MobileNetv3 and related networks. + """ + if isinstance(kernel_size, list): + assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently + assert 'groups' not in kwargs # MixedConv groups are defined by kernel list + # We're going to use only lists for defining the MixedConv2d kernel groups, + # ints, tuples, other iterables will continue to pass to normal conv and specify h, w. + m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs) + else: + depthwise = kwargs.pop('depthwise', False) + # for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0 + groups = in_channels if depthwise else kwargs.pop('groups', 1) + if 'num_experts' in kwargs and kwargs['num_experts'] > 0: + m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs) + else: + m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs) + return m diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/create_norm_act.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/create_norm_act.py new file mode 100644 index 0000000000000000000000000000000000000000..5b5629457dc14b5da3b9673b7e21d7d80f7cda4c --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/create_norm_act.py @@ -0,0 +1,83 @@ +""" NormAct (Normalizaiton + Activation Layer) Factory + +Create norm + act combo modules that attempt to be backwards compatible with separate norm + act +isntances in models. Where these are used it will be possible to swap separate BN + act layers with +combined modules like IABN or EvoNorms. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import types +import functools + +import torch +import torch.nn as nn + +from .evo_norm import EvoNormBatch2d, EvoNormSample2d +from .norm_act import BatchNormAct2d, GroupNormAct +from .inplace_abn import InplaceAbn + +_NORM_ACT_TYPES = {BatchNormAct2d, GroupNormAct, EvoNormBatch2d, EvoNormSample2d, InplaceAbn} +_NORM_ACT_REQUIRES_ARG = {BatchNormAct2d, GroupNormAct, InplaceAbn} # requires act_layer arg to define act type + + +def get_norm_act_layer(layer_class): + layer_class = layer_class.replace('_', '').lower() + if layer_class.startswith("batchnorm"): + layer = BatchNormAct2d + elif layer_class.startswith("groupnorm"): + layer = GroupNormAct + elif layer_class == "evonormbatch": + layer = EvoNormBatch2d + elif layer_class == "evonormsample": + layer = EvoNormSample2d + elif layer_class == "iabn" or layer_class == "inplaceabn": + layer = InplaceAbn + else: + assert False, "Invalid norm_act layer (%s)" % layer_class + return layer + + +def create_norm_act(layer_type, num_features, apply_act=True, jit=False, **kwargs): + layer_parts = layer_type.split('-') # e.g. batchnorm-leaky_relu + assert len(layer_parts) in (1, 2) + layer = get_norm_act_layer(layer_parts[0]) + #activation_class = layer_parts[1].lower() if len(layer_parts) > 1 else '' # FIXME support string act selection? + layer_instance = layer(num_features, apply_act=apply_act, **kwargs) + if jit: + layer_instance = torch.jit.script(layer_instance) + return layer_instance + + +def convert_norm_act(norm_layer, act_layer): + assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial)) + assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial)) + norm_act_kwargs = {} + + # unbind partial fn, so args can be rebound later + if isinstance(norm_layer, functools.partial): + norm_act_kwargs.update(norm_layer.keywords) + norm_layer = norm_layer.func + + if isinstance(norm_layer, str): + norm_act_layer = get_norm_act_layer(norm_layer) + elif norm_layer in _NORM_ACT_TYPES: + norm_act_layer = norm_layer + elif isinstance(norm_layer, types.FunctionType): + # if function type, must be a lambda/fn that creates a norm_act layer + norm_act_layer = norm_layer + else: + type_name = norm_layer.__name__.lower() + if type_name.startswith('batchnorm'): + norm_act_layer = BatchNormAct2d + elif type_name.startswith('groupnorm'): + norm_act_layer = GroupNormAct + else: + assert False, f"No equivalent norm_act layer for {type_name}" + + if norm_act_layer in _NORM_ACT_REQUIRES_ARG: + # pass `act_layer` through for backwards compat where `act_layer=None` implies no activation. + # In the future, may force use of `apply_act` with `act_layer` arg bound to relevant NormAct types + norm_act_kwargs.setdefault('act_layer', act_layer) + if norm_act_kwargs: + norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs) # bind/rebind args + return norm_act_layer diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/drop.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/drop.py new file mode 100644 index 0000000000000000000000000000000000000000..6de9e3f729f7f1ca29d4511f6c64733d3169fbec --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/drop.py @@ -0,0 +1,168 @@ +""" DropBlock, DropPath + +PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers. + +Papers: +DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890) + +Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382) + +Code: +DropBlock impl inspired by two Tensorflow impl that I liked: + - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74 + - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def drop_block_2d( + x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0, + with_noise: bool = False, inplace: bool = False, batchwise: bool = False): + """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf + + DropBlock with an experimental gaussian noise option. This layer has been tested on a few training + runs with success, but needs further validation and possibly optimization for lower runtime impact. + """ + B, C, H, W = x.shape + total_size = W * H + clipped_block_size = min(block_size, min(W, H)) + # seed_drop_rate, the gamma parameter + gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ( + (W - block_size + 1) * (H - block_size + 1)) + + # Forces the block to be inside the feature map. + w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device)) + valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \ + ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2)) + valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype) + + if batchwise: + # one mask for whole batch, quite a bit faster + uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) + else: + uniform_noise = torch.rand_like(x) + block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype) + block_mask = -F.max_pool2d( + -block_mask, + kernel_size=clipped_block_size, # block_size, + stride=1, + padding=clipped_block_size // 2) + + if with_noise: + normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x) + if inplace: + x.mul_(block_mask).add_(normal_noise * (1 - block_mask)) + else: + x = x * block_mask + normal_noise * (1 - block_mask) + else: + normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype) + if inplace: + x.mul_(block_mask * normalize_scale) + else: + x = x * block_mask * normalize_scale + return x + + +def drop_block_fast_2d( + x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7, + gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False): + """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf + + DropBlock with an experimental gaussian noise option. Simplied from above without concern for valid + block mask at edges. + """ + B, C, H, W = x.shape + total_size = W * H + clipped_block_size = min(block_size, min(W, H)) + gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ( + (W - block_size + 1) * (H - block_size + 1)) + + if batchwise: + # one mask for whole batch, quite a bit faster + block_mask = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) < gamma + else: + # mask per batch element + block_mask = torch.rand_like(x) < gamma + block_mask = F.max_pool2d( + block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2) + + if with_noise: + normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x) + if inplace: + x.mul_(1. - block_mask).add_(normal_noise * block_mask) + else: + x = x * (1. - block_mask) + normal_noise * block_mask + else: + block_mask = 1 - block_mask + normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(dtype=x.dtype) + if inplace: + x.mul_(block_mask * normalize_scale) + else: + x = x * block_mask * normalize_scale + return x + + +class DropBlock2d(nn.Module): + """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf + """ + def __init__(self, + drop_prob=0.1, + block_size=7, + gamma_scale=1.0, + with_noise=False, + inplace=False, + batchwise=False, + fast=True): + super(DropBlock2d, self).__init__() + self.drop_prob = drop_prob + self.gamma_scale = gamma_scale + self.block_size = block_size + self.with_noise = with_noise + self.inplace = inplace + self.batchwise = batchwise + self.fast = fast # FIXME finish comparisons of fast vs not + + def forward(self, x): + if not self.training or not self.drop_prob: + return x + if self.fast: + return drop_block_fast_2d( + x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise) + else: + return drop_block_2d( + x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise) + + +def drop_path(x, drop_prob: float = 0., training: bool = False): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for + changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use + 'survival rate' as the argument. + + """ + if drop_prob == 0. or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) + random_tensor.floor_() # binarize + output = x.div(keep_prob) * random_tensor + return output + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + """ + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/eca.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/eca.py new file mode 100644 index 0000000000000000000000000000000000000000..e29be6ac3c95bb61229cdcdd659ec89d541f1a53 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/eca.py @@ -0,0 +1,145 @@ +""" +ECA module from ECAnet + +paper: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks +https://arxiv.org/abs/1910.03151 + +Original ECA model borrowed from https://github.com/BangguWu/ECANet + +Modified circular ECA implementation and adaption for use in timm package +by Chris Ha https://github.com/VRandme + +Original License: + +MIT License + +Copyright (c) 2019 BangguWu, Qilong Wang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +""" +import math +from torch import nn +import torch.nn.functional as F + + +from .create_act import create_act_layer +from .helpers import make_divisible + + +class EcaModule(nn.Module): + """Constructs an ECA module. + + Args: + channels: Number of channels of the input feature map for use in adaptive kernel sizes + for actual calculations according to channel. + gamma, beta: when channel is given parameters of mapping function + refer to original paper https://arxiv.org/pdf/1910.03151.pdf + (default=None. if channel size not given, use k_size given for kernel size.) + kernel_size: Adaptive selection of kernel size (default=3) + gamm: used in kernel_size calc, see above + beta: used in kernel_size calc, see above + act_layer: optional non-linearity after conv, enables conv bias, this is an experiment + gate_layer: gating non-linearity to use + """ + def __init__( + self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid', + rd_ratio=1/8, rd_channels=None, rd_divisor=8, use_mlp=False): + super(EcaModule, self).__init__() + if channels is not None: + t = int(abs(math.log(channels, 2) + beta) / gamma) + kernel_size = max(t if t % 2 else t + 1, 3) + assert kernel_size % 2 == 1 + padding = (kernel_size - 1) // 2 + if use_mlp: + # NOTE 'mlp' mode is a timm experiment, not in paper + assert channels is not None + if rd_channels is None: + rd_channels = make_divisible(channels * rd_ratio, divisor=rd_divisor) + act_layer = act_layer or nn.ReLU + self.conv = nn.Conv1d(1, rd_channels, kernel_size=1, padding=0, bias=True) + self.act = create_act_layer(act_layer) + self.conv2 = nn.Conv1d(rd_channels, 1, kernel_size=kernel_size, padding=padding, bias=True) + else: + self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=padding, bias=False) + self.act = None + self.conv2 = None + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + y = x.mean((2, 3)).view(x.shape[0], 1, -1) # view for 1d conv + y = self.conv(y) + if self.conv2 is not None: + y = self.act(y) + y = self.conv2(y) + y = self.gate(y).view(x.shape[0], -1, 1, 1) + return x * y.expand_as(x) + + +EfficientChannelAttn = EcaModule # alias + + +class CecaModule(nn.Module): + """Constructs a circular ECA module. + + ECA module where the conv uses circular padding rather than zero padding. + Unlike the spatial dimension, the channels do not have inherent ordering nor + locality. Although this module in essence, applies such an assumption, it is unnecessary + to limit the channels on either "edge" from being circularly adapted to each other. + This will fundamentally increase connectivity and possibly increase performance metrics + (accuracy, robustness), without significantly impacting resource metrics + (parameter size, throughput,latency, etc) + + Args: + channels: Number of channels of the input feature map for use in adaptive kernel sizes + for actual calculations according to channel. + gamma, beta: when channel is given parameters of mapping function + refer to original paper https://arxiv.org/pdf/1910.03151.pdf + (default=None. if channel size not given, use k_size given for kernel size.) + kernel_size: Adaptive selection of kernel size (default=3) + gamm: used in kernel_size calc, see above + beta: used in kernel_size calc, see above + act_layer: optional non-linearity after conv, enables conv bias, this is an experiment + gate_layer: gating non-linearity to use + """ + + def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid'): + super(CecaModule, self).__init__() + if channels is not None: + t = int(abs(math.log(channels, 2) + beta) / gamma) + kernel_size = max(t if t % 2 else t + 1, 3) + has_act = act_layer is not None + assert kernel_size % 2 == 1 + + # PyTorch circular padding mode is buggy as of pytorch 1.4 + # see https://github.com/pytorch/pytorch/pull/17240 + # implement manual circular padding + self.padding = (kernel_size - 1) // 2 + self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0, bias=has_act) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + y = x.mean((2, 3)).view(x.shape[0], 1, -1) + # Manually implement circular padding, F.pad does not seemed to be bugged + y = F.pad(y, (self.padding, self.padding), mode='circular') + y = self.conv(y) + y = self.gate(y).view(x.shape[0], -1, 1, 1) + return x * y.expand_as(x) + + +CircularEfficientChannelAttn = CecaModule diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/evo_norm.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/evo_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..9023afd0e81dc8a76871d03141866217d59f4770 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/evo_norm.py @@ -0,0 +1,83 @@ +"""EvoNormB0 (Batched) and EvoNormS0 (Sample) in PyTorch + +An attempt at getting decent performing EvoNorms running in PyTorch. +While currently faster than other impl, still quite a ways off the built-in BN +in terms of memory usage and throughput (roughly 5x mem, 1/2 - 1/3x speed). + +Still very much a WIP, fiddling with buffer usage, in-place/jit optimizations, and layouts. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +import torch.nn as nn + + +class EvoNormBatch2d(nn.Module): + def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, drop_block=None): + super(EvoNormBatch2d, self).__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.momentum = momentum + self.eps = eps + param_shape = (1, num_features, 1, 1) + self.weight = nn.Parameter(torch.ones(param_shape), requires_grad=True) + self.bias = nn.Parameter(torch.zeros(param_shape), requires_grad=True) + if apply_act: + self.v = nn.Parameter(torch.ones(param_shape), requires_grad=True) + self.register_buffer('running_var', torch.ones(1, num_features, 1, 1)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.apply_act: + nn.init.ones_(self.v) + + def forward(self, x): + assert x.dim() == 4, 'expected 4D input' + x_type = x.dtype + if self.training: + var = x.var(dim=(0, 2, 3), unbiased=False, keepdim=True) + n = x.numel() / x.shape[1] + self.running_var.copy_( + var.detach() * self.momentum * (n / (n - 1)) + self.running_var * (1 - self.momentum)) + else: + var = self.running_var + + if self.apply_act: + v = self.v.to(dtype=x_type) + d = x * v + (x.var(dim=(2, 3), unbiased=False, keepdim=True) + self.eps).sqrt().to(dtype=x_type) + d = d.max((var + self.eps).sqrt().to(dtype=x_type)) + x = x / d + return x * self.weight + self.bias + + +class EvoNormSample2d(nn.Module): + def __init__(self, num_features, apply_act=True, groups=8, eps=1e-5, drop_block=None): + super(EvoNormSample2d, self).__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.groups = groups + self.eps = eps + param_shape = (1, num_features, 1, 1) + self.weight = nn.Parameter(torch.ones(param_shape), requires_grad=True) + self.bias = nn.Parameter(torch.zeros(param_shape), requires_grad=True) + if apply_act: + self.v = nn.Parameter(torch.ones(param_shape), requires_grad=True) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.apply_act: + nn.init.ones_(self.v) + + def forward(self, x): + assert x.dim() == 4, 'expected 4D input' + B, C, H, W = x.shape + assert C % self.groups == 0 + if self.apply_act: + n = x * (x * self.v).sigmoid() + x = x.reshape(B, self.groups, -1) + x = n.reshape(B, self.groups, -1) / (x.var(dim=-1, unbiased=False, keepdim=True) + self.eps).sqrt() + x = x.reshape(B, C, H, W) + return x * self.weight + self.bias diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/gather_excite.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/gather_excite.py new file mode 100644 index 0000000000000000000000000000000000000000..2d60dc961e2b5e135d38e290b8fa5820ef0fe18f --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/gather_excite.py @@ -0,0 +1,90 @@ +""" Gather-Excite Attention Block + +Paper: `Gather-Excite: Exploiting Feature Context in CNNs` - https://arxiv.org/abs/1810.12348 + +Official code here, but it's only partial impl in Caffe: https://github.com/hujie-frank/GENet + +I've tried to support all of the extent both w/ and w/o params. I don't believe I've seen another +impl that covers all of the cases. + +NOTE: extent=0 + extra_params=False is equivalent to Squeeze-and-Excitation + +Hacked together by / Copyright 2021 Ross Wightman +""" +import math + +from torch import nn as nn +import torch.nn.functional as F + +from .create_act import create_act_layer, get_act_layer +from .create_conv2d import create_conv2d +from .helpers import make_divisible +from .mlp import ConvMlp + + +class GatherExcite(nn.Module): + """ Gather-Excite Attention Module + """ + def __init__( + self, channels, feat_size=None, extra_params=False, extent=0, use_mlp=True, + rd_ratio=1./16, rd_channels=None, rd_divisor=1, add_maxpool=False, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, gate_layer='sigmoid'): + super(GatherExcite, self).__init__() + self.add_maxpool = add_maxpool + act_layer = get_act_layer(act_layer) + self.extent = extent + if extra_params: + self.gather = nn.Sequential() + if extent == 0: + assert feat_size is not None, 'spatial feature size must be specified for global extent w/ params' + self.gather.add_module( + 'conv1', create_conv2d(channels, channels, kernel_size=feat_size, stride=1, depthwise=True)) + if norm_layer: + self.gather.add_module(f'norm1', nn.BatchNorm2d(channels)) + else: + assert extent % 2 == 0 + num_conv = int(math.log2(extent)) + for i in range(num_conv): + self.gather.add_module( + f'conv{i + 1}', + create_conv2d(channels, channels, kernel_size=3, stride=2, depthwise=True)) + if norm_layer: + self.gather.add_module(f'norm{i + 1}', nn.BatchNorm2d(channels)) + if i != num_conv - 1: + self.gather.add_module(f'act{i + 1}', act_layer(inplace=True)) + else: + self.gather = None + if self.extent == 0: + self.gk = 0 + self.gs = 0 + else: + assert extent % 2 == 0 + self.gk = self.extent * 2 - 1 + self.gs = self.extent + + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) + self.mlp = ConvMlp(channels, rd_channels, act_layer=act_layer) if use_mlp else nn.Identity() + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + size = x.shape[-2:] + if self.gather is not None: + x_ge = self.gather(x) + else: + if self.extent == 0: + # global extent + x_ge = x.mean(dim=(2, 3), keepdims=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_ge = 0.5 * x_ge + 0.5 * x.amax((2, 3), keepdim=True) + else: + x_ge = F.avg_pool2d( + x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2, count_include_pad=False) + if self.add_maxpool: + # experimental codepath, may remove or change + x_ge = 0.5 * x_ge + 0.5 * F.max_pool2d(x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2) + x_ge = self.mlp(x_ge) + if x_ge.shape[-1] != 1 or x_ge.shape[-2] != 1: + x_ge = F.interpolate(x_ge, size=size) + return x * self.gate(x_ge) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/global_context.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/global_context.py new file mode 100644 index 0000000000000000000000000000000000000000..de7fb5c15f08a5c2fe42cb7c174fff92d6b0d3bf --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/global_context.py @@ -0,0 +1,67 @@ +""" Global Context Attention Block + +Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond` + - https://arxiv.org/abs/1904.11492 + +Official code consulted as reference: https://github.com/xvjiarui/GCNet + +Hacked together by / Copyright 2021 Ross Wightman +""" +from torch import nn as nn +import torch.nn.functional as F + +from .create_act import create_act_layer, get_act_layer +from .helpers import make_divisible +from .mlp import ConvMlp +from .norm import LayerNorm2d + + +class GlobalContext(nn.Module): + + def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False, + rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'): + super(GlobalContext, self).__init__() + act_layer = get_act_layer(act_layer) + + self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None + + if rd_channels is None: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) + if fuse_add: + self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) + else: + self.mlp_add = None + if fuse_scale: + self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) + else: + self.mlp_scale = None + + self.gate = create_act_layer(gate_layer) + self.init_last_zero = init_last_zero + self.reset_parameters() + + def reset_parameters(self): + if self.conv_attn is not None: + nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu') + if self.mlp_add is not None: + nn.init.zeros_(self.mlp_add.fc2.weight) + + def forward(self, x): + B, C, H, W = x.shape + + if self.conv_attn is not None: + attn = self.conv_attn(x).reshape(B, 1, H * W) # (B, 1, H * W) + attn = F.softmax(attn, dim=-1).unsqueeze(3) # (B, 1, H * W, 1) + context = x.reshape(B, C, H * W).unsqueeze(1) @ attn + context = context.view(B, C, 1, 1) + else: + context = x.mean(dim=(2, 3), keepdim=True) + + if self.mlp_scale is not None: + mlp_x = self.mlp_scale(context) + x = x * self.gate(mlp_x) + if self.mlp_add is not None: + mlp_x = self.mlp_add(context) + x = x + mlp_x + + return x diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/halo_attn.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/halo_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..d298fc0b02af8ab707f23bbf5aa5be38f68e86cd --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/halo_attn.py @@ -0,0 +1,179 @@ +""" Halo Self Attention + +Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` + - https://arxiv.org/abs/2103.12731 + +@misc{2103.12731, +Author = {Ashish Vaswani and Prajit Ramachandran and Aravind Srinivas and Niki Parmar and Blake Hechtman and + Jonathon Shlens}, +Title = {Scaling Local Self-Attention for Parameter Efficient Visual Backbones}, +Year = {2021}, +} + +Status: +This impl is a WIP, there is no official ref impl and some details in paper weren't clear to me. +The attention mechanism works but it's slow as implemented. + +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import Tuple, List + +import torch +from torch import nn +import torch.nn.functional as F + +from .weight_init import trunc_normal_ + + +def rel_logits_1d(q, rel_k, permute_mask: List[int]): + """ Compute relative logits along one dimension + + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + Args: + q: (batch, height, width, dim) + rel_k: (2 * window - 1, dim) + permute_mask: permute output dim according to this + """ + B, H, W, dim = q.shape + rel_size = rel_k.shape[0] + win_size = (rel_size + 1) // 2 + + x = (q @ rel_k.transpose(-1, -2)) + x = x.reshape(-1, W, rel_size) + + # pad to shift from relative to absolute indexing + x_pad = F.pad(x, [0, 1]).flatten(1) + x_pad = F.pad(x_pad, [0, rel_size - W]) + + # reshape and slice out the padded elements + x_pad = x_pad.reshape(-1, W + 1, rel_size) + x = x_pad[:, :W, win_size - 1:] + + # reshape and tile + x = x.reshape(B, H, 1, W, win_size).expand(-1, -1, win_size, -1, -1) + return x.permute(permute_mask) + + +class PosEmbedRel(nn.Module): + """ Relative Position Embedding + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + """ + def __init__(self, block_size, win_size, dim_head, scale): + """ + Args: + block_size (int): block size + win_size (int): neighbourhood window size + dim_head (int): attention head dim + scale (float): scale factor (for init) + """ + super().__init__() + self.block_size = block_size + self.dim_head = dim_head + self.scale = scale + self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * self.scale) + self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * self.scale) + + def forward(self, q): + B, BB, HW, _ = q.shape + + # relative logits in width dimension. + q = q.reshape(-1, self.block_size, self.block_size, self.dim_head) + rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) + + # relative logits in height dimension. + q = q.transpose(1, 2) + rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) + + rel_logits = rel_logits_h + rel_logits_w + rel_logits = rel_logits.reshape(B, BB, HW, -1) + return rel_logits + + +class HaloAttn(nn.Module): + """ Halo Attention + + Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` + - https://arxiv.org/abs/2103.12731 + """ + def __init__( + self, dim, dim_out=None, stride=1, num_heads=8, dim_head=None, block_size=8, halo_size=3, qkv_bias=False): + super().__init__() + dim_out = dim_out or dim + assert dim_out % num_heads == 0 + self.stride = stride + self.num_heads = num_heads + self.dim_head = dim_head or dim // num_heads + self.dim_qk = num_heads * self.dim_head + self.dim_v = dim_out + self.block_size = block_size + self.halo_size = halo_size + self.win_size = block_size + halo_size * 2 # neighbourhood window size + self.scale = self.dim_head ** -0.5 + + # FIXME not clear if this stride behaviour is what the paper intended + # Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving + # data in unfolded block form. I haven't wrapped my head around how that'd look. + self.q = nn.Conv2d(dim, self.dim_qk, 1, stride=self.stride, bias=qkv_bias) + self.kv = nn.Conv2d(dim, self.dim_qk + self.dim_v, 1, bias=qkv_bias) + + self.pos_embed = PosEmbedRel( + block_size=block_size // self.stride, win_size=self.win_size, dim_head=self.dim_head, scale=self.scale) + + self.reset_parameters() + + def reset_parameters(self): + std = self.q.weight.shape[1] ** -0.5 # fan-in + trunc_normal_(self.q.weight, std=std) + trunc_normal_(self.kv.weight, std=std) + trunc_normal_(self.pos_embed.height_rel, std=self.scale) + trunc_normal_(self.pos_embed.width_rel, std=self.scale) + + def forward(self, x): + B, C, H, W = x.shape + assert H % self.block_size == 0 + assert W % self.block_size == 0 + num_h_blocks = H // self.block_size + num_w_blocks = W // self.block_size + num_blocks = num_h_blocks * num_w_blocks + bs_stride = self.block_size // self.stride + + q = self.q(x) + # unfold + q = q.reshape(-1, self.dim_head, num_h_blocks, bs_stride, num_w_blocks, bs_stride).permute(0, 1, 3, 5, 2, 4) + # B, num_heads * dim_head * block_size ** 2, num_blocks + q = q.reshape(B * self.num_heads, self.dim_head, -1, num_blocks).transpose(1, 3) + # B * num_heads, num_blocks, block_size ** 2, dim_head + + kv = self.kv(x) + # generate overlapping windows for kv + kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]) + kv = kv.unfold(2, self.win_size, self.block_size).unfold(3, self.win_size, self.block_size).reshape( + B * self.num_heads, self.dim_head + (self.dim_v // self.num_heads), num_blocks, -1).permute(0, 2, 3, 1) + # NOTE these two alternatives are equivalent, but above is the best balance of performance and clarity + # if self.stride_tricks: + # kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]).contiguous() + # kv = kv.as_strided(( + # B, self.dim_qk + self.dim_v, self.win_size, self.win_size, num_h_blocks, num_w_blocks), + # stride=(kv.stride(0), kv.stride(1), kv.shape[-1], 1, self.block_size * kv.shape[-1], self.block_size)) + # else: + # kv = F.unfold(kv, kernel_size=self.win_size, stride=self.block_size, padding=self.halo_size) + # kv = kv.reshape( + # B * self.num_heads, self.dim_head + (self.dim_v // self.num_heads), -1, num_blocks).transpose(1, 3) + k, v = torch.split(kv, [self.dim_head, self.dim_v // self.num_heads], dim=-1) + # B * num_heads, num_blocks, block_size ** 2, dim_head or dim_v // num_heads + + attn_logits = (q @ k.transpose(-1, -2)) * self.scale # FIXME should usual attn scale be applied? + attn_logits = attn_logits + self.pos_embed(q) # B * num_heads, block_size ** 2, win_size ** 2 + + attn_out = attn_logits.softmax(dim=-1) + attn_out = (attn_out @ v).transpose(1, 3) # B * num_heads, dim_v // num_heads, block_size ** 2, num_blocks + + # fold + attn_out = attn_out.reshape(-1, bs_stride, bs_stride, num_h_blocks, num_w_blocks) + attn_out = attn_out.permute(0, 3, 1, 4, 2).contiguous().view(B, self.dim_v, H // self.stride, W // self.stride) + # B, dim_out, H // stride, W // stride + return attn_out diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/helpers.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..cc54ca7f8a24de7e1ee0e5d27decf3e88c55ece3 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/helpers.py @@ -0,0 +1,31 @@ +""" Layer/Module Helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +from itertools import repeat +import collections.abc + + +# From PyTorch internals +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable): + return x + return tuple(repeat(x, n)) + return parse + + +to_1tuple = _ntuple(1) +to_2tuple = _ntuple(2) +to_3tuple = _ntuple(3) +to_4tuple = _ntuple(4) +to_ntuple = _ntuple + + +def make_divisible(v, divisor=8, min_value=None, round_limit=.9): + min_value = min_value or divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < round_limit * v: + new_v += divisor + return new_v diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/inplace_abn.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/inplace_abn.py new file mode 100644 index 0000000000000000000000000000000000000000..3aae7cf563edfe6c9d2bf1a9f3994d911aacea23 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/inplace_abn.py @@ -0,0 +1,87 @@ +import torch +from torch import nn as nn + +try: + from inplace_abn.functions import inplace_abn, inplace_abn_sync + has_iabn = True +except ImportError: + has_iabn = False + + def inplace_abn(x, weight, bias, running_mean, running_var, + training=True, momentum=0.1, eps=1e-05, activation="leaky_relu", activation_param=0.01): + raise ImportError( + "Please install InplaceABN:'pip install git+https://github.com/mapillary/inplace_abn.git@v1.0.12'") + + def inplace_abn_sync(**kwargs): + inplace_abn(**kwargs) + + +class InplaceAbn(nn.Module): + """Activated Batch Normalization + + This gathers a BatchNorm and an activation function in a single module + + Parameters + ---------- + num_features : int + Number of feature channels in the input and output. + eps : float + Small constant to prevent numerical issues. + momentum : float + Momentum factor applied to compute running statistics. + affine : bool + If `True` apply learned scale and shift transformation after normalization. + act_layer : str or nn.Module type + Name or type of the activation functions, one of: `leaky_relu`, `elu` + act_param : float + Negative slope for the `leaky_relu` activation. + """ + + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, apply_act=True, + act_layer="leaky_relu", act_param=0.01, drop_block=None): + super(InplaceAbn, self).__init__() + self.num_features = num_features + self.affine = affine + self.eps = eps + self.momentum = momentum + if apply_act: + if isinstance(act_layer, str): + assert act_layer in ('leaky_relu', 'elu', 'identity', '') + self.act_name = act_layer if act_layer else 'identity' + else: + # convert act layer passed as type to string + if act_layer == nn.ELU: + self.act_name = 'elu' + elif act_layer == nn.LeakyReLU: + self.act_name = 'leaky_relu' + elif act_layer == nn.Identity: + self.act_name = 'identity' + else: + assert False, f'Invalid act layer {act_layer.__name__} for IABN' + else: + self.act_name = 'identity' + self.act_param = act_param + if self.affine: + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + else: + self.register_parameter('weight', None) + self.register_parameter('bias', None) + self.register_buffer('running_mean', torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.constant_(self.running_mean, 0) + nn.init.constant_(self.running_var, 1) + if self.affine: + nn.init.constant_(self.weight, 1) + nn.init.constant_(self.bias, 0) + + def forward(self, x): + output = inplace_abn( + x, self.weight, self.bias, self.running_mean, self.running_var, + self.training, self.momentum, self.eps, self.act_name, self.act_param) + if isinstance(output, tuple): + output = output[0] + return output diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/lambda_layer.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/lambda_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..d298c1aa7bd1e94b395f35b473f260402dd357aa --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/lambda_layer.py @@ -0,0 +1,86 @@ +""" Lambda Layer + +Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` + - https://arxiv.org/abs/2102.08602 + +@misc{2102.08602, +Author = {Irwan Bello}, +Title = {LambdaNetworks: Modeling Long-Range Interactions Without Attention}, +Year = {2021}, +} + +Status: +This impl is a WIP. Code snippets in the paper were used as reference but +good chance some details are missing/wrong. + +I've only implemented local lambda conv based pos embeddings. + +For a PyTorch impl that includes other embedding options checkout +https://github.com/lucidrains/lambda-networks + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch +from torch import nn +import torch.nn.functional as F + +from .weight_init import trunc_normal_ + + +class LambdaLayer(nn.Module): + """Lambda Layer w/ lambda conv position embedding + + Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` + - https://arxiv.org/abs/2102.08602 + """ + def __init__( + self, + dim, dim_out=None, stride=1, num_heads=4, dim_head=16, r=7, qkv_bias=False): + super().__init__() + self.dim = dim + self.dim_out = dim_out or dim + self.dim_k = dim_head # query depth 'k' + self.num_heads = num_heads + assert self.dim_out % num_heads == 0, ' should be divided by num_heads' + self.dim_v = self.dim_out // num_heads # value depth 'v' + self.r = r # relative position neighbourhood (lambda conv kernel size) + + self.qkv = nn.Conv2d( + dim, + num_heads * dim_head + dim_head + self.dim_v, + kernel_size=1, bias=qkv_bias) + self.norm_q = nn.BatchNorm2d(num_heads * dim_head) + self.norm_v = nn.BatchNorm2d(self.dim_v) + + # NOTE currently only supporting the local lambda convolutions for positional + self.conv_lambda = nn.Conv3d(1, dim_head, (r, r, 1), padding=(r // 2, r // 2, 0)) + + self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + trunc_normal_(self.qkv.weight, std=self.dim ** -0.5) + trunc_normal_(self.conv_lambda.weight, std=self.dim_k ** -0.5) + + def forward(self, x): + B, C, H, W = x.shape + M = H * W + + qkv = self.qkv(x) + q, k, v = torch.split(qkv, [ + self.num_heads * self.dim_k, self.dim_k, self.dim_v], dim=1) + q = self.norm_q(q).reshape(B, self.num_heads, self.dim_k, M).transpose(-1, -2) # B, num_heads, M, K + v = self.norm_v(v).reshape(B, self.dim_v, M).transpose(-1, -2) # B, M, V + k = F.softmax(k.reshape(B, self.dim_k, M), dim=-1) # B, K, M + + content_lam = k @ v # B, K, V + content_out = q @ content_lam.unsqueeze(1) # B, num_heads, M, V + + position_lam = self.conv_lambda(v.reshape(B, 1, H, W, self.dim_v)) # B, H, W, V, K + position_lam = position_lam.reshape(B, 1, self.dim_k, H * W, self.dim_v).transpose(2, 3) # B, 1, M, K, V + position_out = (q.unsqueeze(-2) @ position_lam).squeeze(-2) # B, num_heads, M, V + + out = (content_out + position_out).transpose(3, 1).reshape(B, C, H, W) # B, C (num_heads * V), H, W + out = self.pool(out) + return out diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/linear.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/linear.py new file mode 100644 index 0000000000000000000000000000000000000000..38fe3380b067ea0b275c45ffd689afdeb4598f3c --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/linear.py @@ -0,0 +1,19 @@ +""" Linear layer (alternate definition) +""" +import torch +import torch.nn.functional as F +from torch import nn as nn + + +class Linear(nn.Linear): + r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b` + + Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting + weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case. + """ + def forward(self, input: torch.Tensor) -> torch.Tensor: + if torch.jit.is_scripting(): + bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None + return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias) + else: + return F.linear(input, self.weight, self.bias) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/median_pool.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/median_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..40bd71a7a3840aaebefd2af0a99605b845054cd7 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/median_pool.py @@ -0,0 +1,49 @@ +""" Median Pool +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch.nn as nn +import torch.nn.functional as F +from .helpers import to_2tuple, to_4tuple + + +class MedianPool2d(nn.Module): + """ Median pool (usable as median filter when stride=1) module. + + Args: + kernel_size: size of pooling kernel, int or 2-tuple + stride: pool stride, int or 2-tuple + padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad + same: override padding and enforce same padding, boolean + """ + def __init__(self, kernel_size=3, stride=1, padding=0, same=False): + super(MedianPool2d, self).__init__() + self.k = to_2tuple(kernel_size) + self.stride = to_2tuple(stride) + self.padding = to_4tuple(padding) # convert to l, r, t, b + self.same = same + + def _padding(self, x): + if self.same: + ih, iw = x.size()[2:] + if ih % self.stride[0] == 0: + ph = max(self.k[0] - self.stride[0], 0) + else: + ph = max(self.k[0] - (ih % self.stride[0]), 0) + if iw % self.stride[1] == 0: + pw = max(self.k[1] - self.stride[1], 0) + else: + pw = max(self.k[1] - (iw % self.stride[1]), 0) + pl = pw // 2 + pr = pw - pl + pt = ph // 2 + pb = ph - pt + padding = (pl, pr, pt, pb) + else: + padding = self.padding + return padding + + def forward(self, x): + x = F.pad(x, self._padding(x), mode='reflect') + x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1]) + x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] + return x diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/mixed_conv2d.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/mixed_conv2d.py new file mode 100644 index 0000000000000000000000000000000000000000..fa0ce565c0a9d348d4e68165960fa77fcf7f70d7 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/mixed_conv2d.py @@ -0,0 +1,51 @@ +""" PyTorch Mixed Convolution + +Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn + +from .conv2d_same import create_conv2d_pad + + +def _split_channels(num_chan, num_groups): + split = [num_chan // num_groups for _ in range(num_groups)] + split[0] += num_chan - sum(split) + return split + + +class MixedConv2d(nn.ModuleDict): + """ Mixed Grouped Convolution + + Based on MDConv and GroupedConv in MixNet impl: + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py + """ + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, depthwise=False, **kwargs): + super(MixedConv2d, self).__init__() + + kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] + num_groups = len(kernel_size) + in_splits = _split_channels(in_channels, num_groups) + out_splits = _split_channels(out_channels, num_groups) + self.in_channels = sum(in_splits) + self.out_channels = sum(out_splits) + for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)): + conv_groups = in_ch if depthwise else 1 + # use add_module to keep key space clean + self.add_module( + str(idx), + create_conv2d_pad( + in_ch, out_ch, k, stride=stride, + padding=padding, dilation=dilation, groups=conv_groups, **kwargs) + ) + self.splits = in_splits + + def forward(self, x): + x_split = torch.split(x, self.splits, 1) + x_out = [c(x_split[i]) for i, c in enumerate(self.values())] + x = torch.cat(x_out, 1) + return x diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/mlp.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/mlp.py new file mode 100644 index 0000000000000000000000000000000000000000..05d076527cfb6f15bcf5f2830fa36777abbc5a1e --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/mlp.py @@ -0,0 +1,108 @@ +""" MLP module w/ dropout and configurable activation layer + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + + +class Mlp(nn.Module): + """ MLP as used in Vision Transformer, MLP-Mixer and related networks + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class GluMlp(nn.Module): + """ MLP w/ GLU style gating + See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202 + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + assert hidden_features % 2 == 0 + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features // 2, out_features) + self.drop = nn.Dropout(drop) + + def init_weights(self): + # override init of fc1 w/ gate portion set to weight near zero, bias=1 + fc1_mid = self.fc1.bias.shape[0] // 2 + nn.init.ones_(self.fc1.bias[fc1_mid:]) + nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-6) + + def forward(self, x): + x = self.fc1(x) + x, gates = x.chunk(2, dim=-1) + x = x * self.act(gates) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class GatedMlp(nn.Module): + """ MLP as used in gMLP + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, + gate_layer=None, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + if gate_layer is not None: + assert hidden_features % 2 == 0 + self.gate = gate_layer(hidden_features) + hidden_features = hidden_features // 2 # FIXME base reduction on gate property? + else: + self.gate = nn.Identity() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.gate(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class ConvMlp(nn.Module): + """ MLP using 1x1 convs that keeps spatial dims + """ + def __init__( + self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True) + self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity() + self.act = act_layer() + self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.norm(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + return x diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/non_local_attn.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/non_local_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..a537d60e6e575f093b93a146b83fb8e6398f6288 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/non_local_attn.py @@ -0,0 +1,143 @@ +""" Bilinear-Attention-Transform and Non-Local Attention + +Paper: `Non-Local Neural Networks With Grouped Bilinear Attentional Transforms` + - https://openaccess.thecvf.com/content_CVPR_2020/html/Chi_Non-Local_Neural_Networks_With_Grouped_Bilinear_Attentional_Transforms_CVPR_2020_paper.html +Adapted from original code: https://github.com/BA-Transform/BAT-Image-Classification +""" +import torch +from torch import nn +from torch.nn import functional as F + +from .conv_bn_act import ConvBnAct +from .helpers import make_divisible + + +class NonLocalAttn(nn.Module): + """Spatial NL block for image classification. + + This was adapted from https://github.com/BA-Transform/BAT-Image-Classification + Their NonLocal impl inspired by https://github.com/facebookresearch/video-nonlocal-net. + """ + + def __init__(self, in_channels, use_scale=True, rd_ratio=1/8, rd_channels=None, rd_divisor=8, **kwargs): + super(NonLocalAttn, self).__init__() + if rd_channels is None: + rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) + self.scale = in_channels ** -0.5 if use_scale else 1.0 + self.t = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.p = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.g = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.z = nn.Conv2d(rd_channels, in_channels, kernel_size=1, stride=1, bias=True) + self.norm = nn.BatchNorm2d(in_channels) + self.reset_parameters() + + def forward(self, x): + shortcut = x + + t = self.t(x) + p = self.p(x) + g = self.g(x) + + B, C, H, W = t.size() + t = t.view(B, C, -1).permute(0, 2, 1) + p = p.view(B, C, -1) + g = g.view(B, C, -1).permute(0, 2, 1) + + att = torch.bmm(t, p) * self.scale + att = F.softmax(att, dim=2) + x = torch.bmm(att, g) + + x = x.permute(0, 2, 1).reshape(B, C, H, W) + x = self.z(x) + x = self.norm(x) + shortcut + + return x + + def reset_parameters(self): + for name, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + if len(list(m.parameters())) > 1: + nn.init.constant_(m.bias, 0.0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 0) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.GroupNorm): + nn.init.constant_(m.weight, 0) + nn.init.constant_(m.bias, 0) + + +class BilinearAttnTransform(nn.Module): + + def __init__(self, in_channels, block_size, groups, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): + super(BilinearAttnTransform, self).__init__() + + self.conv1 = ConvBnAct(in_channels, groups, 1, act_layer=act_layer, norm_layer=norm_layer) + self.conv_p = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(block_size, 1)) + self.conv_q = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(1, block_size)) + self.conv2 = ConvBnAct(in_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.block_size = block_size + self.groups = groups + self.in_channels = in_channels + + def resize_mat(self, x, t: int): + B, C, block_size, block_size1 = x.shape + assert block_size == block_size1 + if t <= 1: + return x + x = x.view(B * C, -1, 1, 1) + x = x * torch.eye(t, t, dtype=x.dtype, device=x.device) + x = x.view(B * C, block_size, block_size, t, t) + x = torch.cat(torch.split(x, 1, dim=1), dim=3) + x = torch.cat(torch.split(x, 1, dim=2), dim=4) + x = x.view(B, C, block_size * t, block_size * t) + return x + + def forward(self, x): + assert x.shape[-1] % self.block_size == 0 and x.shape[-2] % self.block_size == 0 + B, C, H, W = x.shape + out = self.conv1(x) + rp = F.adaptive_max_pool2d(out, (self.block_size, 1)) + cp = F.adaptive_max_pool2d(out, (1, self.block_size)) + p = self.conv_p(rp).view(B, self.groups, self.block_size, self.block_size).sigmoid() + q = self.conv_q(cp).view(B, self.groups, self.block_size, self.block_size).sigmoid() + p = p / p.sum(dim=3, keepdim=True) + q = q / q.sum(dim=2, keepdim=True) + p = p.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size( + 0), self.groups, C // self.groups, self.block_size, self.block_size).contiguous() + p = p.view(B, C, self.block_size, self.block_size) + q = q.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size( + 0), self.groups, C // self.groups, self.block_size, self.block_size).contiguous() + q = q.view(B, C, self.block_size, self.block_size) + p = self.resize_mat(p, H // self.block_size) + q = self.resize_mat(q, W // self.block_size) + y = p.matmul(x) + y = y.matmul(q) + + y = self.conv2(y) + return y + + +class BatNonLocalAttn(nn.Module): + """ BAT + Adapted from: https://github.com/BA-Transform/BAT-Image-Classification + """ + + def __init__( + self, in_channels, block_size=7, groups=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, + drop_rate=0.2, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, **_): + super().__init__() + if rd_channels is None: + rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) + self.conv1 = ConvBnAct(in_channels, rd_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.ba = BilinearAttnTransform(rd_channels, block_size, groups, act_layer=act_layer, norm_layer=norm_layer) + self.conv2 = ConvBnAct(rd_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.dropout = nn.Dropout2d(p=drop_rate) + + def forward(self, x): + xl = self.conv1(x) + y = self.ba(xl) + y = self.conv2(y) + y = self.dropout(y) + return y + x diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/norm.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/norm.py new file mode 100644 index 0000000000000000000000000000000000000000..aace107b08e66cd2925ddbf4a5a4491a5899963b --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/norm.py @@ -0,0 +1,24 @@ +""" Normalization layers and wrappers +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class GroupNorm(nn.GroupNorm): + def __init__(self, num_channels, num_groups, eps=1e-5, affine=True): + # NOTE num_channels is swapped to first arg for consistency in swapping norm layers with BN + super().__init__(num_groups, num_channels, eps=eps, affine=affine) + + def forward(self, x): + return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) + + +class LayerNorm2d(nn.LayerNorm): + """ LayerNorm for channels of '2D' spatial BCHW tensors """ + def __init__(self, num_channels): + super().__init__(num_channels) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return F.layer_norm( + x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/norm_act.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/norm_act.py new file mode 100644 index 0000000000000000000000000000000000000000..02cabe88861f96345599b71a4a96edd8d115f6d3 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/norm_act.py @@ -0,0 +1,85 @@ +""" Normalization + Activation Layers +""" +import torch +from torch import nn as nn +from torch.nn import functional as F + +from .create_act import get_act_layer + + +class BatchNormAct2d(nn.BatchNorm2d): + """BatchNorm + Activation + + This module performs BatchNorm + Activation in a manner that will remain backwards + compatible with weights trained with separate bn, act. This is why we inherit from BN + instead of composing it as a .bn member. + """ + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, + apply_act=True, act_layer=nn.ReLU, inplace=True, drop_block=None): + super(BatchNormAct2d, self).__init__( + num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats) + if isinstance(act_layer, str): + act_layer = get_act_layer(act_layer) + if act_layer is not None and apply_act: + act_args = dict(inplace=True) if inplace else {} + self.act = act_layer(**act_args) + else: + self.act = nn.Identity() + + def _forward_jit(self, x): + """ A cut & paste of the contents of the PyTorch BatchNorm2d forward function + """ + # exponential_average_factor is self.momentum set to + # (when it is available) only so that if gets updated + # in ONNX graph when this node is exported to ONNX. + if self.momentum is None: + exponential_average_factor = 0.0 + else: + exponential_average_factor = self.momentum + + if self.training and self.track_running_stats: + # TODO: if statement only here to tell the jit to skip emitting this when it is None + if self.num_batches_tracked is not None: + self.num_batches_tracked += 1 + if self.momentum is None: # use cumulative moving average + exponential_average_factor = 1.0 / float(self.num_batches_tracked) + else: # use exponential moving average + exponential_average_factor = self.momentum + + x = F.batch_norm( + x, self.running_mean, self.running_var, self.weight, self.bias, + self.training or not self.track_running_stats, + exponential_average_factor, self.eps) + return x + + @torch.jit.ignore + def _forward_python(self, x): + return super(BatchNormAct2d, self).forward(x) + + def forward(self, x): + # FIXME cannot call parent forward() and maintain jit.script compatibility? + if torch.jit.is_scripting(): + x = self._forward_jit(x) + else: + x = self._forward_python(x) + x = self.act(x) + return x + + +class GroupNormAct(nn.GroupNorm): + # NOTE num_channel and num_groups order flipped for easier layer swaps / binding of fixed args + def __init__(self, num_channels, num_groups, eps=1e-5, affine=True, + apply_act=True, act_layer=nn.ReLU, inplace=True, drop_block=None): + super(GroupNormAct, self).__init__(num_groups, num_channels, eps=eps, affine=affine) + if isinstance(act_layer, str): + act_layer = get_act_layer(act_layer) + if act_layer is not None and apply_act: + act_args = dict(inplace=True) if inplace else {} + self.act = act_layer(**act_args) + else: + self.act = nn.Identity() + + def forward(self, x): + x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) + x = self.act(x) + return x diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/padding.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/padding.py new file mode 100644 index 0000000000000000000000000000000000000000..34afc37c6c59c8782ad29c7a779f58177011f891 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/padding.py @@ -0,0 +1,56 @@ +""" Padding Helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math +from typing import List, Tuple + +import torch.nn.functional as F + + +# Calculate symmetric padding for a convolution +def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int: + padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 + return padding + + +# Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution +def get_same_padding(x: int, k: int, s: int, d: int): + return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0) + + +# Can SAME padding for given args be done statically? +def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_): + return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 + + +# Dynamically pad input x with 'SAME' padding for conv with specified args +def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0): + ih, iw = x.size()[-2:] + pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1]) + if pad_h > 0 or pad_w > 0: + x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value) + return x + + +def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]: + dynamic = False + if isinstance(padding, str): + # for any string padding, the padding will be calculated for you, one of three ways + padding = padding.lower() + if padding == 'same': + # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact + if is_static_pad(kernel_size, **kwargs): + # static case, no extra overhead + padding = get_padding(kernel_size, **kwargs) + else: + # dynamic 'SAME' padding, has runtime/GPU memory overhead + padding = 0 + dynamic = True + elif padding == 'valid': + # 'VALID' padding, same as padding=0 + padding = 0 + else: + # Default to PyTorch style 'same'-ish symmetric padding + padding = get_padding(kernel_size, **kwargs) + return padding, dynamic diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/patch_embed.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/patch_embed.py new file mode 100644 index 0000000000000000000000000000000000000000..42997fb89f10d518028e064c46387f694dce9026 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/patch_embed.py @@ -0,0 +1,39 @@ +""" Image to Patch Embedding using Conv2d + +A convolution based approach to patchifying a 2D image w/ embedding projection. + +Based on the impl in https://github.com/google-research/vision_transformer + +Hacked together by / Copyright 2020 Ross Wightman +""" + +from torch import nn as nn + +from .helpers import to_2tuple + + +class PatchEmbed(nn.Module): + """ 2D Image to Patch Embedding + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.flatten = flatten + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + def forward(self, x): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCHW -> BNC + x = self.norm(x) + return x diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/pool2d_same.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/pool2d_same.py new file mode 100644 index 0000000000000000000000000000000000000000..4c2a1c44713e552be850865ada9623a1c3b1d836 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/pool2d_same.py @@ -0,0 +1,73 @@ +""" AvgPool2d w/ Same Padding + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import List, Tuple, Optional + +from .helpers import to_2tuple +from .padding import pad_same, get_padding_value + + +def avg_pool2d_same(x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), + ceil_mode: bool = False, count_include_pad: bool = True): + # FIXME how to deal with count_include_pad vs not for external padding? + x = pad_same(x, kernel_size, stride) + return F.avg_pool2d(x, kernel_size, stride, (0, 0), ceil_mode, count_include_pad) + + +class AvgPool2dSame(nn.AvgPool2d): + """ Tensorflow like 'SAME' wrapper for 2D average pooling + """ + def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True): + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0), ceil_mode, count_include_pad) + + def forward(self, x): + x = pad_same(x, self.kernel_size, self.stride) + return F.avg_pool2d( + x, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad) + + +def max_pool2d_same( + x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), + dilation: List[int] = (1, 1), ceil_mode: bool = False): + x = pad_same(x, kernel_size, stride, value=-float('inf')) + return F.max_pool2d(x, kernel_size, stride, (0, 0), dilation, ceil_mode) + + +class MaxPool2dSame(nn.MaxPool2d): + """ Tensorflow like 'SAME' wrapper for 2D max pooling + """ + def __init__(self, kernel_size: int, stride=None, padding=0, dilation=1, ceil_mode=False): + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + super(MaxPool2dSame, self).__init__(kernel_size, stride, (0, 0), dilation, ceil_mode) + + def forward(self, x): + x = pad_same(x, self.kernel_size, self.stride, value=-float('inf')) + return F.max_pool2d(x, self.kernel_size, self.stride, (0, 0), self.dilation, self.ceil_mode) + + +def create_pool2d(pool_type, kernel_size, stride=None, **kwargs): + stride = stride or kernel_size + padding = kwargs.pop('padding', '') + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, **kwargs) + if is_dynamic: + if pool_type == 'avg': + return AvgPool2dSame(kernel_size, stride=stride, **kwargs) + elif pool_type == 'max': + return MaxPool2dSame(kernel_size, stride=stride, **kwargs) + else: + assert False, f'Unsupported pool type {pool_type}' + else: + if pool_type == 'avg': + return nn.AvgPool2d(kernel_size, stride=stride, padding=padding, **kwargs) + elif pool_type == 'max': + return nn.MaxPool2d(kernel_size, stride=stride, padding=padding, **kwargs) + else: + assert False, f'Unsupported pool type {pool_type}' diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/selective_kernel.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/selective_kernel.py new file mode 100644 index 0000000000000000000000000000000000000000..f28b8d2e9ad49740081d4e1da5287e45f5ee76b8 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/selective_kernel.py @@ -0,0 +1,119 @@ +""" Selective Kernel Convolution/Attention + +Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch import nn as nn + +from .conv_bn_act import ConvBnAct +from .helpers import make_divisible + + +def _kernel_valid(k): + if isinstance(k, (list, tuple)): + for ki in k: + return _kernel_valid(ki) + assert k >= 3 and k % 2 + + +class SelectiveKernelAttn(nn.Module): + def __init__(self, channels, num_paths=2, attn_channels=32, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): + """ Selective Kernel Attention Module + + Selective Kernel attention mechanism factored out into its own module. + + """ + super(SelectiveKernelAttn, self).__init__() + self.num_paths = num_paths + self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False) + self.bn = norm_layer(attn_channels) + self.act = act_layer(inplace=True) + self.fc_select = nn.Conv2d(attn_channels, channels * num_paths, kernel_size=1, bias=False) + + def forward(self, x): + assert x.shape[1] == self.num_paths + x = x.sum(1).mean((2, 3), keepdim=True) + x = self.fc_reduce(x) + x = self.bn(x) + x = self.act(x) + x = self.fc_select(x) + B, C, H, W = x.shape + x = x.view(B, self.num_paths, C // self.num_paths, H, W) + x = torch.softmax(x, dim=1) + return x + + +class SelectiveKernel(nn.Module): + + def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, dilation=1, groups=1, + rd_ratio=1./16, rd_channels=None, rd_divisor=8, keep_3x3=True, split_input=True, + drop_block=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None): + """ Selective Kernel Convolution Module + + As described in Selective Kernel Networks (https://arxiv.org/abs/1903.06586) with some modifications. + + Largest change is the input split, which divides the input channels across each convolution path, this can + be viewed as a grouping of sorts, but the output channel counts expand to the module level value. This keeps + the parameter count from ballooning when the convolutions themselves don't have groups, but still provides + a noteworthy increase in performance over similar param count models without this attention layer. -Ross W + + Args: + in_channels (int): module input (feature) channel count + out_channels (int): module output (feature) channel count + kernel_size (int, list): kernel size for each convolution branch + stride (int): stride for convolutions + dilation (int): dilation for module as a whole, impacts dilation of each branch + groups (int): number of groups for each branch + rd_ratio (int, float): reduction factor for attention features + keep_3x3 (bool): keep all branch convolution kernels as 3x3, changing larger kernels for dilations + split_input (bool): split input channels evenly across each convolution branch, keeps param count lower, + can be viewed as grouping by path, output expands to module out_channels count + drop_block (nn.Module): drop block module + act_layer (nn.Module): activation layer to use + norm_layer (nn.Module): batchnorm/norm layer to use + """ + super(SelectiveKernel, self).__init__() + out_channels = out_channels or in_channels + kernel_size = kernel_size or [3, 5] # default to one 3x3 and one 5x5 branch. 5x5 -> 3x3 + dilation + _kernel_valid(kernel_size) + if not isinstance(kernel_size, list): + kernel_size = [kernel_size] * 2 + if keep_3x3: + dilation = [dilation * (k - 1) // 2 for k in kernel_size] + kernel_size = [3] * len(kernel_size) + else: + dilation = [dilation] * len(kernel_size) + self.num_paths = len(kernel_size) + self.in_channels = in_channels + self.out_channels = out_channels + self.split_input = split_input + if self.split_input: + assert in_channels % self.num_paths == 0 + in_channels = in_channels // self.num_paths + groups = min(out_channels, groups) + + conv_kwargs = dict( + stride=stride, groups=groups, drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, + aa_layer=aa_layer) + self.paths = nn.ModuleList([ + ConvBnAct(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs) + for k, d in zip(kernel_size, dilation)]) + + attn_channels = rd_channels or make_divisible(out_channels * rd_ratio, divisor=rd_divisor) + self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels) + self.drop_block = drop_block + + def forward(self, x): + if self.split_input: + x_split = torch.split(x, self.in_channels // self.num_paths, 1) + x_paths = [op(x_split[i]) for i, op in enumerate(self.paths)] + else: + x_paths = [op(x) for op in self.paths] + x = torch.stack(x_paths, dim=1) + x_attn = self.attn(x) + x = x * x_attn + x = torch.sum(x, dim=1) + return x diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/separable_conv.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/separable_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..1ddcb4e62409492f898ab963027a9c2229b72f64 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/separable_conv.py @@ -0,0 +1,73 @@ +""" Depthwise Separable Conv Modules + +Basic DWS convs. Other variations of DWS exist with batch norm or activations between the +DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception. + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .create_conv2d import create_conv2d +from .create_norm_act import convert_norm_act + + +class SeparableConvBnAct(nn.Module): + """ Separable Conv w/ trailing Norm and Activation + """ + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, + channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, + apply_act=True, drop_block=None): + super(SeparableConvBnAct, self).__init__() + + self.conv_dw = create_conv2d( + in_channels, int(in_channels * channel_multiplier), kernel_size, + stride=stride, dilation=dilation, padding=padding, depthwise=True) + + self.conv_pw = create_conv2d( + int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) + + norm_act_layer = convert_norm_act(norm_layer, act_layer) + self.bn = norm_act_layer(out_channels, apply_act=apply_act, drop_block=drop_block) + + @property + def in_channels(self): + return self.conv_dw.in_channels + + @property + def out_channels(self): + return self.conv_pw.out_channels + + def forward(self, x): + x = self.conv_dw(x) + x = self.conv_pw(x) + if self.bn is not None: + x = self.bn(x) + return x + + +class SeparableConv2d(nn.Module): + """ Separable Conv + """ + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, + channel_multiplier=1.0, pw_kernel_size=1): + super(SeparableConv2d, self).__init__() + + self.conv_dw = create_conv2d( + in_channels, int(in_channels * channel_multiplier), kernel_size, + stride=stride, dilation=dilation, padding=padding, depthwise=True) + + self.conv_pw = create_conv2d( + int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) + + @property + def in_channels(self): + return self.conv_dw.in_channels + + @property + def out_channels(self): + return self.conv_pw.out_channels + + def forward(self, x): + x = self.conv_dw(x) + x = self.conv_pw(x) + return x diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/space_to_depth.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/space_to_depth.py new file mode 100644 index 0000000000000000000000000000000000000000..a7e8e0b2a486d51fe3e4ab0472d89b7f1b92e1dc --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/space_to_depth.py @@ -0,0 +1,53 @@ +import torch +import torch.nn as nn + + +class SpaceToDepth(nn.Module): + def __init__(self, block_size=4): + super().__init__() + assert block_size == 4 + self.bs = block_size + + def forward(self, x): + N, C, H, W = x.size() + x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) # (N, C, H//bs, bs, W//bs, bs) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) + x = x.view(N, C * (self.bs ** 2), H // self.bs, W // self.bs) # (N, C*bs^2, H//bs, W//bs) + return x + + +@torch.jit.script +class SpaceToDepthJit(object): + def __call__(self, x: torch.Tensor): + # assuming hard-coded that block_size==4 for acceleration + N, C, H, W = x.size() + x = x.view(N, C, H // 4, 4, W // 4, 4) # (N, C, H//bs, bs, W//bs, bs) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) + x = x.view(N, C * 16, H // 4, W // 4) # (N, C*bs^2, H//bs, W//bs) + return x + + +class SpaceToDepthModule(nn.Module): + def __init__(self, no_jit=False): + super().__init__() + if not no_jit: + self.op = SpaceToDepthJit() + else: + self.op = SpaceToDepth() + + def forward(self, x): + return self.op(x) + + +class DepthToSpace(nn.Module): + + def __init__(self, block_size): + super().__init__() + self.bs = block_size + + def forward(self, x): + N, C, H, W = x.size() + x = x.view(N, self.bs, self.bs, C // (self.bs ** 2), H, W) # (N, bs, bs, C//bs^2, H, W) + x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # (N, C//bs^2, H, bs, W, bs) + x = x.view(N, C // (self.bs ** 2), H * self.bs, W * self.bs) # (N, C//bs^2, H * bs, W * bs) + return x diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/split_attn.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/split_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..dde601befa933727e169d9b84b035cf1f035e67c --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/split_attn.py @@ -0,0 +1,85 @@ +""" Split Attention Conv2d (for ResNeSt Models) + +Paper: `ResNeSt: Split-Attention Networks` - /https://arxiv.org/abs/2004.08955 + +Adapted from original PyTorch impl at https://github.com/zhanghang1989/ResNeSt + +Modified for torchscript compat, performance, and consistency with timm by Ross Wightman +""" +import torch +import torch.nn.functional as F +from torch import nn + +from .helpers import make_divisible + + +class RadixSoftmax(nn.Module): + def __init__(self, radix, cardinality): + super(RadixSoftmax, self).__init__() + self.radix = radix + self.cardinality = cardinality + + def forward(self, x): + batch = x.size(0) + if self.radix > 1: + x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) + x = F.softmax(x, dim=1) + x = x.reshape(batch, -1) + else: + x = torch.sigmoid(x) + return x + + +class SplitAttn(nn.Module): + """Split-Attention (aka Splat) + """ + def __init__(self, in_channels, out_channels=None, kernel_size=3, stride=1, padding=None, + dilation=1, groups=1, bias=False, radix=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, + act_layer=nn.ReLU, norm_layer=None, drop_block=None, **kwargs): + super(SplitAttn, self).__init__() + out_channels = out_channels or in_channels + self.radix = radix + self.drop_block = drop_block + mid_chs = out_channels * radix + if rd_channels is None: + attn_chs = make_divisible(in_channels * radix * rd_ratio, min_value=32, divisor=rd_divisor) + else: + attn_chs = rd_channels * radix + + padding = kernel_size // 2 if padding is None else padding + self.conv = nn.Conv2d( + in_channels, mid_chs, kernel_size, stride, padding, dilation, + groups=groups * radix, bias=bias, **kwargs) + self.bn0 = norm_layer(mid_chs) if norm_layer else nn.Identity() + self.act0 = act_layer(inplace=True) + self.fc1 = nn.Conv2d(out_channels, attn_chs, 1, groups=groups) + self.bn1 = norm_layer(attn_chs) if norm_layer else nn.Identity() + self.act1 = act_layer(inplace=True) + self.fc2 = nn.Conv2d(attn_chs, mid_chs, 1, groups=groups) + self.rsoftmax = RadixSoftmax(radix, groups) + + def forward(self, x): + x = self.conv(x) + x = self.bn0(x) + if self.drop_block is not None: + x = self.drop_block(x) + x = self.act0(x) + + B, RC, H, W = x.shape + if self.radix > 1: + x = x.reshape((B, self.radix, RC // self.radix, H, W)) + x_gap = x.sum(dim=1) + else: + x_gap = x + x_gap = x_gap.mean((2, 3), keepdim=True) + x_gap = self.fc1(x_gap) + x_gap = self.bn1(x_gap) + x_gap = self.act1(x_gap) + x_attn = self.fc2(x_gap) + + x_attn = self.rsoftmax(x_attn).view(B, -1, 1, 1) + if self.radix > 1: + out = (x * x_attn.reshape((B, self.radix, RC // self.radix, 1, 1))).sum(dim=1) + else: + out = x * x_attn + return out.contiguous() diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/split_batchnorm.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/split_batchnorm.py new file mode 100644 index 0000000000000000000000000000000000000000..830781b335161f8d6dd74c9458070bb1fa88a918 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/split_batchnorm.py @@ -0,0 +1,75 @@ +""" Split BatchNorm + +A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through +a separate BN layer. The first split is passed through the parent BN layers with weight/bias +keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn' +namespace. + +This allows easily removing the auxiliary BN layers after training to efficiently +achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2, +'Disentangled Learning via An Auxiliary BN' + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn + + +class SplitBatchNorm2d(torch.nn.BatchNorm2d): + + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, + track_running_stats=True, num_splits=2): + super().__init__(num_features, eps, momentum, affine, track_running_stats) + assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)' + self.num_splits = num_splits + self.aux_bn = nn.ModuleList([ + nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)]) + + def forward(self, input: torch.Tensor): + if self.training: # aux BN only relevant while training + split_size = input.shape[0] // self.num_splits + assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits" + split_input = input.split(split_size) + x = [super().forward(split_input[0])] + for i, a in enumerate(self.aux_bn): + x.append(a(split_input[i + 1])) + return torch.cat(x, dim=0) + else: + return super().forward(input) + + +def convert_splitbn_model(module, num_splits=2): + """ + Recursively traverse module and its children to replace all instances of + ``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchnorm2d`. + Args: + module (torch.nn.Module): input module + num_splits: number of separate batchnorm layers to split input across + Example:: + >>> # model is an instance of torch.nn.Module + >>> model = timm.models.convert_splitbn_model(model, num_splits=2) + """ + mod = module + if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm): + return module + if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): + mod = SplitBatchNorm2d( + module.num_features, module.eps, module.momentum, module.affine, + module.track_running_stats, num_splits=num_splits) + mod.running_mean = module.running_mean + mod.running_var = module.running_var + mod.num_batches_tracked = module.num_batches_tracked + if module.affine: + mod.weight.data = module.weight.data.clone().detach() + mod.bias.data = module.bias.data.clone().detach() + for aux in mod.aux_bn: + aux.running_mean = module.running_mean.clone() + aux.running_var = module.running_var.clone() + aux.num_batches_tracked = module.num_batches_tracked.clone() + if module.affine: + aux.weight.data = module.weight.data.clone().detach() + aux.bias.data = module.bias.data.clone().detach() + for name, child in module.named_children(): + mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits)) + del module + return mod diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/squeeze_excite.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/squeeze_excite.py new file mode 100644 index 0000000000000000000000000000000000000000..e5da29ef166de27705cc160f729b6e3b45061c59 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/squeeze_excite.py @@ -0,0 +1,74 @@ +""" Squeeze-and-Excitation Channel Attention + +An SE implementation originally based on PyTorch SE-Net impl. +Has since evolved with additional functionality / configuration. + +Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 + +Also included is Effective Squeeze-Excitation (ESE). +Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + +Hacked together by / Copyright 2021 Ross Wightman +""" +from torch import nn as nn + +from .create_act import create_act_layer +from .helpers import make_divisible + + +class SEModule(nn.Module): + """ SE Module as defined in original SE-Nets with a few additions + Additions include: + * divisor can be specified to keep channels % div == 0 (default: 8) + * reduction channels can be specified directly by arg (if rd_channels is set) + * reduction channels can be specified by float rd_ratio (default: 1/16) + * global max pooling can be added to the squeeze aggregation + * customizable activation, normalization, and gate layer + """ + def __init__( + self, channels, rd_ratio=1. / 16, rd_channels=None, rd_divisor=8, add_maxpool=False, + act_layer=nn.ReLU, norm_layer=None, gate_layer='sigmoid'): + super(SEModule, self).__init__() + self.add_maxpool = add_maxpool + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) + self.fc1 = nn.Conv2d(channels, rd_channels, kernel_size=1, bias=True) + self.bn = norm_layer(rd_channels) if norm_layer else nn.Identity() + self.act = create_act_layer(act_layer, inplace=True) + self.fc2 = nn.Conv2d(rd_channels, channels, kernel_size=1, bias=True) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) + x_se = self.fc1(x_se) + x_se = self.act(self.bn(x_se)) + x_se = self.fc2(x_se) + return x * self.gate(x_se) + + +SqueezeExcite = SEModule # alias + + +class EffectiveSEModule(nn.Module): + """ 'Effective Squeeze-Excitation + From `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + """ + def __init__(self, channels, add_maxpool=False, gate_layer='hard_sigmoid', **_): + super(EffectiveSEModule, self).__init__() + self.add_maxpool = add_maxpool + self.fc = nn.Conv2d(channels, channels, kernel_size=1, padding=0) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) + x_se = self.fc(x_se) + return x * self.gate(x_se) + + +EffectiveSqueezeExcite = EffectiveSEModule # alias diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/std_conv.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/std_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..3ccc16e1197a41440add454a40ed3146ed0b6211 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/std_conv.py @@ -0,0 +1,133 @@ +""" Convolution with Weight Standardization (StdConv and ScaledStdConv) + +StdConv: +@article{weightstandardization, + author = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille}, + title = {Weight Standardization}, + journal = {arXiv preprint arXiv:1903.10520}, + year = {2019}, +} +Code: https://github.com/joe-siyuan-qiao/WeightStandardization + +ScaledStdConv: +Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 +Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets + +Hacked together by / copyright Ross Wightman, 2021. +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .padding import get_padding, get_padding_value, pad_same + + +class StdConv2d(nn.Conv2d): + """Conv2d with Weight Standardization. Used for BiT ResNet-V2 models. + + Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` - + https://arxiv.org/abs/1903.10520v2 + """ + def __init__( + self, in_channel, out_channels, kernel_size, stride=1, padding=None, + dilation=1, groups=1, bias=False, eps=1e-6): + if padding is None: + padding = get_padding(kernel_size, stride, dilation) + super().__init__( + in_channel, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=groups, bias=bias) + self.eps = eps + + def forward(self, x): + weight = F.batch_norm( + self.weight.view(1, self.out_channels, -1), None, None, + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + return x + + +class StdConv2dSame(nn.Conv2d): + """Conv2d with Weight Standardization. TF compatible SAME padding. Used for ViT Hybrid model. + + Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` - + https://arxiv.org/abs/1903.10520v2 + """ + def __init__( + self, in_channel, out_channels, kernel_size, stride=1, padding='SAME', + dilation=1, groups=1, bias=False, eps=1e-6): + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) + super().__init__( + in_channel, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.same_pad = is_dynamic + self.eps = eps + + def forward(self, x): + if self.same_pad: + x = pad_same(x, self.kernel_size, self.stride, self.dilation) + weight = F.batch_norm( + self.weight.view(1, self.out_channels, -1), None, None, + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + return x + + +class ScaledStdConv2d(nn.Conv2d): + """Conv2d layer with Scaled Weight Standardization. + + Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - + https://arxiv.org/abs/2101.08692 + + NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor. + """ + + def __init__( + self, in_channels, out_channels, kernel_size, stride=1, padding=None, + dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0): + if padding is None: + padding = get_padding(kernel_size, stride, dilation) + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) + self.scale = gamma * self.weight[0].numel() ** -0.5 # gamma * 1 / sqrt(fan-in) + self.eps = eps + + def forward(self, x): + weight = F.batch_norm( + self.weight.view(1, self.out_channels, -1), None, None, + weight=(self.gain * self.scale).view(-1), + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + +class ScaledStdConv2dSame(nn.Conv2d): + """Conv2d layer with Scaled Weight Standardization and Tensorflow-like SAME padding support + + Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - + https://arxiv.org/abs/2101.08692 + + NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor. + """ + + def __init__( + self, in_channels, out_channels, kernel_size, stride=1, padding='SAME', + dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0): + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) + self.scale = gamma * self.weight[0].numel() ** -0.5 + self.same_pad = is_dynamic + self.eps = eps + + def forward(self, x): + if self.same_pad: + x = pad_same(x, self.kernel_size, self.stride, self.dilation) + weight = F.batch_norm( + self.weight.view(1, self.out_channels, -1), None, None, + weight=(self.gain * self.scale).view(-1), + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/test_time_pool.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/test_time_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..98c0bf53a74eb954a25b96d84712ef974eb8ea3b --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/test_time_pool.py @@ -0,0 +1,52 @@ +""" Test Time Pooling (Average-Max Pool) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import logging +from torch import nn +import torch.nn.functional as F + +from .adaptive_avgmax_pool import adaptive_avgmax_pool2d + + +_logger = logging.getLogger(__name__) + + +class TestTimePoolHead(nn.Module): + def __init__(self, base, original_pool=7): + super(TestTimePoolHead, self).__init__() + self.base = base + self.original_pool = original_pool + base_fc = self.base.get_classifier() + if isinstance(base_fc, nn.Conv2d): + self.fc = base_fc + else: + self.fc = nn.Conv2d( + self.base.num_features, self.base.num_classes, kernel_size=1, bias=True) + self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size())) + self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size())) + self.base.reset_classifier(0) # delete original fc layer + + def forward(self, x): + x = self.base.forward_features(x) + x = F.avg_pool2d(x, kernel_size=self.original_pool, stride=1) + x = self.fc(x) + x = adaptive_avgmax_pool2d(x, 1) + return x.view(x.size(0), -1) + + +def apply_test_time_pool(model, config, use_test_size=True): + test_time_pool = False + if not hasattr(model, 'default_cfg') or not model.default_cfg: + return model, False + if use_test_size and 'test_input_size' in model.default_cfg: + df_input_size = model.default_cfg['test_input_size'] + else: + df_input_size = model.default_cfg['input_size'] + if config['input_size'][-1] > df_input_size[-1] and config['input_size'][-2] > df_input_size[-2]: + _logger.info('Target input size %s > pretrained default %s, using test time pooling' % + (str(config['input_size'][-2:]), str(df_input_size[-2:]))) + model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size']) + test_time_pool = True + return model, test_time_pool diff --git a/testbed/huggingface__pytorch-image-models/timm/models/layers/weight_init.py b/testbed/huggingface__pytorch-image-models/timm/models/layers/weight_init.py new file mode 100644 index 0000000000000000000000000000000000000000..305a2fd067e7104e58b9b5ff70d96e89a06050af --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/layers/weight_init.py @@ -0,0 +1,89 @@ +import torch +import math +import warnings + +from torch.nn.init import _calculate_fan_in_and_fan_out + + +def _no_grad_trunc_normal_(tensor, mean, std, a, b): + # Cut & paste from PyTorch official master until it's in a few official releases - RW + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1. + math.erf(x / math.sqrt(2.))) / 2. + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " + "The distribution of values may be incorrect.", + stacklevel=2) + + with torch.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. + tensor.uniform_(2 * l - 1, 2 * u - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): + # type: (Tensor, float, float, float, float) -> Tensor + r"""Fills the input Tensor with values drawn from a truncated + normal distribution. The values are effectively drawn from the + normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` + with values outside :math:`[a, b]` redrawn until they are within + the bounds. The method used for generating the random values works + best when :math:`a \leq \text{mean} \leq b`. + Args: + tensor: an n-dimensional `torch.Tensor` + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + a: the minimum cutoff value + b: the maximum cutoff value + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.trunc_normal_(w) + """ + return _no_grad_trunc_normal_(tensor, mean, std, a, b) + + +def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'): + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) + if mode == 'fan_in': + denom = fan_in + elif mode == 'fan_out': + denom = fan_out + elif mode == 'fan_avg': + denom = (fan_in + fan_out) / 2 + + variance = scale / denom + + if distribution == "truncated_normal": + # constant is stddev of standard normal truncated to (-2, 2) + trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978) + elif distribution == "normal": + tensor.normal_(std=math.sqrt(variance)) + elif distribution == "uniform": + bound = math.sqrt(3 * variance) + tensor.uniform_(-bound, bound) + else: + raise ValueError(f"invalid distribution {distribution}") + + +def lecun_normal_(tensor): + variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal') diff --git a/testbed/huggingface__pytorch-image-models/timm/models/levit.py b/testbed/huggingface__pytorch-image-models/timm/models/levit.py new file mode 100644 index 0000000000000000000000000000000000000000..9987e4ba987ea66d99b9627f59e55526f9ed8655 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/levit.py @@ -0,0 +1,563 @@ +""" LeViT + +Paper: `LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference` + - https://arxiv.org/abs/2104.01136 + +@article{graham2021levit, + title={LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference}, + author={Benjamin Graham and Alaaeldin El-Nouby and Hugo Touvron and Pierre Stock and Armand Joulin and Herv\'e J\'egou and Matthijs Douze}, + journal={arXiv preprint arXiv:22104.01136}, + year={2021} +} + +Adapted from official impl at https://github.com/facebookresearch/LeViT, original copyright bellow. + +This version combines both conv/linear models and fixes torchscript compatibility. + +Modifications by/coyright Copyright 2021 Ross Wightman +""" + +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. + +# Modified from +# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py +# Copyright 2020 Ross Wightman, Apache-2.0 License +import itertools +from copy import deepcopy +from functools import partial +from typing import Dict + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import to_ntuple, get_act_layer +from .vision_transformer import trunc_normal_ +from .registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.0.c', 'classifier': ('head.l', 'head_dist.l'), + **kwargs + } + + +default_cfgs = dict( + levit_128s=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-128S-96703c44.pth' + ), + levit_128=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-128-b88c2750.pth' + ), + levit_192=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-192-92712e41.pth' + ), + levit_256=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-256-13b5763e.pth' + ), + levit_384=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-384-9bdaf2e2.pth' + ), +) + +model_cfgs = dict( + levit_128s=dict( + embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 6, 8), depth=(2, 3, 4)), + levit_128=dict( + embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 8, 12), depth=(4, 4, 4)), + levit_192=dict( + embed_dim=(192, 288, 384), key_dim=32, num_heads=(3, 5, 6), depth=(4, 4, 4)), + levit_256=dict( + embed_dim=(256, 384, 512), key_dim=32, num_heads=(4, 6, 8), depth=(4, 4, 4)), + levit_384=dict( + embed_dim=(384, 512, 768), key_dim=32, num_heads=(6, 9, 12), depth=(4, 4, 4)), +) + +__all__ = ['Levit'] + + +@register_model +def levit_128s(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_128s', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_128(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_128', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_192(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_192', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_256(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_256', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_384(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_384', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +class ConvNorm(nn.Sequential): + def __init__( + self, a, b, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1, resolution=-10000): + super().__init__() + self.add_module('c', nn.Conv2d(a, b, ks, stride, pad, dilation, groups, bias=False)) + bn = nn.BatchNorm2d(b) + nn.init.constant_(bn.weight, bn_weight_init) + nn.init.constant_(bn.bias, 0) + self.add_module('bn', bn) + + @torch.no_grad() + def fuse(self): + c, bn = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = c.weight * w[:, None, None, None] + b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 + m = nn.Conv2d( + w.size(1), w.size(0), w.shape[2:], stride=self.c.stride, + padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +class LinearNorm(nn.Sequential): + def __init__(self, a, b, bn_weight_init=1, resolution=-100000): + super().__init__() + self.add_module('c', nn.Linear(a, b, bias=False)) + bn = nn.BatchNorm1d(b) + nn.init.constant_(bn.weight, bn_weight_init) + nn.init.constant_(bn.bias, 0) + self.add_module('bn', bn) + + @torch.no_grad() + def fuse(self): + l, bn = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = l.weight * w[:, None] + b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 + m = nn.Linear(w.size(1), w.size(0)) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + def forward(self, x): + x = self.c(x) + return self.bn(x.flatten(0, 1)).reshape_as(x) + + +class NormLinear(nn.Sequential): + def __init__(self, a, b, bias=True, std=0.02): + super().__init__() + self.add_module('bn', nn.BatchNorm1d(a)) + l = nn.Linear(a, b, bias=bias) + trunc_normal_(l.weight, std=std) + if bias: + nn.init.constant_(l.bias, 0) + self.add_module('l', l) + + @torch.no_grad() + def fuse(self): + bn, l = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + b = bn.bias - self.bn.running_mean * self.bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = l.weight * w[None, :] + if l.bias is None: + b = b @ self.l.weight.T + else: + b = (l.weight @ b[:, None]).view(-1) + self.l.bias + m = nn.Linear(w.size(1), w.size(0)) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +def stem_b16(in_chs, out_chs, activation, resolution=224): + return nn.Sequential( + ConvNorm(in_chs, out_chs // 8, 3, 2, 1, resolution=resolution), + activation(), + ConvNorm(out_chs // 8, out_chs // 4, 3, 2, 1, resolution=resolution // 2), + activation(), + ConvNorm(out_chs // 4, out_chs // 2, 3, 2, 1, resolution=resolution // 4), + activation(), + ConvNorm(out_chs // 2, out_chs, 3, 2, 1, resolution=resolution // 8)) + + +class Residual(nn.Module): + def __init__(self, m, drop): + super().__init__() + self.m = m + self.drop = drop + + def forward(self, x): + if self.training and self.drop > 0: + return x + self.m(x) * torch.rand( + x.size(0), 1, 1, device=x.device).ge_(self.drop).div(1 - self.drop).detach() + else: + return x + self.m(x) + + +class Subsample(nn.Module): + def __init__(self, stride, resolution): + super().__init__() + self.stride = stride + self.resolution = resolution + + def forward(self, x): + B, N, C = x.shape + x = x.view(B, self.resolution, self.resolution, C)[:, ::self.stride, ::self.stride] + return x.reshape(B, -1, C) + + +class Attention(nn.Module): + ab: Dict[str, torch.Tensor] + + def __init__( + self, dim, key_dim, num_heads=8, attn_ratio=4, act_layer=None, resolution=14, use_conv=False): + super().__init__() + + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + self.nh_kd = nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = int(attn_ratio * key_dim) * num_heads + self.attn_ratio = attn_ratio + self.use_conv = use_conv + ln_layer = ConvNorm if self.use_conv else LinearNorm + h = self.dh + nh_kd * 2 + self.qkv = ln_layer(dim, h, resolution=resolution) + self.proj = nn.Sequential( + act_layer(), + ln_layer(self.dh, dim, bn_weight_init=0, resolution=resolution)) + + points = list(itertools.product(range(resolution), range(resolution))) + N = len(points) + attention_offsets = {} + idxs = [] + for p1 in points: + for p2 in points: + offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = nn.Parameter(torch.zeros(num_heads, len(attention_offsets))) + self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N)) + self.ab = {} + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and self.ab: + self.ab = {} # clear ab cache + + def get_attention_biases(self, device: torch.device) -> torch.Tensor: + if self.training: + return self.attention_biases[:, self.attention_bias_idxs] + else: + device_key = str(device) + if device_key not in self.ab: + self.ab[device_key] = self.attention_biases[:, self.attention_bias_idxs] + return self.ab[device_key] + + def forward(self, x): # x (B,C,H,W) + if self.use_conv: + B, C, H, W = x.shape + q, k, v = self.qkv(x).view(B, self.num_heads, -1, H * W).split([self.key_dim, self.key_dim, self.d], dim=2) + + attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W) + else: + B, N, C = x.shape + qkv = self.qkv(x) + q, k, v = qkv.view(B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.d], dim=3) + q = q.permute(0, 2, 1, 3) + k = k.permute(0, 2, 1, 3) + v = v.permute(0, 2, 1, 3) + + attn = q @ k.transpose(-2, -1) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh) + x = self.proj(x) + return x + + +class AttentionSubsample(nn.Module): + ab: Dict[str, torch.Tensor] + + def __init__( + self, in_dim, out_dim, key_dim, num_heads=8, attn_ratio=2, + act_layer=None, stride=2, resolution=14, resolution_=7, use_conv=False): + super().__init__() + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + self.nh_kd = nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = self.d * self.num_heads + self.attn_ratio = attn_ratio + self.resolution_ = resolution_ + self.resolution_2 = resolution_ ** 2 + self.use_conv = use_conv + if self.use_conv: + ln_layer = ConvNorm + sub_layer = partial(nn.AvgPool2d, kernel_size=1, padding=0) + else: + ln_layer = LinearNorm + sub_layer = partial(Subsample, resolution=resolution) + + h = self.dh + nh_kd + self.kv = ln_layer(in_dim, h, resolution=resolution) + self.q = nn.Sequential( + sub_layer(stride=stride), + ln_layer(in_dim, nh_kd, resolution=resolution_)) + self.proj = nn.Sequential( + act_layer(), + ln_layer(self.dh, out_dim, resolution=resolution_)) + + self.stride = stride + self.resolution = resolution + points = list(itertools.product(range(resolution), range(resolution))) + points_ = list(itertools.product(range(resolution_), range(resolution_))) + N = len(points) + N_ = len(points_) + attention_offsets = {} + idxs = [] + for p1 in points_: + for p2 in points: + size = 1 + offset = ( + abs(p1[0] * stride - p2[0] + (size - 1) / 2), + abs(p1[1] * stride - p2[1] + (size - 1) / 2)) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = nn.Parameter(torch.zeros(num_heads, len(attention_offsets))) + self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N_, N)) + self.ab = {} # per-device attention_biases cache + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and self.ab: + self.ab = {} # clear ab cache + + def get_attention_biases(self, device: torch.device) -> torch.Tensor: + if self.training: + return self.attention_biases[:, self.attention_bias_idxs] + else: + device_key = str(device) + if device_key not in self.ab: + self.ab[device_key] = self.attention_biases[:, self.attention_bias_idxs] + return self.ab[device_key] + + def forward(self, x): + if self.use_conv: + B, C, H, W = x.shape + k, v = self.kv(x).view(B, self.num_heads, -1, H * W).split([self.key_dim, self.d], dim=2) + q = self.q(x).view(B, self.num_heads, self.key_dim, self.resolution_2) + + attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (v @ attn.transpose(-2, -1)).reshape(B, -1, self.resolution_, self.resolution_) + else: + B, N, C = x.shape + k, v = self.kv(x).view(B, N, self.num_heads, -1).split([self.key_dim, self.d], dim=3) + k = k.permute(0, 2, 1, 3) # BHNC + v = v.permute(0, 2, 1, 3) # BHNC + q = self.q(x).view(B, self.resolution_2, self.num_heads, self.key_dim).permute(0, 2, 1, 3) + + attn = q @ k.transpose(-2, -1) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, -1, self.dh) + x = self.proj(x) + return x + + +class Levit(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + + NOTE: distillation is defaulted to True since pretrained weights use it, will cause problems + w/ train scripts that don't take tuple outputs, + """ + + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + embed_dim=(192,), + key_dim=64, + depth=(12,), + num_heads=(3,), + attn_ratio=2, + mlp_ratio=2, + hybrid_backbone=None, + down_ops=None, + act_layer='hard_swish', + attn_act_layer='hard_swish', + distillation=True, + use_conv=False, + drop_rate=0., + drop_path_rate=0.): + super().__init__() + act_layer = get_act_layer(act_layer) + attn_act_layer = get_act_layer(attn_act_layer) + if isinstance(img_size, tuple): + # FIXME origin impl passes single img/res dim through whole hierarchy, + # not sure this model will be used enough to spend time fixing it. + assert img_size[0] == img_size[1] + img_size = img_size[0] + self.num_classes = num_classes + self.num_features = embed_dim[-1] + self.embed_dim = embed_dim + N = len(embed_dim) + assert len(depth) == len(num_heads) == N + key_dim = to_ntuple(N)(key_dim) + attn_ratio = to_ntuple(N)(attn_ratio) + mlp_ratio = to_ntuple(N)(mlp_ratio) + down_ops = down_ops or ( + # ('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride) + ('Subsample', key_dim[0], embed_dim[0] // key_dim[0], 4, 2, 2), + ('Subsample', key_dim[0], embed_dim[1] // key_dim[1], 4, 2, 2), + ('',) + ) + self.distillation = distillation + self.use_conv = use_conv + ln_layer = ConvNorm if self.use_conv else LinearNorm + + self.patch_embed = hybrid_backbone or stem_b16(in_chans, embed_dim[0], activation=act_layer) + + self.blocks = [] + resolution = img_size // patch_size + for i, (ed, kd, dpth, nh, ar, mr, do) in enumerate( + zip(embed_dim, key_dim, depth, num_heads, attn_ratio, mlp_ratio, down_ops)): + for _ in range(dpth): + self.blocks.append( + Residual( + Attention( + ed, kd, nh, attn_ratio=ar, act_layer=attn_act_layer, + resolution=resolution, use_conv=use_conv), + drop_path_rate)) + if mr > 0: + h = int(ed * mr) + self.blocks.append( + Residual(nn.Sequential( + ln_layer(ed, h, resolution=resolution), + act_layer(), + ln_layer(h, ed, bn_weight_init=0, resolution=resolution), + ), drop_path_rate)) + if do[0] == 'Subsample': + # ('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride) + resolution_ = (resolution - 1) // do[5] + 1 + self.blocks.append( + AttentionSubsample( + *embed_dim[i:i + 2], key_dim=do[1], num_heads=do[2], + attn_ratio=do[3], act_layer=attn_act_layer, stride=do[5], + resolution=resolution, resolution_=resolution_, use_conv=use_conv)) + resolution = resolution_ + if do[4] > 0: # mlp_ratio + h = int(embed_dim[i + 1] * do[4]) + self.blocks.append( + Residual(nn.Sequential( + ln_layer(embed_dim[i + 1], h, resolution=resolution), + act_layer(), + ln_layer(h, embed_dim[i + 1], bn_weight_init=0, resolution=resolution), + ), drop_path_rate)) + self.blocks = nn.Sequential(*self.blocks) + + # Classifier head + self.head = NormLinear(embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity() + self.head_dist = None + if distillation: + self.head_dist = NormLinear(embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity() + + @torch.jit.ignore + def no_weight_decay(self): + return {x for x in self.state_dict().keys() if 'attention_biases' in x} + + def get_classifier(self): + if self.head_dist is None: + return self.head + else: + return self.head, self.head_dist + + def reset_classifier(self, num_classes, global_pool='', distillation=None): + self.num_classes = num_classes + self.head = NormLinear(self.embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity() + if distillation is not None: + self.distillation = distillation + if self.distillation: + self.head_dist = NormLinear(self.embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity() + else: + self.head_dist = None + + def forward_features(self, x): + x = self.patch_embed(x) + if not self.use_conv: + x = x.flatten(2).transpose(1, 2) + x = self.blocks(x) + x = x.mean((-2, -1)) if self.use_conv else x.mean(1) + return x + + def forward(self, x): + x = self.forward_features(x) + if self.head_dist is not None: + x, x_dist = self.head(x), self.head_dist(x) + if self.training and not torch.jit.is_scripting(): + return x, x_dist + else: + # during inference, return the average of both classifier predictions + return (x + x_dist) / 2 + else: + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + if 'model' in state_dict: + # For deit models + state_dict = state_dict['model'] + D = model.state_dict() + for k in state_dict.keys(): + if k in D and D[k].ndim == 4 and state_dict[k].ndim == 2: + state_dict[k] = state_dict[k][:, :, None, None] + return state_dict + + +def create_levit(variant, pretrained=False, default_cfg=None, fuse=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model_cfg = dict(**model_cfgs[variant], **kwargs) + model = build_model_with_cfg( + Levit, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **model_cfg) + #if fuse: + # utils.replace_batchnorm(model) + return model + diff --git a/testbed/huggingface__pytorch-image-models/timm/models/mlp_mixer.py b/testbed/huggingface__pytorch-image-models/timm/models/mlp_mixer.py new file mode 100644 index 0000000000000000000000000000000000000000..f128b9c916a68839a11fbd3ed6409743ac7a6a4c --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/mlp_mixer.py @@ -0,0 +1,625 @@ +""" MLP-Mixer, ResMLP, and gMLP in PyTorch + +This impl originally based on MLP-Mixer paper. + +Official JAX impl: https://github.com/google-research/vision_transformer/blob/linen/vit_jax/models_mixer.py + +Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + +@article{tolstikhin2021, + title={MLP-Mixer: An all-MLP Architecture for Vision}, + author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and Unterthiner, + Thomas and Yung, Jessica and Keysers, Daniel and Uszkoreit, Jakob and Lucic, Mario and Dosovitskiy, Alexey}, + journal={arXiv preprint arXiv:2105.01601}, + year={2021} +} + +Also supporting ResMlp, and a preliminary (not verified) implementations of gMLP + +Code: https://github.com/facebookresearch/deit +Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 +@misc{touvron2021resmlp, + title={ResMLP: Feedforward networks for image classification with data-efficient training}, + author={Hugo Touvron and Piotr Bojanowski and Mathilde Caron and Matthieu Cord and Alaaeldin El-Nouby and + Edouard Grave and Armand Joulin and Gabriel Synnaeve and Jakob Verbeek and Hervé Jégou}, + year={2021}, + eprint={2105.03404}, +} + +Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 +@misc{liu2021pay, + title={Pay Attention to MLPs}, + author={Hanxiao Liu and Zihang Dai and David R. So and Quoc V. Le}, + year={2021}, + eprint={2105.08050}, +} + +A thank you to paper authors for releasing code and weights. + +Hacked together by / Copyright 2021 Ross Wightman +""" +import math +from copy import deepcopy +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg, named_apply +from .layers import PatchEmbed, Mlp, GluMlp, GatedMlp, DropPath, lecun_normal_, to_2tuple +from .registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': 0.875, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + 'first_conv': 'stem.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = dict( + mixer_s32_224=_cfg(), + mixer_s16_224=_cfg(), + mixer_b32_224=_cfg(), + mixer_b16_224=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224-76587d61.pth', + ), + mixer_b16_224_in21k=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224_in21k-617b3de2.pth', + num_classes=21843 + ), + mixer_l32_224=_cfg(), + mixer_l16_224=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224-92f9adc4.pth', + ), + mixer_l16_224_in21k=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224_in21k-846aa33c.pth', + num_classes=21843 + ), + + # Mixer ImageNet-21K-P pretraining + mixer_b16_224_miil_in21k=_cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mixer_b16_224_miil_in21k.pth', + mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', num_classes=11221, + ), + mixer_b16_224_miil=_cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mixer_b16_224_miil.pth', + mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', + ), + + gmixer_12_224=_cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + gmixer_24_224=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmixer_24_224_raa-7daf7ae6.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + resmlp_12_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_12_no_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_24_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_24_no_dist.pth', + #url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resmlp_24_224_raa-a8256759.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_36_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_36_no_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_big_24_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_no_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + resmlp_12_distilled_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_24_distilled_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_36_distilled_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_36_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_big_24_distilled_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + resmlp_big_24_224_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_22k.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + gmlp_ti16_224=_cfg(), + gmlp_s16_224=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmlp_s16_224_raa-10536d42.pth', + ), + gmlp_b16_224=_cfg(), +) + + +class MixerBlock(nn.Module): + """ Residual Block w/ token mixing and channel MLPs + Based on: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + def __init__( + self, dim, seq_len, mlp_ratio=(0.5, 4.0), mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop=0., drop_path=0.): + super().__init__() + tokens_dim, channels_dim = [int(x * dim) for x in to_2tuple(mlp_ratio)] + self.norm1 = norm_layer(dim) + self.mlp_tokens = mlp_layer(seq_len, tokens_dim, act_layer=act_layer, drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.mlp_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) + x = x + self.drop_path(self.mlp_channels(self.norm2(x))) + return x + + +class Affine(nn.Module): + def __init__(self, dim): + super().__init__() + self.alpha = nn.Parameter(torch.ones((1, 1, dim))) + self.beta = nn.Parameter(torch.zeros((1, 1, dim))) + + def forward(self, x): + return torch.addcmul(self.beta, self.alpha, x) + + +class ResBlock(nn.Module): + """ Residual MLP block w/ LayerScale and Affine 'norm' + + Based on: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + def __init__( + self, dim, seq_len, mlp_ratio=4, mlp_layer=Mlp, norm_layer=Affine, + act_layer=nn.GELU, init_values=1e-4, drop=0., drop_path=0.): + super().__init__() + channel_dim = int(dim * mlp_ratio) + self.norm1 = norm_layer(dim) + self.linear_tokens = nn.Linear(seq_len, seq_len) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, drop=drop) + self.ls1 = nn.Parameter(init_values * torch.ones(dim)) + self.ls2 = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + x = x + self.drop_path(self.ls1 * self.linear_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) + x = x + self.drop_path(self.ls2 * self.mlp_channels(self.norm2(x))) + return x + + +class SpatialGatingUnit(nn.Module): + """ Spatial Gating Unit + + Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + def __init__(self, dim, seq_len, norm_layer=nn.LayerNorm): + super().__init__() + gate_dim = dim // 2 + self.norm = norm_layer(gate_dim) + self.proj = nn.Linear(seq_len, seq_len) + + def init_weights(self): + # special init for the projection gate, called as override by base model init + nn.init.normal_(self.proj.weight, std=1e-6) + nn.init.ones_(self.proj.bias) + + def forward(self, x): + u, v = x.chunk(2, dim=-1) + v = self.norm(v) + v = self.proj(v.transpose(-1, -2)) + return u * v.transpose(-1, -2) + + +class SpatialGatingBlock(nn.Module): + """ Residual Block w/ Spatial Gating + + Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + def __init__( + self, dim, seq_len, mlp_ratio=4, mlp_layer=GatedMlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop=0., drop_path=0.): + super().__init__() + channel_dim = int(dim * mlp_ratio) + self.norm = norm_layer(dim) + sgu = partial(SpatialGatingUnit, seq_len=seq_len) + self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, gate_layer=sgu, drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + x = x + self.drop_path(self.mlp_channels(self.norm(x))) + return x + + +class MlpMixer(nn.Module): + + def __init__( + self, + num_classes=1000, + img_size=224, + in_chans=3, + patch_size=16, + num_blocks=8, + embed_dim=512, + mlp_ratio=(0.5, 4.0), + block_layer=MixerBlock, + mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, + drop_rate=0., + drop_path_rate=0., + nlhb=False, + stem_norm=False, + ): + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + + self.stem = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, + embed_dim=embed_dim, norm_layer=norm_layer if stem_norm else None) + # FIXME drop_path (stochastic depth scaling rule or all the same?) + self.blocks = nn.Sequential(*[ + block_layer( + embed_dim, self.stem.num_patches, mlp_ratio, mlp_layer=mlp_layer, norm_layer=norm_layer, + act_layer=act_layer, drop=drop_rate, drop_path=drop_path_rate) + for _ in range(num_blocks)]) + self.norm = norm_layer(embed_dim) + self.head = nn.Linear(embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + self.init_weights(nlhb=nlhb) + + def init_weights(self, nlhb=False): + head_bias = -math.log(self.num_classes) if nlhb else 0. + named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.stem(x) + x = self.blocks(x) + x = self.norm(x) + x = x.mean(dim=1) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False): + """ Mixer weight initialization (trying to match Flax defaults) + """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + nn.init.zeros_(module.weight) + nn.init.constant_(module.bias, head_bias) + else: + if flax: + # Flax defaults + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + else: + # like MLP init in vit (my original init) + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + if 'mlp' in name: + nn.init.normal_(module.bias, std=1e-6) + else: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + # NOTE if a parent module contains init_weights method, it can override the init of the + # child modules as this will be called in depth-first order. + module.init_weights() + + +def checkpoint_filter_fn(state_dict, model): + """ Remap checkpoints if needed """ + if 'patch_embed.proj.weight' in state_dict: + # Remap FB ResMlp models -> timm + out_dict = {} + for k, v in state_dict.items(): + k = k.replace('patch_embed.', 'stem.') + k = k.replace('attn.', 'linear_tokens.') + k = k.replace('mlp.', 'mlp_channels.') + k = k.replace('gamma_', 'ls') + if k.endswith('.alpha') or k.endswith('.beta'): + v = v.reshape(1, 1, -1) + out_dict[k] = v + return out_dict + return state_dict + + +def _create_mixer(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for MLP-Mixer models.') + + model = build_model_with_cfg( + MlpMixer, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def mixer_s32_224(pretrained=False, **kwargs): + """ Mixer-S/32 224x224 + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=32, num_blocks=8, embed_dim=512, **kwargs) + model = _create_mixer('mixer_s32_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_s16_224(pretrained=False, **kwargs): + """ Mixer-S/16 224x224 + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=16, num_blocks=8, embed_dim=512, **kwargs) + model = _create_mixer('mixer_s16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_b32_224(pretrained=False, **kwargs): + """ Mixer-B/32 224x224 + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=32, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b32_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_b16_224(pretrained=False, **kwargs): + """ Mixer-B/16 224x224. ImageNet-1k pretrained weights. + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_b16_224_in21k(pretrained=False, **kwargs): + """ Mixer-B/16 224x224. ImageNet-21k pretrained weights. + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b16_224_in21k', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_l32_224(pretrained=False, **kwargs): + """ Mixer-L/32 224x224. + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=32, num_blocks=24, embed_dim=1024, **kwargs) + model = _create_mixer('mixer_l32_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_l16_224(pretrained=False, **kwargs): + """ Mixer-L/16 224x224. ImageNet-1k pretrained weights. + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs) + model = _create_mixer('mixer_l16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_l16_224_in21k(pretrained=False, **kwargs): + """ Mixer-L/16 224x224. ImageNet-21k pretrained weights. + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs) + model = _create_mixer('mixer_l16_224_in21k', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_b16_224_miil(pretrained=False, **kwargs): + """ Mixer-B/16 224x224. ImageNet-21k pretrained weights. + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b16_224_miil', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_b16_224_miil_in21k(pretrained=False, **kwargs): + """ Mixer-B/16 224x224. ImageNet-1k pretrained weights. + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b16_224_miil_in21k', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmixer_12_224(pretrained=False, **kwargs): + """ Glu-Mixer-12 224x224 + Experiment by Ross Wightman, adding (Si)GLU to MLP-Mixer + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=(1.0, 4.0), + mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs) + model = _create_mixer('gmixer_12_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmixer_24_224(pretrained=False, **kwargs): + """ Glu-Mixer-24 224x224 + Experiment by Ross Wightman, adding (Si)GLU to MLP-Mixer + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=(1.0, 4.0), + mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs) + model = _create_mixer('gmixer_24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_12_224(pretrained=False, **kwargs): + """ ResMLP-12 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_12_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_24_224(pretrained=False, **kwargs): + """ ResMLP-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_36_224(pretrained=False, **kwargs): + """ ResMLP-36 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=36, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_36_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_big_24_224(pretrained=False, **kwargs): + """ ResMLP-B-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_big_24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_12_distilled_224(pretrained=False, **kwargs): + """ ResMLP-12 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_12_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_24_distilled_224(pretrained=False, **kwargs): + """ ResMLP-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_24_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_36_distilled_224(pretrained=False, **kwargs): + """ ResMLP-36 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=36, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_36_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_big_24_distilled_224(pretrained=False, **kwargs): + """ ResMLP-B-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_big_24_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_big_24_224_in22ft1k(pretrained=False, **kwargs): + """ ResMLP-B-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_big_24_224_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmlp_ti16_224(pretrained=False, **kwargs): + """ gMLP-Tiny + Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + model_args = dict( + patch_size=16, num_blocks=30, embed_dim=128, mlp_ratio=6, block_layer=SpatialGatingBlock, + mlp_layer=GatedMlp, **kwargs) + model = _create_mixer('gmlp_ti16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmlp_s16_224(pretrained=False, **kwargs): + """ gMLP-Small + Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + model_args = dict( + patch_size=16, num_blocks=30, embed_dim=256, mlp_ratio=6, block_layer=SpatialGatingBlock, + mlp_layer=GatedMlp, **kwargs) + model = _create_mixer('gmlp_s16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmlp_b16_224(pretrained=False, **kwargs): + """ gMLP-Base + Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + model_args = dict( + patch_size=16, num_blocks=30, embed_dim=512, mlp_ratio=6, block_layer=SpatialGatingBlock, + mlp_layer=GatedMlp, **kwargs) + model = _create_mixer('gmlp_b16_224', pretrained=pretrained, **model_args) + return model diff --git a/testbed/huggingface__pytorch-image-models/timm/models/mobilenetv3.py b/testbed/huggingface__pytorch-image-models/timm/models/mobilenetv3.py new file mode 100644 index 0000000000000000000000000000000000000000..f810eb8281510b3c3445ce27809cee613626aff6 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/mobilenetv3.py @@ -0,0 +1,562 @@ + +""" MobileNet V3 + +A PyTorch impl of MobileNet-V3, compatible with TF weights from official impl. + +Paper: Searching for MobileNetV3 - https://arxiv.org/abs/1905.02244 + +Hacked together by / Copyright 2021 Ross Wightman +""" +from functools import partial +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .efficientnet_blocks import SqueezeExcite +from .efficientnet_builder import EfficientNetBuilder, decode_arch_def, efficientnet_init_weights,\ + round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT +from .features import FeatureInfo, FeatureHooks +from .helpers import build_model_with_cfg, default_cfg_for_features +from .layers import SelectAdaptivePool2d, Linear, create_conv2d, get_act_fn, hard_sigmoid +from .registry import register_model + +__all__ = ['MobileNetV3', 'MobileNetV3Features'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'mobilenetv3_large_075': _cfg(url=''), + 'mobilenetv3_large_100': _cfg( + interpolation='bicubic', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth'), + 'mobilenetv3_large_100_miil': _cfg( + interpolation='bilinear', mean=(0, 0, 0), std=(1, 1, 1), + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mobilenetv3_large_100_1k_miil_78_0.pth'), + 'mobilenetv3_large_100_miil_in21k': _cfg( + interpolation='bilinear', mean=(0, 0, 0), std=(1, 1, 1), + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mobilenetv3_large_100_in21k_miil.pth', num_classes=11221), + 'mobilenetv3_small_075': _cfg(url=''), + 'mobilenetv3_small_100': _cfg(url=''), + + 'mobilenetv3_rw': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth', + interpolation='bicubic'), + + 'tf_mobilenetv3_large_075': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_large_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_large_minimal_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_075': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_100': _cfg( + url= 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_minimal_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + + 'fbnetv3_b': _cfg(), + 'fbnetv3_d': _cfg(), + 'fbnetv3_g': _cfg(), +} + + +class MobileNetV3(nn.Module): + """ MobiletNet-V3 + + Based on my EfficientNet implementation and building blocks, this model utilizes the MobileNet-v3 specific + 'efficient head', where global pooling is done before the head convolution without a final batch-norm + layer before the classifier. + + Paper: https://arxiv.org/abs/1905.02244 + """ + + def __init__(self, block_args, num_classes=1000, in_chans=3, stem_size=16, num_features=1280, head_bias=True, + pad_type='', act_layer=None, norm_layer=None, se_layer=None, se_from_exp=True, + round_chs_fn=round_channels, drop_rate=0., drop_path_rate=0., global_pool='avg'): + super(MobileNetV3, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + se_layer = se_layer or SqueezeExcite + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + + # Stem + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size) + self.act1 = act_layer(inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=32, pad_type=pad_type, round_chs_fn=round_chs_fn, se_from_exp=se_from_exp, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = builder.features + head_chs = builder.in_chs + + # Head + Pooling + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + num_pooled_chs = head_chs * self.global_pool.feat_mult() + self.conv_head = create_conv2d(num_pooled_chs, self.num_features, 1, padding=pad_type, bias=head_bias) + self.act2 = act_layer(inplace=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + efficientnet_init_weights(self) + + def as_sequential(self): + layers = [self.conv_stem, self.bn1, self.act1] + layers.extend(self.blocks) + layers.extend([self.global_pool, self.conv_head, self.act2]) + layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier]) + return nn.Sequential(*layers) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + # cannot meaningfully change pooling of efficient head after creation + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + x = self.blocks(x) + x = self.global_pool(x) + x = self.conv_head(x) + x = self.act2(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.flatten(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return self.classifier(x) + + +class MobileNetV3Features(nn.Module): + """ MobileNetV3 Feature Extractor + + A work-in-progress feature extraction module for MobileNet-V3 to use as a backbone for segmentation + and object detection models. + """ + + def __init__(self, block_args, out_indices=(0, 1, 2, 3, 4), feature_location='bottleneck', in_chans=3, + stem_size=16, output_stride=32, pad_type='', round_chs_fn=round_channels, se_from_exp=True, + act_layer=None, norm_layer=None, se_layer=None, drop_rate=0., drop_path_rate=0.): + super(MobileNetV3Features, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + se_layer = se_layer or SqueezeExcite + self.drop_rate = drop_rate + + # Stem + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size) + self.act1 = act_layer(inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, se_from_exp=se_from_exp, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, + drop_path_rate=drop_path_rate, feature_location=feature_location) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = FeatureInfo(builder.features, out_indices) + self._stage_out_idx = {v['stage']: i for i, v in enumerate(self.feature_info) if i in out_indices} + + efficientnet_init_weights(self) + + # Register feature extraction hooks with FeatureHooks helper + self.feature_hooks = None + if feature_location != 'bottleneck': + hooks = self.feature_info.get_dicts(keys=('module', 'hook_type')) + self.feature_hooks = FeatureHooks(hooks, self.named_modules()) + + def forward(self, x) -> List[torch.Tensor]: + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + if self.feature_hooks is None: + features = [] + if 0 in self._stage_out_idx: + features.append(x) # add stem out + for i, b in enumerate(self.blocks): + x = b(x) + if i + 1 in self._stage_out_idx: + features.append(x) + return features + else: + self.blocks(x) + out = self.feature_hooks.get_output(x.device) + return list(out.values()) + + +def _create_mnv3(variant, pretrained=False, **kwargs): + features_only = False + model_cls = MobileNetV3 + kwargs_filter = None + if kwargs.pop('features_only', False): + features_only = True + kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'head_bias', 'global_pool') + model_cls = MobileNetV3Features + model = build_model_with_cfg( + model_cls, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **kwargs) + if features_only: + model.default_cfg = default_cfg_for_features(model.default_cfg) + return model + + +def _gen_mobilenet_v3_rw(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MobileNet-V3 model. + + Ref impl: ? + Paper: https://arxiv.org/abs/1905.02244 + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_nre_noskip'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], # hard-swish + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + head_bias=False, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'hard_swish'), + se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid'), + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +def _gen_mobilenet_v3(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MobileNet-V3 model. + + Ref impl: ? + Paper: https://arxiv.org/abs/1905.02244 + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + if 'small' in variant: + num_features = 1024 + if 'minimal' in variant: + act_layer = resolve_act_layer(kwargs, 'relu') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s2_e1_c16'], + # stage 1, 56x56 in + ['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'], + # stage 2, 28x28 in + ['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'], + # stage 3, 14x14 in + ['ir_r2_k3_s1_e3_c48'], + # stage 4, 14x14in + ['ir_r3_k3_s2_e6_c96'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c576'], + ] + else: + act_layer = resolve_act_layer(kwargs, 'hard_swish') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s2_e1_c16_se0.25_nre'], # relu + # stage 1, 56x56 in + ['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'], # relu + # stage 2, 28x28 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'], # hard-swish + # stage 3, 14x14 in + ['ir_r2_k5_s1_e3_c48_se0.25'], # hard-swish + # stage 4, 14x14in + ['ir_r3_k5_s2_e6_c96_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c576'], # hard-swish + ] + else: + num_features = 1280 + if 'minimal' in variant: + act_layer = resolve_act_layer(kwargs, 'relu') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'], + # stage 2, 56x56 in + ['ir_r3_k3_s2_e3_c40'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112'], + # stage 5, 14x14in + ['ir_r3_k3_s2_e6_c160'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], + ] + else: + act_layer = resolve_act_layer(kwargs, 'hard_swish') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_nre'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], # hard-swish + ] + se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels) + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=num_features, + stem_size=16, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=act_layer, + se_layer=se_layer, + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +def _gen_fbnetv3(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """ FBNetV3 + Paper: `FBNetV3: Joint Architecture-Recipe Search using Predictor Pretraining` + - https://arxiv.org/abs/2006.02049 + FIXME untested, this is a preliminary impl of some FBNet-V3 variants. + """ + vl = variant.split('_')[-1] + if vl in ('a', 'b'): + stem_size = 16 + arch_def = [ + ['ds_r2_k3_s1_e1_c16'], + ['ir_r1_k5_s2_e4_c24', 'ir_r3_k5_s1_e2_c24'], + ['ir_r1_k5_s2_e5_c40_se0.25', 'ir_r4_k5_s1_e3_c40_se0.25'], + ['ir_r1_k5_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'], + ['ir_r1_k3_s1_e5_c120_se0.25', 'ir_r5_k5_s1_e3_c120_se0.25'], + ['ir_r1_k3_s2_e6_c184_se0.25', 'ir_r5_k5_s1_e4_c184_se0.25', 'ir_r1_k5_s1_e6_c224_se0.25'], + ['cn_r1_k1_s1_c1344'], + ] + elif vl == 'd': + stem_size = 24 + arch_def = [ + ['ds_r2_k3_s1_e1_c16'], + ['ir_r1_k3_s2_e5_c24', 'ir_r5_k3_s1_e2_c24'], + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r4_k3_s1_e3_c40_se0.25'], + ['ir_r1_k3_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'], + ['ir_r1_k3_s1_e5_c128_se0.25', 'ir_r6_k5_s1_e3_c128_se0.25'], + ['ir_r1_k3_s2_e6_c208_se0.25', 'ir_r5_k5_s1_e5_c208_se0.25', 'ir_r1_k5_s1_e6_c240_se0.25'], + ['cn_r1_k1_s1_c1440'], + ] + elif vl == 'g': + stem_size = 32 + arch_def = [ + ['ds_r3_k3_s1_e1_c24'], + ['ir_r1_k5_s2_e4_c40', 'ir_r4_k5_s1_e2_c40'], + ['ir_r1_k5_s2_e4_c56_se0.25', 'ir_r4_k5_s1_e3_c56_se0.25'], + ['ir_r1_k5_s2_e5_c104', 'ir_r4_k3_s1_e3_c104'], + ['ir_r1_k3_s1_e5_c160_se0.25', 'ir_r8_k5_s1_e3_c160_se0.25'], + ['ir_r1_k3_s2_e6_c264_se0.25', 'ir_r6_k5_s1_e5_c264_se0.25', 'ir_r2_k5_s1_e6_c288_se0.25'], + ['cn_r1_k1_s1_c1728'], + ] + else: + raise NotImplemented + round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.95) + se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=round_chs_fn) + act_layer = resolve_act_layer(kwargs, 'hard_swish') + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=1984, + head_bias=False, + stem_size=stem_size, + round_chs_fn=round_chs_fn, + se_from_exp=False, + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=act_layer, + se_layer=se_layer, + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +@register_model +def mobilenetv3_large_075(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_large_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_large_100_miil(pretrained=False, **kwargs): + """ MobileNet V3 + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model = _gen_mobilenet_v3('mobilenetv3_large_100_miil', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_large_100_miil_in21k(pretrained=False, **kwargs): + """ MobileNet V3, 21k pretraining + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model = _gen_mobilenet_v3('mobilenetv3_large_100_miil_in21k', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_small_075(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_small_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_rw(pretrained=False, **kwargs): + """ MobileNet V3 """ + if pretrained: + # pretrained model trained with non-default BN epsilon + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + model = _gen_mobilenet_v3_rw('mobilenetv3_rw', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_large_075(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_large_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_large_minimal_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_small_075(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_small_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_small_minimal_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetv3_b(pretrained=False, **kwargs): + """ FBNetV3-B """ + model = _gen_fbnetv3('fbnetv3_b', pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetv3_d(pretrained=False, **kwargs): + """ FBNetV3-D """ + model = _gen_fbnetv3('fbnetv3_d', pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetv3_g(pretrained=False, **kwargs): + """ FBNetV3-G """ + model = _gen_fbnetv3('fbnetv3_g', pretrained=pretrained, **kwargs) + return model diff --git a/testbed/huggingface__pytorch-image-models/timm/models/nasnet.py b/testbed/huggingface__pytorch-image-models/timm/models/nasnet.py new file mode 100644 index 0000000000000000000000000000000000000000..2afe82c3f374dd4790bc940289c8e3794497fbbc --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/nasnet.py @@ -0,0 +1,567 @@ +""" NasNet-A (Large) + nasnetalarge implementation grabbed from Cadene's pretrained models + https://github.com/Cadene/pretrained-models.pytorch +""" +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import build_model_with_cfg +from .layers import ConvBnAct, create_conv2d, create_pool2d, create_classifier +from .registry import register_model + +__all__ = ['NASNetALarge'] + +default_cfgs = { + 'nasnetalarge': { + 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/nasnetalarge-a1897284.pth', + 'input_size': (3, 331, 331), + 'pool_size': (11, 11), + 'crop_pct': 0.911, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + 'num_classes': 1000, + 'first_conv': 'conv0.conv', + 'classifier': 'last_linear', + 'label_offset': 1, # 1001 classes in pretrained weights + }, +} + + +class ActConvBn(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): + super(ActConvBn, self).__init__() + self.act = nn.ReLU() + self.conv = create_conv2d( + in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) + + def forward(self, x): + x = self.act(x) + x = self.conv(x) + x = self.bn(x) + return x + + +class SeparableConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): + super(SeparableConv2d, self).__init__() + self.depthwise_conv2d = create_conv2d( + in_channels, in_channels, kernel_size=kernel_size, + stride=stride, padding=padding, groups=in_channels) + self.pointwise_conv2d = create_conv2d( + in_channels, out_channels, kernel_size=1, padding=0) + + def forward(self, x): + x = self.depthwise_conv2d(x) + x = self.pointwise_conv2d(x) + return x + + +class BranchSeparables(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, pad_type='', stem_cell=False): + super(BranchSeparables, self).__init__() + middle_channels = out_channels if stem_cell else in_channels + self.act_1 = nn.ReLU() + self.separable_1 = SeparableConv2d( + in_channels, middle_channels, kernel_size, stride=stride, padding=pad_type) + self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001, momentum=0.1) + self.act_2 = nn.ReLU(inplace=True) + self.separable_2 = SeparableConv2d( + middle_channels, out_channels, kernel_size, stride=1, padding=pad_type) + self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) + + def forward(self, x): + x = self.act_1(x) + x = self.separable_1(x) + x = self.bn_sep_1(x) + x = self.act_2(x) + x = self.separable_2(x) + x = self.bn_sep_2(x) + return x + + +class CellStem0(nn.Module): + def __init__(self, stem_size, num_channels=42, pad_type=''): + super(CellStem0, self).__init__() + self.num_channels = num_channels + self.stem_size = stem_size + self.conv_1x1 = ActConvBn(self.stem_size, self.num_channels, 1, stride=1) + + self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(self.stem_size, self.num_channels, 5, 2, pad_type, stem_cell=True) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x): + x1 = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x1) + x_comb_iter_0_right = self.comb_iter_0_right(x) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x1) + x_comb_iter_1_right = self.comb_iter_1_right(x) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x1) + x_comb_iter_2_right = self.comb_iter_2_right(x) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x1) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class CellStem1(nn.Module): + + def __init__(self, stem_size, num_channels, pad_type=''): + super(CellStem1, self).__init__() + self.num_channels = num_channels + self.stem_size = stem_size + self.conv_1x1 = ActConvBn(2 * self.num_channels, self.num_channels, 1, stride=1) + + self.act = nn.ReLU() + self.path_1 = nn.Sequential() + self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_1.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) + + self.path_2 = nn.Sequential() + self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) + self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_2.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) + + self.final_path_bn = nn.BatchNorm2d(self.num_channels, eps=0.001, momentum=0.1) + + self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x_conv0, x_stem_0): + x_left = self.conv_1x1(x_stem_0) + + x_relu = self.act(x_conv0) + # path 1 + x_path1 = self.path_1(x_relu) + # path 2 + x_path2 = self.path_2(x_relu) + # final path + x_right = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + + x_comb_iter_0_left = self.comb_iter_0_left(x_left) + x_comb_iter_0_right = self.comb_iter_0_right(x_right) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_right) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_left) + x_comb_iter_2_right = self.comb_iter_2_right(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_left) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class FirstCell(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(FirstCell, self).__init__() + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1) + + self.act = nn.ReLU() + self.path_1 = nn.Sequential() + self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_1.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) + + self.path_2 = nn.Sequential() + self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) + self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_2.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) + + self.final_path_bn = nn.BatchNorm2d(out_chs_left * 2, eps=0.001, momentum=0.1) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + self.comb_iter_1_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + def forward(self, x, x_prev): + x_relu = self.act(x_prev) + x_path1 = self.path_1(x_relu) + x_path2 = self.path_2(x_relu) + x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_left + + x_comb_iter_3_left = self.comb_iter_3_left(x_left) + x_comb_iter_3_right = self.comb_iter_3_right(x_left) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_right + + x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class NormalCell(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(NormalCell, self).__init__() + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) + + self.comb_iter_1_left = BranchSeparables(out_chs_left, out_chs_left, 5, 1, pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_left + + x_comb_iter_3_left = self.comb_iter_3_left(x_left) + x_comb_iter_3_right = self.comb_iter_3_right(x_left) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_right + + x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class ReductionCell0(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(ReductionCell0, self).__init__() + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_left) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class ReductionCell1(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(ReductionCell1, self).__init__() + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_left) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class NASNetALarge(nn.Module): + """NASNetALarge (6 @ 4032) """ + + def __init__(self, num_classes=1000, in_chans=3, stem_size=96, channel_multiplier=2, + num_features=4032, output_stride=32, drop_rate=0., global_pool='avg', pad_type='same'): + super(NASNetALarge, self).__init__() + self.num_classes = num_classes + self.stem_size = stem_size + self.num_features = num_features + self.channel_multiplier = channel_multiplier + self.drop_rate = drop_rate + assert output_stride == 32 + + channels = self.num_features // 24 + # 24 is default value for the architecture + + self.conv0 = ConvBnAct( + in_channels=in_chans, out_channels=self.stem_size, kernel_size=3, padding=0, stride=2, + norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False) + + self.cell_stem_0 = CellStem0( + self.stem_size, num_channels=channels // (channel_multiplier ** 2), pad_type=pad_type) + self.cell_stem_1 = CellStem1( + self.stem_size, num_channels=channels // channel_multiplier, pad_type=pad_type) + + self.cell_0 = FirstCell( + in_chs_left=channels, out_chs_left=channels // 2, + in_chs_right=2 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_1 = NormalCell( + in_chs_left=2 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_2 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_3 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_4 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_5 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + + self.reduction_cell_0 = ReductionCell0( + in_chs_left=6 * channels, out_chs_left=2 * channels, + in_chs_right=6 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_6 = FirstCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=8 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_7 = NormalCell( + in_chs_left=8 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_8 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_9 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_10 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_11 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + + self.reduction_cell_1 = ReductionCell1( + in_chs_left=12 * channels, out_chs_left=4 * channels, + in_chs_right=12 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_12 = FirstCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=16 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_13 = NormalCell( + in_chs_left=16 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_14 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_15 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_16 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_17 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.act = nn.ReLU(inplace=True) + self.feature_info = [ + dict(num_chs=96, reduction=2, module='conv0'), + dict(num_chs=168, reduction=4, module='cell_stem_1.conv_1x1.act'), + dict(num_chs=1008, reduction=8, module='reduction_cell_0.conv_1x1.act'), + dict(num_chs=2016, reduction=16, module='reduction_cell_1.conv_1x1.act'), + dict(num_chs=4032, reduction=32, module='act'), + ] + + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def get_classifier(self): + return self.last_linear + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x_conv0 = self.conv0(x) + + x_stem_0 = self.cell_stem_0(x_conv0) + x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0) + + x_cell_0 = self.cell_0(x_stem_1, x_stem_0) + x_cell_1 = self.cell_1(x_cell_0, x_stem_1) + x_cell_2 = self.cell_2(x_cell_1, x_cell_0) + x_cell_3 = self.cell_3(x_cell_2, x_cell_1) + x_cell_4 = self.cell_4(x_cell_3, x_cell_2) + x_cell_5 = self.cell_5(x_cell_4, x_cell_3) + + x_reduction_cell_0 = self.reduction_cell_0(x_cell_5, x_cell_4) + x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_4) + x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0) + x_cell_8 = self.cell_8(x_cell_7, x_cell_6) + x_cell_9 = self.cell_9(x_cell_8, x_cell_7) + x_cell_10 = self.cell_10(x_cell_9, x_cell_8) + x_cell_11 = self.cell_11(x_cell_10, x_cell_9) + + x_reduction_cell_1 = self.reduction_cell_1(x_cell_11, x_cell_10) + x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_10) + x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1) + x_cell_14 = self.cell_14(x_cell_13, x_cell_12) + x_cell_15 = self.cell_15(x_cell_14, x_cell_13) + x_cell_16 = self.cell_16(x_cell_15, x_cell_14) + x_cell_17 = self.cell_17(x_cell_16, x_cell_15) + x = self.act(x_cell_17) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, self.drop_rate, training=self.training) + x = self.last_linear(x) + return x + + +def _create_nasnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + NASNetALarge, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model + **kwargs) + + +@register_model +def nasnetalarge(pretrained=False, **kwargs): + """NASNet-A large model architecture. + """ + model_kwargs = dict(pad_type='same', **kwargs) + return _create_nasnet('nasnetalarge', pretrained, **model_kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/nest.py b/testbed/huggingface__pytorch-image-models/timm/models/nest.py new file mode 100644 index 0000000000000000000000000000000000000000..fe0645ccb54f3048c81ee2ad517938ce910b421e --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/nest.py @@ -0,0 +1,462 @@ +""" Nested Transformer (NesT) in PyTorch + +A PyTorch implement of Aggregating Nested Transformers as described in: + +'Aggregating Nested Transformers' + - https://arxiv.org/abs/2105.12723 + +The official Jax code is released and available at https://github.com/google-research/nested-transformer. The weights +have been converted with convert/convert_nest_flax.py + +Acknowledgments: +* The paper authors for sharing their research, code, and model weights +* Ross Wightman's existing code off which I based this + +Copyright 2021 Alexander Soare +""" + +import collections.abc +import logging +import math +from functools import partial + +import torch +import torch.nn.functional as F +from torch import nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, named_apply +from .layers import PatchEmbed, Mlp, DropPath, create_classifier, trunc_normal_ +from .layers import create_conv2d, create_pool2d, to_ntuple +from .registry import register_model + +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': [14, 14], + 'crop_pct': .875, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # (weights from official Google JAX impl) + 'nest_base': _cfg(), + 'nest_small': _cfg(), + 'nest_tiny': _cfg(), + 'jx_nest_base': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/jx_nest_base-8bc41011.pth'), + 'jx_nest_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/jx_nest_small-422eaded.pth'), + 'jx_nest_tiny': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/jx_nest_tiny-e3428fb9.pth'), +} + + +class Attention(nn.Module): + """ + This is much like `.vision_transformer.Attention` but uses *localised* self attention by accepting an input with + an extra "image block" dim + """ + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, 3*dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + """ + x is shape: B (batch_size), T (image blocks), N (seq length per image block), C (embed dim) + """ + B, T, N, C = x.shape + # result of next line is (qkv, B, num (H)eads, T, N, (C')hannels per head) + qkv = self.qkv(x).reshape(B, T, N, 3, self.num_heads, C // self.num_heads).permute(3, 0, 4, 1, 2, 5) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale # (B, H, T, N, N) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + # (B, H, T, N, C'), permute -> (B, T, N, C', H) + x = (attn @ v).permute(0, 2, 3, 4, 1).reshape(B, T, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x # (B, T, N, C) + + +class TransformerLayer(nn.Module): + """ + This is much like `.vision_transformer.Block` but: + - Called TransformerLayer here to allow for "block" as defined in the paper ("non-overlapping image blocks") + - Uses modified Attention layer that handles the "block" dimension + """ + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + y = self.norm1(x) + x = x + self.drop_path(self.attn(y)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class ConvPool(nn.Module): + def __init__(self, in_channels, out_channels, norm_layer, pad_type=''): + super().__init__() + self.conv = create_conv2d(in_channels, out_channels, kernel_size=3, padding=pad_type, bias=True) + self.norm = norm_layer(out_channels) + self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=pad_type) + + def forward(self, x): + """ + x is expected to have shape (B, C, H, W) + """ + assert x.shape[-2] % 2 == 0, 'BlockAggregation requires even input spatial dims' + assert x.shape[-1] % 2 == 0, 'BlockAggregation requires even input spatial dims' + x = self.conv(x) + # Layer norm done over channel dim only + x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + x = self.pool(x) + return x # (B, C, H//2, W//2) + + +def blockify(x, block_size: int): + """image to blocks + Args: + x (Tensor): with shape (B, H, W, C) + block_size (int): edge length of a single square block in units of H, W + """ + B, H, W, C = x.shape + assert H % block_size == 0, '`block_size` must divide input height evenly' + assert W % block_size == 0, '`block_size` must divide input width evenly' + grid_height = H // block_size + grid_width = W // block_size + x = x.reshape(B, grid_height, block_size, grid_width, block_size, C) + x = x.transpose(2, 3).reshape(B, grid_height * grid_width, -1, C) + return x # (B, T, N, C) + + +def deblockify(x, block_size: int): + """blocks to image + Args: + x (Tensor): with shape (B, T, N, C) where T is number of blocks and N is sequence size per block + block_size (int): edge length of a single square block in units of desired H, W + """ + B, T, _, C = x.shape + grid_size = int(math.sqrt(T)) + height = width = grid_size * block_size + x = x.reshape(B, grid_size, grid_size, block_size, block_size, C) + x = x.transpose(2, 3).reshape(B, height, width, C) + return x # (B, H, W, C) + + +class NestLevel(nn.Module): + """ Single hierarchical level of a Nested Transformer + """ + def __init__( + self, num_blocks, block_size, seq_length, num_heads, depth, embed_dim, prev_embed_dim=None, + mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rates=[], + norm_layer=None, act_layer=None, pad_type=''): + super().__init__() + self.block_size = block_size + self.pos_embed = nn.Parameter(torch.zeros(1, num_blocks, seq_length, embed_dim)) + + if prev_embed_dim is not None: + self.pool = ConvPool(prev_embed_dim, embed_dim, norm_layer=norm_layer, pad_type=pad_type) + else: + self.pool = nn.Identity() + + # Transformer encoder + if len(drop_path_rates): + assert len(drop_path_rates) == depth, 'Must provide as many drop path rates as there are transformer layers' + self.transformer_encoder = nn.Sequential(*[ + TransformerLayer( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=drop_path_rates[i], + norm_layer=norm_layer, act_layer=act_layer) + for i in range(depth)]) + + def forward(self, x): + """ + expects x as (B, C, H, W) + """ + x = self.pool(x) + x = x.permute(0, 2, 3, 1) # (B, H', W', C), switch to channels last for transformer + x = blockify(x, self.block_size) # (B, T, N, C') + x = x + self.pos_embed + x = self.transformer_encoder(x) # (B, T, N, C') + x = deblockify(x, self.block_size) # (B, H', W', C') + # Channel-first for block aggregation, and generally to replicate convnet feature map at each stage + return x.permute(0, 3, 1, 2) # (B, C, H', W') + + +class Nest(nn.Module): + """ Nested Transformer (NesT) + + A PyTorch impl of : `Aggregating Nested Transformers` + - https://arxiv.org/abs/2105.12723 + """ + + def __init__(self, img_size=224, in_chans=3, patch_size=4, num_levels=3, embed_dims=(128, 256, 512), + num_heads=(4, 8, 16), depths=(2, 2, 20), num_classes=1000, mlp_ratio=4., qkv_bias=True, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.5, norm_layer=None, act_layer=None, + pad_type='', weight_init='', global_pool='avg'): + """ + Args: + img_size (int, tuple): input image size + in_chans (int): number of input channels + patch_size (int): patch size + num_levels (int): number of block hierarchies (T_d in the paper) + embed_dims (int, tuple): embedding dimensions of each level + num_heads (int, tuple): number of attention heads for each level + depths (int, tuple): number of transformer layers for each level + num_classes (int): number of classes for classification head + mlp_ratio (int): ratio of mlp hidden dim to embedding dim for MLP of transformer layers + qkv_bias (bool): enable bias for qkv if True + drop_rate (float): dropout rate for MLP of transformer layers, MSA final projection layer, and classifier + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + norm_layer: (nn.Module): normalization layer for transformer layers + act_layer: (nn.Module): activation layer in MLP of transformer layers + pad_type: str: Type of padding to use '' for PyTorch symmetric, 'same' for TF SAME + weight_init: (str): weight init scheme + global_pool: (str): type of pooling operation to apply to final feature map + + Notes: + - Default values follow NesT-B from the original Jax code. + - `embed_dims`, `num_heads`, `depths` should be ints or tuples with length `num_levels`. + - For those following the paper, Table A1 may have errors! + - https://github.com/google-research/nested-transformer/issues/2 + """ + super().__init__() + + for param_name in ['embed_dims', 'num_heads', 'depths']: + param_value = locals()[param_name] + if isinstance(param_value, collections.abc.Sequence): + assert len(param_value) == num_levels, f'Require `len({param_name}) == num_levels`' + + embed_dims = to_ntuple(num_levels)(embed_dims) + num_heads = to_ntuple(num_levels)(num_heads) + depths = to_ntuple(num_levels)(depths) + self.num_classes = num_classes + self.num_features = embed_dims[-1] + self.feature_info = [] + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or nn.GELU + self.drop_rate = drop_rate + self.num_levels = num_levels + if isinstance(img_size, collections.abc.Sequence): + assert img_size[0] == img_size[1], 'Model only handles square inputs' + img_size = img_size[0] + assert img_size % patch_size == 0, '`patch_size` must divide `img_size` evenly' + self.patch_size = patch_size + + # Number of blocks at each level + self.num_blocks = (4 ** torch.arange(num_levels)).flip(0).tolist() + assert (img_size // patch_size) % math.sqrt(self.num_blocks[0]) == 0, \ + 'First level blocks don\'t fit evenly. Check `img_size`, `patch_size`, and `num_levels`' + + # Block edge size in units of patches + # Hint: (img_size // patch_size) gives number of patches along edge of image. sqrt(self.num_blocks[0]) is the + # number of blocks along edge of image + self.block_size = int((img_size // patch_size) // math.sqrt(self.num_blocks[0])) + + # Patch embedding + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dims[0], flatten=False) + self.num_patches = self.patch_embed.num_patches + self.seq_length = self.num_patches // self.num_blocks[0] + + # Build up each hierarchical level + levels = [] + dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + prev_dim = None + curr_stride = 4 + for i in range(len(self.num_blocks)): + dim = embed_dims[i] + levels.append(NestLevel( + self.num_blocks[i], self.block_size, self.seq_length, num_heads[i], depths[i], dim, prev_dim, + mlp_ratio, qkv_bias, drop_rate, attn_drop_rate, dp_rates[i], norm_layer, act_layer, pad_type=pad_type)) + self.feature_info += [dict(num_chs=dim, reduction=curr_stride, module=f'levels.{i}')] + prev_dim = dim + curr_stride *= 2 + self.levels = nn.Sequential(*levels) + + # Final normalization layer + self.norm = norm_layer(embed_dims[-1]) + + # Classifier + self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + self.init_weights(weight_init) + + def init_weights(self, mode=''): + assert mode in ('nlhb', '') + head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. + for level in self.levels: + trunc_normal_(level.pos_embed, std=.02, a=-2, b=2) + named_apply(partial(_init_nest_weights, head_bias=head_bias), self) + + @torch.jit.ignore + def no_weight_decay(self): + return {f'level.{i}.pos_embed' for i in range(len(self.levels))} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.head = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + """ x shape (B, C, H, W) + """ + x = self.patch_embed(x) + x = self.levels(x) + # Layer norm done over channel dim only (to NHWC and back) + x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + return x + + def forward(self, x): + """ x shape (B, C, H, W) + """ + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return self.head(x) + + +def _init_nest_weights(module: nn.Module, name: str = '', head_bias: float = 0.): + """ NesT weight initialization + Can replicate Jax implementation. Otherwise follows vision_transformer.py + """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + trunc_normal_(module.weight, std=.02, a=-2, b=2) + nn.init.constant_(module.bias, head_bias) + else: + trunc_normal_(module.weight, std=.02, a=-2, b=2) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + trunc_normal_(module.weight, std=.02, a=-2, b=2) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)): + nn.init.zeros_(module.bias) + nn.init.ones_(module.weight) + + +def resize_pos_embed(posemb, posemb_new): + """ + Rescale the grid of position embeddings when loading from state_dict + Expected shape of position embeddings is (1, T, N, C), and considers only square images + """ + _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) + seq_length_old = posemb.shape[2] + num_blocks_new, seq_length_new = posemb_new.shape[1:3] + size_new = int(math.sqrt(num_blocks_new*seq_length_new)) + # First change to (1, C, H, W) + posemb = deblockify(posemb, int(math.sqrt(seq_length_old))).permute(0, 3, 1, 2) + posemb = F.interpolate(posemb, size=[size_new, size_new], mode='bicubic', align_corners=False) + # Now change to new (1, T, N, C) + posemb = blockify(posemb.permute(0, 2, 3, 1), int(math.sqrt(seq_length_new))) + return posemb + + +def checkpoint_filter_fn(state_dict, model): + """ resize positional embeddings of pretrained weights """ + pos_embed_keys = [k for k in state_dict.keys() if k.startswith('pos_embed_')] + for k in pos_embed_keys: + if state_dict[k].shape != getattr(model, k).shape: + state_dict[k] = resize_pos_embed(state_dict[k], getattr(model, k)) + return state_dict + + +def _create_nest(variant, pretrained=False, default_cfg=None, **kwargs): + default_cfg = default_cfg or default_cfgs[variant] + model = build_model_with_cfg( + Nest, variant, pretrained, + default_cfg=default_cfg, + feature_cfg=dict(out_indices=(0, 1, 2), flatten_sequential=True), + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + + return model + + +@register_model +def nest_base(pretrained=False, **kwargs): + """ Nest-B @ 224x224 + """ + model_kwargs = dict( + embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) + model = _create_nest('nest_base', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def nest_small(pretrained=False, **kwargs): + """ Nest-S @ 224x224 + """ + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) + model = _create_nest('nest_small', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def nest_tiny(pretrained=False, **kwargs): + """ Nest-T @ 224x224 + """ + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) + model = _create_nest('nest_tiny', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def jx_nest_base(pretrained=False, **kwargs): + """ Nest-B @ 224x224, Pretrained weights converted from official Jax impl. + """ + kwargs['pad_type'] = 'same' + model_kwargs = dict(embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) + model = _create_nest('jx_nest_base', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def jx_nest_small(pretrained=False, **kwargs): + """ Nest-S @ 224x224, Pretrained weights converted from official Jax impl. + """ + kwargs['pad_type'] = 'same' + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) + model = _create_nest('jx_nest_small', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def jx_nest_tiny(pretrained=False, **kwargs): + """ Nest-T @ 224x224, Pretrained weights converted from official Jax impl. + """ + kwargs['pad_type'] = 'same' + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) + model = _create_nest('jx_nest_tiny', pretrained=pretrained, **model_kwargs) + return model diff --git a/testbed/huggingface__pytorch-image-models/timm/models/nfnet.py b/testbed/huggingface__pytorch-image-models/timm/models/nfnet.py new file mode 100644 index 0000000000000000000000000000000000000000..4e0f2b211155dc1e304cf076506929817c78d913 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/nfnet.py @@ -0,0 +1,966 @@ +""" Normalization Free Nets. NFNet, NF-RegNet, NF-ResNet (pre-activation) Models + +Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + +Paper: `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + +Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets + +Status: +* These models are a work in progress, experiments ongoing. +* Pretrained weights for two models so far, more to come. +* Model details updated to closer match official JAX code now that it's released +* NF-ResNet, NF-RegNet-B, and NFNet-F models supported + +Hacked together by / copyright Ross Wightman, 2021. +""" +import math +from dataclasses import dataclass, field +from collections import OrderedDict +from typing import Tuple, Optional +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .registry import register_model +from .layers import ClassifierHead, DropPath, AvgPool2dSame, ScaledStdConv2d, ScaledStdConv2dSame,\ + get_act_layer, get_act_fn, get_attn, make_divisible + + +def _dcfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.9, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = dict( + dm_nfnet_f0=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f0-604f9c3a.pth', + pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), crop_pct=.9), + dm_nfnet_f1=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f1-fc540f82.pth', + pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320), crop_pct=0.91), + dm_nfnet_f2=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f2-89875923.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352), crop_pct=0.92), + dm_nfnet_f3=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f3-d74ab3aa.pth', + pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416), crop_pct=0.94), + dm_nfnet_f4=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f4-0ac5b10b.pth', + pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512), crop_pct=0.951), + dm_nfnet_f5=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f5-ecb20ab1.pth', + pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544), crop_pct=0.954), + dm_nfnet_f6=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f6-e0f12116.pth', + pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576), crop_pct=0.956), + + nfnet_f0=_dcfg( + url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256)), + nfnet_f1=_dcfg( + url='', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320)), + nfnet_f2=_dcfg( + url='', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352)), + nfnet_f3=_dcfg( + url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416)), + nfnet_f4=_dcfg( + url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512)), + nfnet_f5=_dcfg( + url='', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544)), + nfnet_f6=_dcfg( + url='', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576)), + nfnet_f7=_dcfg( + url='', pool_size=(15, 15), input_size=(3, 480, 480), test_input_size=(3, 608, 608)), + + nfnet_f0s=_dcfg( + url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256)), + nfnet_f1s=_dcfg( + url='', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320)), + nfnet_f2s=_dcfg( + url='', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352)), + nfnet_f3s=_dcfg( + url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416)), + nfnet_f4s=_dcfg( + url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512)), + nfnet_f5s=_dcfg( + url='', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544)), + nfnet_f6s=_dcfg( + url='', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576)), + nfnet_f7s=_dcfg( + url='', pool_size=(15, 15), input_size=(3, 480, 480), test_input_size=(3, 608, 608)), + + nfnet_l0=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nfnet_l0_ra2-45c6688d.pth', + pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), crop_pct=1.0), + eca_nfnet_l0=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l0_ra2-e3e9ac50.pth', + hf_hub='timm/eca_nfnet_l0', + pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), crop_pct=1.0), + eca_nfnet_l1=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l1_ra2-7dce93cd.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 320, 320), crop_pct=1.0), + eca_nfnet_l2=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l2_ra3-da781a61.pth', + pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), crop_pct=1.0), + eca_nfnet_l3=_dcfg( + url='', + pool_size=(11, 11), input_size=(3, 352, 352), test_input_size=(3, 448, 448), crop_pct=1.0), + + nf_regnet_b0=_dcfg( + url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), first_conv='stem.conv'), + nf_regnet_b1=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_regnet_b1_256_ra2-ad85cfef.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), first_conv='stem.conv'), # NOT to paper spec + nf_regnet_b2=_dcfg( + url='', pool_size=(8, 8), input_size=(3, 240, 240), test_input_size=(3, 272, 272), first_conv='stem.conv'), + nf_regnet_b3=_dcfg( + url='', pool_size=(9, 9), input_size=(3, 288, 288), test_input_size=(3, 320, 320), first_conv='stem.conv'), + nf_regnet_b4=_dcfg( + url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), first_conv='stem.conv'), + nf_regnet_b5=_dcfg( + url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 456, 456), first_conv='stem.conv'), + + nf_resnet26=_dcfg(url='', first_conv='stem.conv'), + nf_resnet50=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_resnet50_ra2-9f236009.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), crop_pct=0.94, first_conv='stem.conv'), + nf_resnet101=_dcfg(url='', first_conv='stem.conv'), + + nf_seresnet26=_dcfg(url='', first_conv='stem.conv'), + nf_seresnet50=_dcfg(url='', first_conv='stem.conv'), + nf_seresnet101=_dcfg(url='', first_conv='stem.conv'), + + nf_ecaresnet26=_dcfg(url='', first_conv='stem.conv'), + nf_ecaresnet50=_dcfg(url='', first_conv='stem.conv'), + nf_ecaresnet101=_dcfg(url='', first_conv='stem.conv'), +) + + +@dataclass +class NfCfg: + depths: Tuple[int, int, int, int] + channels: Tuple[int, int, int, int] + alpha: float = 0.2 + stem_type: str = '3x3' + stem_chs: Optional[int] = None + group_size: Optional[int] = None + attn_layer: Optional[str] = None + attn_kwargs: dict = None + attn_gain: float = 2.0 # NF correction gain to apply if attn layer is used + width_factor: float = 1.0 + bottle_ratio: float = 0.5 + num_features: int = 0 # num out_channels for final conv, no final_conv if 0 + ch_div: int = 8 # round channels % 8 == 0 to keep tensor-core use optimal + reg: bool = False # enables EfficientNet-like options used in RegNet variants, expand from in_chs, se in middle + extra_conv: bool = False # extra 3x3 bottleneck convolution for NFNet models + gamma_in_act: bool = False + same_padding: bool = False + std_conv_eps: float = 1e-5 + skipinit: bool = False # disabled by default, non-trivial performance impact + zero_init_fc: bool = False + act_layer: str = 'silu' + + +def _nfres_cfg( + depths, channels=(256, 512, 1024, 2048), group_size=None, act_layer='relu', attn_layer=None, attn_kwargs=None): + attn_kwargs = attn_kwargs or {} + cfg = NfCfg( + depths=depths, channels=channels, stem_type='7x7_pool', stem_chs=64, bottle_ratio=0.25, + group_size=group_size, act_layer=act_layer, attn_layer=attn_layer, attn_kwargs=attn_kwargs) + return cfg + + +def _nfreg_cfg(depths, channels=(48, 104, 208, 440)): + num_features = 1280 * channels[-1] // 440 + attn_kwargs = dict(rd_ratio=0.5) + cfg = NfCfg( + depths=depths, channels=channels, stem_type='3x3', group_size=8, width_factor=0.75, bottle_ratio=2.25, + num_features=num_features, reg=True, attn_layer='se', attn_kwargs=attn_kwargs) + return cfg + + +def _nfnet_cfg( + depths, channels=(256, 512, 1536, 1536), group_size=128, bottle_ratio=0.5, feat_mult=2., + act_layer='gelu', attn_layer='se', attn_kwargs=None): + num_features = int(channels[-1] * feat_mult) + attn_kwargs = attn_kwargs if attn_kwargs is not None else dict(rd_ratio=0.5) + cfg = NfCfg( + depths=depths, channels=channels, stem_type='deep_quad', stem_chs=128, group_size=group_size, + bottle_ratio=bottle_ratio, extra_conv=True, num_features=num_features, act_layer=act_layer, + attn_layer=attn_layer, attn_kwargs=attn_kwargs) + return cfg + + +def _dm_nfnet_cfg(depths, channels=(256, 512, 1536, 1536), act_layer='gelu', skipinit=True): + cfg = NfCfg( + depths=depths, channels=channels, stem_type='deep_quad', stem_chs=128, group_size=128, + bottle_ratio=0.5, extra_conv=True, gamma_in_act=True, same_padding=True, skipinit=skipinit, + num_features=int(channels[-1] * 2.0), act_layer=act_layer, attn_layer='se', attn_kwargs=dict(rd_ratio=0.5)) + return cfg + + + +model_cfgs = dict( + # NFNet-F models w/ GELU compatible with DeepMind weights + dm_nfnet_f0=_dm_nfnet_cfg(depths=(1, 2, 6, 3)), + dm_nfnet_f1=_dm_nfnet_cfg(depths=(2, 4, 12, 6)), + dm_nfnet_f2=_dm_nfnet_cfg(depths=(3, 6, 18, 9)), + dm_nfnet_f3=_dm_nfnet_cfg(depths=(4, 8, 24, 12)), + dm_nfnet_f4=_dm_nfnet_cfg(depths=(5, 10, 30, 15)), + dm_nfnet_f5=_dm_nfnet_cfg(depths=(6, 12, 36, 18)), + dm_nfnet_f6=_dm_nfnet_cfg(depths=(7, 14, 42, 21)), + + # NFNet-F models w/ GELU (I will likely deprecate/remove these models and just keep dm_ ver for GELU) + nfnet_f0=_nfnet_cfg(depths=(1, 2, 6, 3)), + nfnet_f1=_nfnet_cfg(depths=(2, 4, 12, 6)), + nfnet_f2=_nfnet_cfg(depths=(3, 6, 18, 9)), + nfnet_f3=_nfnet_cfg(depths=(4, 8, 24, 12)), + nfnet_f4=_nfnet_cfg(depths=(5, 10, 30, 15)), + nfnet_f5=_nfnet_cfg(depths=(6, 12, 36, 18)), + nfnet_f6=_nfnet_cfg(depths=(7, 14, 42, 21)), + nfnet_f7=_nfnet_cfg(depths=(8, 16, 48, 24)), + + # NFNet-F models w/ SiLU (much faster in PyTorch) + nfnet_f0s=_nfnet_cfg(depths=(1, 2, 6, 3), act_layer='silu'), + nfnet_f1s=_nfnet_cfg(depths=(2, 4, 12, 6), act_layer='silu'), + nfnet_f2s=_nfnet_cfg(depths=(3, 6, 18, 9), act_layer='silu'), + nfnet_f3s=_nfnet_cfg(depths=(4, 8, 24, 12), act_layer='silu'), + nfnet_f4s=_nfnet_cfg(depths=(5, 10, 30, 15), act_layer='silu'), + nfnet_f5s=_nfnet_cfg(depths=(6, 12, 36, 18), act_layer='silu'), + nfnet_f6s=_nfnet_cfg(depths=(7, 14, 42, 21), act_layer='silu'), + nfnet_f7s=_nfnet_cfg(depths=(8, 16, 48, 24), act_layer='silu'), + + # Experimental 'light' versions of NFNet-F that are little leaner + nfnet_l0=_nfnet_cfg( + depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25, + attn_kwargs=dict(rd_ratio=0.25, rd_divisor=8), act_layer='silu'), + eca_nfnet_l0=_nfnet_cfg( + depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + eca_nfnet_l1=_nfnet_cfg( + depths=(2, 4, 12, 6), feat_mult=2, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + eca_nfnet_l2=_nfnet_cfg( + depths=(3, 6, 18, 9), feat_mult=2, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + eca_nfnet_l3=_nfnet_cfg( + depths=(4, 8, 24, 12), feat_mult=2, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + + # EffNet influenced RegNet defs. + # NOTE: These aren't quite the official ver, ch_div=1 must be set for exact ch counts. I round to ch_div=8. + nf_regnet_b0=_nfreg_cfg(depths=(1, 3, 6, 6)), + nf_regnet_b1=_nfreg_cfg(depths=(2, 4, 7, 7)), + nf_regnet_b2=_nfreg_cfg(depths=(2, 4, 8, 8), channels=(56, 112, 232, 488)), + nf_regnet_b3=_nfreg_cfg(depths=(2, 5, 9, 9), channels=(56, 128, 248, 528)), + nf_regnet_b4=_nfreg_cfg(depths=(2, 6, 11, 11), channels=(64, 144, 288, 616)), + nf_regnet_b5=_nfreg_cfg(depths=(3, 7, 14, 14), channels=(80, 168, 336, 704)), + # FIXME add B6-B8 + + # ResNet (preact, D style deep stem/avg down) defs + nf_resnet26=_nfres_cfg(depths=(2, 2, 2, 2)), + nf_resnet50=_nfres_cfg(depths=(3, 4, 6, 3)), + nf_resnet101=_nfres_cfg(depths=(3, 4, 23, 3)), + + nf_seresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), + nf_seresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), + nf_seresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), + + nf_ecaresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='eca', attn_kwargs=dict()), + nf_ecaresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='eca', attn_kwargs=dict()), + nf_ecaresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='eca', attn_kwargs=dict()), + +) + + +class GammaAct(nn.Module): + def __init__(self, act_type='relu', gamma: float = 1.0, inplace=False): + super().__init__() + self.act_fn = get_act_fn(act_type) + self.gamma = gamma + self.inplace = inplace + + def forward(self, x): + return self.act_fn(x, inplace=self.inplace).mul_(self.gamma) + + +def act_with_gamma(act_type, gamma: float = 1.): + def _create(inplace=False): + return GammaAct(act_type, gamma=gamma, inplace=inplace) + return _create + + +class DownsampleAvg(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, conv_layer=ScaledStdConv2d): + """ AvgPool Downsampling as in 'D' ResNet variants. Support for dilation.""" + super(DownsampleAvg, self).__init__() + avg_stride = stride if dilation == 1 else 1 + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + else: + self.pool = nn.Identity() + self.conv = conv_layer(in_chs, out_chs, 1, stride=1) + + def forward(self, x): + return self.conv(self.pool(x)) + + +class NormFreeBlock(nn.Module): + """Normalization-Free pre-activation block. + """ + + def __init__( + self, in_chs, out_chs=None, stride=1, dilation=1, first_dilation=None, + alpha=1.0, beta=1.0, bottle_ratio=0.25, group_size=None, ch_div=1, reg=True, extra_conv=False, + skipinit=False, attn_layer=None, attn_gain=2.0, act_layer=None, conv_layer=None, drop_path_rate=0.): + super().__init__() + first_dilation = first_dilation or dilation + out_chs = out_chs or in_chs + # RegNet variants scale bottleneck from in_chs, otherwise scale from out_chs like ResNet + mid_chs = make_divisible(in_chs * bottle_ratio if reg else out_chs * bottle_ratio, ch_div) + groups = 1 if not group_size else mid_chs // group_size + if group_size and group_size % ch_div == 0: + mid_chs = group_size * groups # correct mid_chs if group_size divisible by ch_div, otherwise error + self.alpha = alpha + self.beta = beta + self.attn_gain = attn_gain + + if in_chs != out_chs or stride != 1 or dilation != first_dilation: + self.downsample = DownsampleAvg( + in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, conv_layer=conv_layer) + else: + self.downsample = None + + self.act1 = act_layer() + self.conv1 = conv_layer(in_chs, mid_chs, 1) + self.act2 = act_layer(inplace=True) + self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) + if extra_conv: + self.act2b = act_layer(inplace=True) + self.conv2b = conv_layer(mid_chs, mid_chs, 3, stride=1, dilation=dilation, groups=groups) + else: + self.act2b = None + self.conv2b = None + if reg and attn_layer is not None: + self.attn = attn_layer(mid_chs) # RegNet blocks apply attn btw conv2 & 3 + else: + self.attn = None + self.act3 = act_layer() + self.conv3 = conv_layer(mid_chs, out_chs, 1, gain_init=1. if skipinit else 0.) + if not reg and attn_layer is not None: + self.attn_last = attn_layer(out_chs) # ResNet blocks apply attn after conv3 + else: + self.attn_last = None + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.skipinit_gain = nn.Parameter(torch.tensor(0.)) if skipinit else None + + def forward(self, x): + out = self.act1(x) * self.beta + + # shortcut branch + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(out) + + # residual branch + out = self.conv1(out) + out = self.conv2(self.act2(out)) + if self.conv2b is not None: + out = self.conv2b(self.act2b(out)) + if self.attn is not None: + out = self.attn_gain * self.attn(out) + out = self.conv3(self.act3(out)) + if self.attn_last is not None: + out = self.attn_gain * self.attn_last(out) + out = self.drop_path(out) + + if self.skipinit_gain is not None: + out.mul_(self.skipinit_gain) # this slows things down more than expected, TBD + out = out * self.alpha + shortcut + return out + + +def create_stem(in_chs, out_chs, stem_type='', conv_layer=None, act_layer=None, preact_feature=True): + stem_stride = 2 + stem_feature = dict(num_chs=out_chs, reduction=2, module='stem.conv') + stem = OrderedDict() + assert stem_type in ('', 'deep', 'deep_tiered', 'deep_quad', '3x3', '7x7', 'deep_pool', '3x3_pool', '7x7_pool') + if 'deep' in stem_type: + if 'quad' in stem_type: + # 4 deep conv stack as in NFNet-F models + assert not 'pool' in stem_type + stem_chs = (out_chs // 8, out_chs // 4, out_chs // 2, out_chs) + strides = (2, 1, 1, 2) + stem_stride = 4 + stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv3') + else: + if 'tiered' in stem_type: + stem_chs = (3 * out_chs // 8, out_chs // 2, out_chs) # 'T' resnets in resnet.py + else: + stem_chs = (out_chs // 2, out_chs // 2, out_chs) # 'D' ResNets + strides = (2, 1, 1) + stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv2') + last_idx = len(stem_chs) - 1 + for i, (c, s) in enumerate(zip(stem_chs, strides)): + stem[f'conv{i + 1}'] = conv_layer(in_chs, c, kernel_size=3, stride=s) + if i != last_idx: + stem[f'act{i + 2}'] = act_layer(inplace=True) + in_chs = c + elif '3x3' in stem_type: + # 3x3 stem conv as in RegNet + stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=3, stride=2) + else: + # 7x7 stem conv as in ResNet + stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2) + + if 'pool' in stem_type: + stem['pool'] = nn.MaxPool2d(3, stride=2, padding=1) + stem_stride = 4 + + return nn.Sequential(stem), stem_stride, stem_feature + + +# from https://github.com/deepmind/deepmind-research/tree/master/nfnets +_nonlin_gamma = dict( + identity=1.0, + celu=1.270926833152771, + elu=1.2716004848480225, + gelu=1.7015043497085571, + leaky_relu=1.70590341091156, + log_sigmoid=1.9193484783172607, + log_softmax=1.0002083778381348, + relu=1.7139588594436646, + relu6=1.7131484746932983, + selu=1.0008515119552612, + sigmoid=4.803835391998291, + silu=1.7881293296813965, + softsign=2.338853120803833, + softplus=1.9203323125839233, + tanh=1.5939117670059204, +) + + +class NormFreeNet(nn.Module): + """ Normalization-Free Network + + As described in : + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + and + `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 + + This model aims to cover both the NFRegNet-Bx models as detailed in the paper's code snippets and + the (preact) ResNet models described earlier in the paper. + + There are a few differences: + * channels are rounded to be divisible by 8 by default (keep tensor core kernels happy), + this changes channel dim and param counts slightly from the paper models + * activation correcting gamma constants are moved into the ScaledStdConv as it has less performance + impact in PyTorch when done with the weight scaling there. This likely wasn't a concern in the JAX impl. + * a config option `gamma_in_act` can be enabled to not apply gamma in StdConv as described above, but + apply it in each activation. This is slightly slower, numerically different, but matches official impl. + * skipinit is disabled by default, it seems to have a rather drastic impact on GPU memory use and throughput + for what it is/does. Approx 8-10% throughput loss. + """ + def __init__(self, cfg: NfCfg, num_classes=1000, in_chans=3, global_pool='avg', output_stride=32, + drop_rate=0., drop_path_rate=0.): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert cfg.act_layer in _nonlin_gamma, f"Please add non-linearity constants for activation ({cfg.act_layer})." + conv_layer = ScaledStdConv2dSame if cfg.same_padding else ScaledStdConv2d + if cfg.gamma_in_act: + act_layer = act_with_gamma(cfg.act_layer, gamma=_nonlin_gamma[cfg.act_layer]) + conv_layer = partial(conv_layer, eps=cfg.std_conv_eps) + else: + act_layer = get_act_layer(cfg.act_layer) + conv_layer = partial(conv_layer, gamma=_nonlin_gamma[cfg.act_layer], eps=cfg.std_conv_eps) + attn_layer = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None + + stem_chs = make_divisible((cfg.stem_chs or cfg.channels[0]) * cfg.width_factor, cfg.ch_div) + self.stem, stem_stride, stem_feat = create_stem( + in_chans, stem_chs, cfg.stem_type, conv_layer=conv_layer, act_layer=act_layer) + + self.feature_info = [stem_feat] + drop_path_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)] + prev_chs = stem_chs + net_stride = stem_stride + dilation = 1 + expected_var = 1.0 + stages = [] + for stage_idx, stage_depth in enumerate(cfg.depths): + stride = 1 if stage_idx == 0 and stem_stride > 2 else 2 + if net_stride >= output_stride and stride > 1: + dilation *= stride + stride = 1 + net_stride *= stride + first_dilation = 1 if dilation in (1, 2) else 2 + + blocks = [] + for block_idx in range(cfg.depths[stage_idx]): + first_block = block_idx == 0 and stage_idx == 0 + out_chs = make_divisible(cfg.channels[stage_idx] * cfg.width_factor, cfg.ch_div) + blocks += [NormFreeBlock( + in_chs=prev_chs, out_chs=out_chs, + alpha=cfg.alpha, + beta=1. / expected_var ** 0.5, + stride=stride if block_idx == 0 else 1, + dilation=dilation, + first_dilation=first_dilation, + group_size=cfg.group_size, + bottle_ratio=1. if cfg.reg and first_block else cfg.bottle_ratio, + ch_div=cfg.ch_div, + reg=cfg.reg, + extra_conv=cfg.extra_conv, + skipinit=cfg.skipinit, + attn_layer=attn_layer, + attn_gain=cfg.attn_gain, + act_layer=act_layer, + conv_layer=conv_layer, + drop_path_rate=drop_path_rates[stage_idx][block_idx], + )] + if block_idx == 0: + expected_var = 1. # expected var is reset after first block of each stage + expected_var += cfg.alpha ** 2 # Even if reset occurs, increment expected variance + first_dilation = dilation + prev_chs = out_chs + self.feature_info += [dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')] + stages += [nn.Sequential(*blocks)] + self.stages = nn.Sequential(*stages) + + if cfg.num_features: + # The paper NFRegNet models have an EfficientNet-like final head convolution. + self.num_features = make_divisible(cfg.width_factor * cfg.num_features, cfg.ch_div) + self.final_conv = conv_layer(prev_chs, self.num_features, 1) + self.feature_info[-1] = dict(num_chs=self.num_features, reduction=net_stride, module=f'final_conv') + else: + self.num_features = prev_chs + self.final_conv = nn.Identity() + self.final_act = act_layer(inplace=cfg.num_features > 0) + + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + for n, m in self.named_modules(): + if 'fc' in n and isinstance(m, nn.Linear): + if cfg.zero_init_fc: + nn.init.zeros_(m.weight) + else: + nn.init.normal_(m.weight, 0., .01) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='linear') + if m.bias is not None: + nn.init.zeros_(m.bias) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.final_conv(x) + x = self.final_act(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_normfreenet(variant, pretrained=False, **kwargs): + model_cfg = model_cfgs[variant] + feature_cfg = dict(flatten_sequential=True) + return build_model_with_cfg( + NormFreeNet, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=model_cfg, + feature_cfg=feature_cfg, + **kwargs) + + +@register_model +def dm_nfnet_f0(pretrained=False, **kwargs): + """ NFNet-F0 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f0', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f1(pretrained=False, **kwargs): + """ NFNet-F1 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f1', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f2(pretrained=False, **kwargs): + """ NFNet-F2 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f2', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f3(pretrained=False, **kwargs): + """ NFNet-F3 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f3', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f4(pretrained=False, **kwargs): + """ NFNet-F4 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f4', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f5(pretrained=False, **kwargs): + """ NFNet-F5 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f5', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f6(pretrained=False, **kwargs): + """ NFNet-F6 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f6', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f0(pretrained=False, **kwargs): + """ NFNet-F0 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f0', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f1(pretrained=False, **kwargs): + """ NFNet-F1 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f1', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f2(pretrained=False, **kwargs): + """ NFNet-F2 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f2', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f3(pretrained=False, **kwargs): + """ NFNet-F3 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f3', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f4(pretrained=False, **kwargs): + """ NFNet-F4 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f4', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f5(pretrained=False, **kwargs): + """ NFNet-F5 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f5', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f6(pretrained=False, **kwargs): + """ NFNet-F6 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f6', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f7(pretrained=False, **kwargs): + """ NFNet-F7 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f7', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f0s(pretrained=False, **kwargs): + """ NFNet-F0 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f0s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f1s(pretrained=False, **kwargs): + """ NFNet-F1 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f1s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f2s(pretrained=False, **kwargs): + """ NFNet-F2 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f2s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f3s(pretrained=False, **kwargs): + """ NFNet-F3 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f3s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f4s(pretrained=False, **kwargs): + """ NFNet-F4 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f4s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f5s(pretrained=False, **kwargs): + """ NFNet-F5 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f5s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f6s(pretrained=False, **kwargs): + """ NFNet-F6 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f6s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f7s(pretrained=False, **kwargs): + """ NFNet-F7 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f7s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_l0(pretrained=False, **kwargs): + """ NFNet-L0b w/ SiLU + My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & SE ratio + """ + return _create_normfreenet('nfnet_l0', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l0(pretrained=False, **kwargs): + """ ECA-NFNet-L0 w/ SiLU + My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l0', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l1(pretrained=False, **kwargs): + """ ECA-NFNet-L1 w/ SiLU + My experimental 'light' model w/ F1 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l1', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l2(pretrained=False, **kwargs): + """ ECA-NFNet-L2 w/ SiLU + My experimental 'light' model w/ F2 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l2', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l3(pretrained=False, **kwargs): + """ ECA-NFNet-L3 w/ SiLU + My experimental 'light' model w/ F3 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l3', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b0(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B0 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b0', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b1(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B1 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b1', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b2(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B2 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b2', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b3(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B3 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b3', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b4(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B4 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b4', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b5(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B5 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b5', pretrained=pretrained, **kwargs) + + +@register_model +def nf_resnet26(pretrained=False, **kwargs): + """ Normalization-Free ResNet-26 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_resnet26', pretrained=pretrained, **kwargs) + + +@register_model +def nf_resnet50(pretrained=False, **kwargs): + """ Normalization-Free ResNet-50 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_resnet50', pretrained=pretrained, **kwargs) + + +@register_model +def nf_resnet101(pretrained=False, **kwargs): + """ Normalization-Free ResNet-101 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_resnet101', pretrained=pretrained, **kwargs) + + +@register_model +def nf_seresnet26(pretrained=False, **kwargs): + """ Normalization-Free SE-ResNet26 + """ + return _create_normfreenet('nf_seresnet26', pretrained=pretrained, **kwargs) + + +@register_model +def nf_seresnet50(pretrained=False, **kwargs): + """ Normalization-Free SE-ResNet50 + """ + return _create_normfreenet('nf_seresnet50', pretrained=pretrained, **kwargs) + + +@register_model +def nf_seresnet101(pretrained=False, **kwargs): + """ Normalization-Free SE-ResNet101 + """ + return _create_normfreenet('nf_seresnet101', pretrained=pretrained, **kwargs) + + +@register_model +def nf_ecaresnet26(pretrained=False, **kwargs): + """ Normalization-Free ECA-ResNet26 + """ + return _create_normfreenet('nf_ecaresnet26', pretrained=pretrained, **kwargs) + + +@register_model +def nf_ecaresnet50(pretrained=False, **kwargs): + """ Normalization-Free ECA-ResNet50 + """ + return _create_normfreenet('nf_ecaresnet50', pretrained=pretrained, **kwargs) + + +@register_model +def nf_ecaresnet101(pretrained=False, **kwargs): + """ Normalization-Free ECA-ResNet101 + """ + return _create_normfreenet('nf_ecaresnet101', pretrained=pretrained, **kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/pit.py b/testbed/huggingface__pytorch-image-models/timm/models/pit.py new file mode 100644 index 0000000000000000000000000000000000000000..460824e2d65ae403caea62c4fe8ac48a2a0f78e9 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/pit.py @@ -0,0 +1,384 @@ +""" Pooling-based Vision Transformer (PiT) in PyTorch + +A PyTorch implement of Pooling-based Vision Transformers as described in +'Rethinking Spatial Dimensions of Vision Transformers' - https://arxiv.org/abs/2103.16302 + +This code was adapted from the original version at https://github.com/naver-ai/pit, original copyright below. + +Modifications for timm by / Copyright 2020 Ross Wightman +""" +# PiT +# Copyright 2021-present NAVER Corp. +# Apache License v2.0 + +import math +import re +from copy import deepcopy +from functools import partial +from typing import Tuple + +import torch +from torch import nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import trunc_normal_, to_2tuple +from .registry import register_model +from .vision_transformer import Block + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.conv', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # deit models (FB weights) + 'pit_ti_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_730.pth'), + 'pit_xs_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_781.pth'), + 'pit_s_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_809.pth'), + 'pit_b_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_820.pth'), + 'pit_ti_distilled_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_distill_746.pth', + classifier=('head', 'head_dist')), + 'pit_xs_distilled_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_distill_791.pth', + classifier=('head', 'head_dist')), + 'pit_s_distilled_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_distill_819.pth', + classifier=('head', 'head_dist')), + 'pit_b_distilled_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_distill_840.pth', + classifier=('head', 'head_dist')), +} + + +class SequentialTuple(nn.Sequential): + """ This module exists to work around torchscript typing issues list -> list""" + def __init__(self, *args): + super(SequentialTuple, self).__init__(*args) + + def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: + for module in self: + x = module(x) + return x + + +class Transformer(nn.Module): + def __init__( + self, base_dim, depth, heads, mlp_ratio, pool=None, drop_rate=.0, attn_drop_rate=.0, drop_path_prob=None): + super(Transformer, self).__init__() + self.layers = nn.ModuleList([]) + embed_dim = base_dim * heads + + self.blocks = nn.Sequential(*[ + Block( + dim=embed_dim, + num_heads=heads, + mlp_ratio=mlp_ratio, + qkv_bias=True, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=drop_path_prob[i], + norm_layer=partial(nn.LayerNorm, eps=1e-6) + ) + for i in range(depth)]) + + self.pool = pool + + def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: + x, cls_tokens = x + B, C, H, W = x.shape + token_length = cls_tokens.shape[1] + + x = x.flatten(2).transpose(1, 2) + x = torch.cat((cls_tokens, x), dim=1) + + x = self.blocks(x) + + cls_tokens = x[:, :token_length] + x = x[:, token_length:] + x = x.transpose(1, 2).reshape(B, C, H, W) + + if self.pool is not None: + x, cls_tokens = self.pool(x, cls_tokens) + return x, cls_tokens + + +class ConvHeadPooling(nn.Module): + def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'): + super(ConvHeadPooling, self).__init__() + + self.conv = nn.Conv2d( + in_feature, out_feature, kernel_size=stride + 1, padding=stride // 2, stride=stride, + padding_mode=padding_mode, groups=in_feature) + self.fc = nn.Linear(in_feature, out_feature) + + def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]: + + x = self.conv(x) + cls_token = self.fc(cls_token) + + return x, cls_token + + +class ConvEmbedding(nn.Module): + def __init__(self, in_channels, out_channels, patch_size, stride, padding): + super(ConvEmbedding, self).__init__() + self.conv = nn.Conv2d( + in_channels, out_channels, kernel_size=patch_size, stride=stride, padding=padding, bias=True) + + def forward(self, x): + x = self.conv(x) + return x + + +class PoolingVisionTransformer(nn.Module): + """ Pooling-based Vision Transformer + + A PyTorch implement of 'Rethinking Spatial Dimensions of Vision Transformers' + - https://arxiv.org/abs/2103.16302 + """ + def __init__(self, img_size, patch_size, stride, base_dims, depth, heads, + mlp_ratio, num_classes=1000, in_chans=3, distilled=False, + attn_drop_rate=.0, drop_rate=.0, drop_path_rate=.0): + super(PoolingVisionTransformer, self).__init__() + + padding = 0 + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + height = math.floor((img_size[0] + 2 * padding - patch_size[0]) / stride + 1) + width = math.floor((img_size[1] + 2 * padding - patch_size[1]) / stride + 1) + + self.base_dims = base_dims + self.heads = heads + self.num_classes = num_classes + self.num_tokens = 2 if distilled else 1 + + self.patch_size = patch_size + self.pos_embed = nn.Parameter(torch.randn(1, base_dims[0] * heads[0], height, width)) + self.patch_embed = ConvEmbedding(in_chans, base_dims[0] * heads[0], patch_size, stride, padding) + + self.cls_token = nn.Parameter(torch.randn(1, self.num_tokens, base_dims[0] * heads[0])) + self.pos_drop = nn.Dropout(p=drop_rate) + + transformers = [] + # stochastic depth decay rule + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depth)).split(depth)] + for stage in range(len(depth)): + pool = None + if stage < len(heads) - 1: + pool = ConvHeadPooling( + base_dims[stage] * heads[stage], base_dims[stage + 1] * heads[stage + 1], stride=2) + transformers += [Transformer( + base_dims[stage], depth[stage], heads[stage], mlp_ratio, pool=pool, + drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_prob=dpr[stage]) + ] + self.transformers = SequentialTuple(*transformers) + self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6) + self.num_features = self.embed_dim = base_dims[-1] * heads[-1] + + # Classifier head + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + self.head_dist = None + if distilled: + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + if self.head_dist is not None: + return self.head, self.head_dist + else: + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + if self.head_dist is not None: + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + x = self.pos_drop(x + self.pos_embed) + cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) + x, cls_tokens = self.transformers((x, cls_tokens)) + cls_tokens = self.norm(cls_tokens) + if self.head_dist is not None: + return cls_tokens[:, 0], cls_tokens[:, 1] + else: + return cls_tokens[:, 0] + + def forward(self, x): + x = self.forward_features(x) + if self.head_dist is not None: + x, x_dist = self.head(x[0]), self.head_dist(x[1]) # x must be a tuple + if self.training and not torch.jit.is_scripting(): + return x, x_dist + else: + return (x + x_dist) / 2 + else: + return self.head(x) + + +def checkpoint_filter_fn(state_dict, model): + """ preprocess checkpoints """ + out_dict = {} + p_blocks = re.compile(r'pools\.(\d)\.') + for k, v in state_dict.items(): + # FIXME need to update resize for PiT impl + # if k == 'pos_embed' and v.shape != model.pos_embed.shape: + # # To resize pos embedding when using model at different size from pretrained weights + # v = resize_pos_embed(v, model.pos_embed) + k = p_blocks.sub(lambda exp: f'transformers.{int(exp.group(1))}.pool.', k) + out_dict[k] = v + return out_dict + + +def _create_pit(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + PoolingVisionTransformer, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def pit_b_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=14, + stride=7, + base_dims=[64, 64, 64], + depth=[3, 6, 4], + heads=[4, 8, 16], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_b_224', pretrained, **model_kwargs) + + +@register_model +def pit_s_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[3, 6, 12], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_s_224', pretrained, **model_kwargs) + + +@register_model +def pit_xs_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_xs_224', pretrained, **model_kwargs) + + +@register_model +def pit_ti_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[32, 32, 32], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_ti_224', pretrained, **model_kwargs) + + +@register_model +def pit_b_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=14, + stride=7, + base_dims=[64, 64, 64], + depth=[3, 6, 4], + heads=[4, 8, 16], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_b_distilled_224', pretrained, **model_kwargs) + + +@register_model +def pit_s_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[3, 6, 12], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_s_distilled_224', pretrained, **model_kwargs) + + +@register_model +def pit_xs_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_xs_distilled_224', pretrained, **model_kwargs) + + +@register_model +def pit_ti_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[32, 32, 32], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_ti_distilled_224', pretrained, **model_kwargs) \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/timm/models/pnasnet.py b/testbed/huggingface__pytorch-image-models/timm/models/pnasnet.py new file mode 100644 index 0000000000000000000000000000000000000000..999181563a40b58c751b2ff56a631ae7508047e9 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/pnasnet.py @@ -0,0 +1,350 @@ +""" + pnasnet5large implementation grabbed from Cadene's pretrained models + Additional credit to https://github.com/creafz + + https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/pnasnet.py + +""" +from collections import OrderedDict +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import build_model_with_cfg +from .layers import ConvBnAct, create_conv2d, create_pool2d, create_classifier +from .registry import register_model + +__all__ = ['PNASNet5Large'] + +default_cfgs = { + 'pnasnet5large': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/pnasnet5large-bf079911.pth', + 'input_size': (3, 331, 331), + 'pool_size': (11, 11), + 'crop_pct': 0.911, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + 'num_classes': 1000, + 'first_conv': 'conv_0.conv', + 'classifier': 'last_linear', + 'label_offset': 1, # 1001 classes in pretrained weights + }, +} + + +class SeparableConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): + super(SeparableConv2d, self).__init__() + self.depthwise_conv2d = create_conv2d( + in_channels, in_channels, kernel_size=kernel_size, + stride=stride, padding=padding, groups=in_channels) + self.pointwise_conv2d = create_conv2d( + in_channels, out_channels, kernel_size=1, padding=padding) + + def forward(self, x): + x = self.depthwise_conv2d(x) + x = self.pointwise_conv2d(x) + return x + + +class BranchSeparables(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, stem_cell=False, padding=''): + super(BranchSeparables, self).__init__() + middle_channels = out_channels if stem_cell else in_channels + self.act_1 = nn.ReLU() + self.separable_1 = SeparableConv2d( + in_channels, middle_channels, kernel_size, stride=stride, padding=padding) + self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001) + self.act_2 = nn.ReLU() + self.separable_2 = SeparableConv2d( + middle_channels, out_channels, kernel_size, stride=1, padding=padding) + self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.act_1(x) + x = self.separable_1(x) + x = self.bn_sep_1(x) + x = self.act_2(x) + x = self.separable_2(x) + x = self.bn_sep_2(x) + return x + + +class ActConvBn(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): + super(ActConvBn, self).__init__() + self.act = nn.ReLU() + self.conv = create_conv2d( + in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.act(x) + x = self.conv(x) + x = self.bn(x) + return x + + +class FactorizedReduction(nn.Module): + + def __init__(self, in_channels, out_channels, padding=''): + super(FactorizedReduction, self).__init__() + self.act = nn.ReLU() + self.path_1 = nn.Sequential(OrderedDict([ + ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)), + ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)), + ])) + self.path_2 = nn.Sequential(OrderedDict([ + ('pad', nn.ZeroPad2d((-1, 1, -1, 1))), # shift + ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)), + ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)), + ])) + self.final_path_bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.act(x) + x_path1 = self.path_1(x) + x_path2 = self.path_2(x) + out = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + return out + + +class CellBase(nn.Module): + + def cell_forward(self, x_left, x_right): + x_comb_iter_0_left = self.comb_iter_0_left(x_left) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + x_comb_iter_1_right = self.comb_iter_1_right(x_right) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_left = self.comb_iter_3_left(x_comb_iter_2) + x_comb_iter_3_right = self.comb_iter_3_right(x_right) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_left) + if self.comb_iter_4_right is not None: + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + else: + x_comb_iter_4_right = x_right + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class CellStem0(CellBase): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(CellStem0, self).__init__() + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables( + in_chs_left, out_chs_left, kernel_size=5, stride=2, stem_cell=True, padding=pad_type) + self.comb_iter_0_right = nn.Sequential(OrderedDict([ + ('max_pool', create_pool2d('max', 3, stride=2, padding=pad_type)), + ('conv', create_conv2d(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type)), + ('bn', nn.BatchNorm2d(out_chs_left, eps=0.001)), + ])) + + self.comb_iter_1_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=7, stride=2, padding=pad_type) + self.comb_iter_1_right = create_pool2d('max', 3, stride=2, padding=pad_type) + + self.comb_iter_2_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=5, stride=2, padding=pad_type) + self.comb_iter_2_right = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=3, stride=2, padding=pad_type) + + self.comb_iter_3_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=3, padding=pad_type) + self.comb_iter_3_right = create_pool2d('max', 3, stride=2, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables( + in_chs_right, out_chs_right, kernel_size=3, stride=2, stem_cell=True, padding=pad_type) + self.comb_iter_4_right = ActConvBn( + out_chs_right, out_chs_right, kernel_size=1, stride=2, padding=pad_type) + + def forward(self, x_left): + x_right = self.conv_1x1(x_left) + x_out = self.cell_forward(x_left, x_right) + return x_out + + +class Cell(CellBase): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type='', + is_reduction=False, match_prev_layer_dims=False): + super(Cell, self).__init__() + + # If `is_reduction` is set to `True` stride 2 is used for + # convolution and pooling layers to reduce the spatial size of + # the output of a cell approximately by a factor of 2. + stride = 2 if is_reduction else 1 + + # If `match_prev_layer_dimensions` is set to `True` + # `FactorizedReduction` is used to reduce the spatial size + # of the left input of a cell approximately by a factor of 2. + self.match_prev_layer_dimensions = match_prev_layer_dims + if match_prev_layer_dims: + self.conv_prev_1x1 = FactorizedReduction(in_chs_left, out_chs_left, padding=pad_type) + else: + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables( + out_chs_left, out_chs_left, kernel_size=5, stride=stride, padding=pad_type) + self.comb_iter_0_right = create_pool2d('max', 3, stride=stride, padding=pad_type) + + self.comb_iter_1_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=7, stride=stride, padding=pad_type) + self.comb_iter_1_right = create_pool2d('max', 3, stride=stride, padding=pad_type) + + self.comb_iter_2_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=5, stride=stride, padding=pad_type) + self.comb_iter_2_right = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=3, stride=stride, padding=pad_type) + + self.comb_iter_3_left = BranchSeparables(out_chs_right, out_chs_right, kernel_size=3) + self.comb_iter_3_right = create_pool2d('max', 3, stride=stride, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables( + out_chs_left, out_chs_left, kernel_size=3, stride=stride, padding=pad_type) + if is_reduction: + self.comb_iter_4_right = ActConvBn( + out_chs_right, out_chs_right, kernel_size=1, stride=stride, padding=pad_type) + else: + self.comb_iter_4_right = None + + def forward(self, x_left, x_right): + x_left = self.conv_prev_1x1(x_left) + x_right = self.conv_1x1(x_right) + x_out = self.cell_forward(x_left, x_right) + return x_out + + +class PNASNet5Large(nn.Module): + def __init__(self, num_classes=1000, in_chans=3, output_stride=32, drop_rate=0., global_pool='avg', pad_type=''): + super(PNASNet5Large, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.num_features = 4320 + assert output_stride == 32 + + self.conv_0 = ConvBnAct( + in_chans, 96, kernel_size=3, stride=2, padding=0, + norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False) + + self.cell_stem_0 = CellStem0( + in_chs_left=96, out_chs_left=54, in_chs_right=96, out_chs_right=54, pad_type=pad_type) + + self.cell_stem_1 = Cell( + in_chs_left=96, out_chs_left=108, in_chs_right=270, out_chs_right=108, pad_type=pad_type, + match_prev_layer_dims=True, is_reduction=True) + self.cell_0 = Cell( + in_chs_left=270, out_chs_left=216, in_chs_right=540, out_chs_right=216, pad_type=pad_type, + match_prev_layer_dims=True) + self.cell_1 = Cell( + in_chs_left=540, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) + self.cell_2 = Cell( + in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) + self.cell_3 = Cell( + in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) + + self.cell_4 = Cell( + in_chs_left=1080, out_chs_left=432, in_chs_right=1080, out_chs_right=432, pad_type=pad_type, + is_reduction=True) + self.cell_5 = Cell( + in_chs_left=1080, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type, + match_prev_layer_dims=True) + self.cell_6 = Cell( + in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type) + self.cell_7 = Cell( + in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type) + + self.cell_8 = Cell( + in_chs_left=2160, out_chs_left=864, in_chs_right=2160, out_chs_right=864, pad_type=pad_type, + is_reduction=True) + self.cell_9 = Cell( + in_chs_left=2160, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type, + match_prev_layer_dims=True) + self.cell_10 = Cell( + in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type) + self.cell_11 = Cell( + in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type) + self.act = nn.ReLU() + self.feature_info = [ + dict(num_chs=96, reduction=2, module='conv_0'), + dict(num_chs=270, reduction=4, module='cell_stem_1.conv_1x1.act'), + dict(num_chs=1080, reduction=8, module='cell_4.conv_1x1.act'), + dict(num_chs=2160, reduction=16, module='cell_8.conv_1x1.act'), + dict(num_chs=4320, reduction=32, module='act'), + ] + + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def get_classifier(self): + return self.last_linear + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x_conv_0 = self.conv_0(x) + x_stem_0 = self.cell_stem_0(x_conv_0) + x_stem_1 = self.cell_stem_1(x_conv_0, x_stem_0) + x_cell_0 = self.cell_0(x_stem_0, x_stem_1) + x_cell_1 = self.cell_1(x_stem_1, x_cell_0) + x_cell_2 = self.cell_2(x_cell_0, x_cell_1) + x_cell_3 = self.cell_3(x_cell_1, x_cell_2) + x_cell_4 = self.cell_4(x_cell_2, x_cell_3) + x_cell_5 = self.cell_5(x_cell_3, x_cell_4) + x_cell_6 = self.cell_6(x_cell_4, x_cell_5) + x_cell_7 = self.cell_7(x_cell_5, x_cell_6) + x_cell_8 = self.cell_8(x_cell_6, x_cell_7) + x_cell_9 = self.cell_9(x_cell_7, x_cell_8) + x_cell_10 = self.cell_10(x_cell_8, x_cell_9) + x_cell_11 = self.cell_11(x_cell_9, x_cell_10) + x = self.act(x_cell_11) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, self.drop_rate, training=self.training) + x = self.last_linear(x) + return x + + +def _create_pnasnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + PNASNet5Large, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model + **kwargs) + + +@register_model +def pnasnet5large(pretrained=False, **kwargs): + r"""PNASNet-5 model architecture from the + `"Progressive Neural Architecture Search" + `_ paper. + """ + model_kwargs = dict(pad_type='same', **kwargs) + return _create_pnasnet('pnasnet5large', pretrained, **model_kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/pruned/ecaresnet101d_pruned.txt b/testbed/huggingface__pytorch-image-models/timm/models/pruned/ecaresnet101d_pruned.txt new file mode 100644 index 0000000000000000000000000000000000000000..2589b2f9dd3f0d1e02e1d5ddc1fbcd5c143e02c6 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/pruned/ecaresnet101d_pruned.txt @@ -0,0 +1 @@ +conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[45, 64, 1, 1]***layer1.0.bn1.weight:[45]***layer1.0.conv2.weight:[25, 45, 3, 3]***layer1.0.bn2.weight:[25]***layer1.0.conv3.weight:[26, 25, 1, 1]***layer1.0.bn3.weight:[26]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[26, 64, 1, 1]***layer1.0.downsample.2.weight:[26]***layer1.1.conv1.weight:[53, 26, 1, 1]***layer1.1.bn1.weight:[53]***layer1.1.conv2.weight:[20, 53, 3, 3]***layer1.1.bn2.weight:[20]***layer1.1.conv3.weight:[26, 20, 1, 1]***layer1.1.bn3.weight:[26]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[60, 26, 1, 1]***layer1.2.bn1.weight:[60]***layer1.2.conv2.weight:[27, 60, 3, 3]***layer1.2.bn2.weight:[27]***layer1.2.conv3.weight:[26, 27, 1, 1]***layer1.2.bn3.weight:[26]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[81, 26, 1, 1]***layer2.0.bn1.weight:[81]***layer2.0.conv2.weight:[24, 81, 3, 3]***layer2.0.bn2.weight:[24]***layer2.0.conv3.weight:[142, 24, 1, 1]***layer2.0.bn3.weight:[142]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[142, 26, 1, 1]***layer2.0.downsample.2.weight:[142]***layer2.1.conv1.weight:[93, 142, 1, 1]***layer2.1.bn1.weight:[93]***layer2.1.conv2.weight:[49, 93, 3, 3]***layer2.1.bn2.weight:[49]***layer2.1.conv3.weight:[142, 49, 1, 1]***layer2.1.bn3.weight:[142]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[102, 142, 1, 1]***layer2.2.bn1.weight:[102]***layer2.2.conv2.weight:[54, 102, 3, 3]***layer2.2.bn2.weight:[54]***layer2.2.conv3.weight:[142, 54, 1, 1]***layer2.2.bn3.weight:[142]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[122, 142, 1, 1]***layer2.3.bn1.weight:[122]***layer2.3.conv2.weight:[78, 122, 3, 3]***layer2.3.bn2.weight:[78]***layer2.3.conv3.weight:[142, 78, 1, 1]***layer2.3.bn3.weight:[142]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[101, 142, 1, 1]***layer3.0.bn1.weight:[101]***layer3.0.conv2.weight:[25, 101, 3, 3]***layer3.0.bn2.weight:[25]***layer3.0.conv3.weight:[278, 25, 1, 1]***layer3.0.bn3.weight:[278]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[278, 142, 1, 1]***layer3.0.downsample.2.weight:[278]***layer3.1.conv1.weight:[239, 278, 1, 1]***layer3.1.bn1.weight:[239]***layer3.1.conv2.weight:[160, 239, 3, 3]***layer3.1.bn2.weight:[160]***layer3.1.conv3.weight:[278, 160, 1, 1]***layer3.1.bn3.weight:[278]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[234, 278, 1, 1]***layer3.2.bn1.weight:[234]***layer3.2.conv2.weight:[156, 234, 3, 3]***layer3.2.bn2.weight:[156]***layer3.2.conv3.weight:[278, 156, 1, 1]***layer3.2.bn3.weight:[278]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[250, 278, 1, 1]***layer3.3.bn1.weight:[250]***layer3.3.conv2.weight:[176, 250, 3, 3]***layer3.3.bn2.weight:[176]***layer3.3.conv3.weight:[278, 176, 1, 1]***layer3.3.bn3.weight:[278]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[253, 278, 1, 1]***layer3.4.bn1.weight:[253]***layer3.4.conv2.weight:[191, 253, 3, 3]***layer3.4.bn2.weight:[191]***layer3.4.conv3.weight:[278, 191, 1, 1]***layer3.4.bn3.weight:[278]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[251, 278, 1, 1]***layer3.5.bn1.weight:[251]***layer3.5.conv2.weight:[175, 251, 3, 3]***layer3.5.bn2.weight:[175]***layer3.5.conv3.weight:[278, 175, 1, 1]***layer3.5.bn3.weight:[278]***layer3.5.se.conv.weight:[1, 1, 5]***layer3.6.conv1.weight:[230, 278, 1, 1]***layer3.6.bn1.weight:[230]***layer3.6.conv2.weight:[128, 230, 3, 3]***layer3.6.bn2.weight:[128]***layer3.6.conv3.weight:[278, 128, 1, 1]***layer3.6.bn3.weight:[278]***layer3.6.se.conv.weight:[1, 1, 5]***layer3.7.conv1.weight:[244, 278, 1, 1]***layer3.7.bn1.weight:[244]***layer3.7.conv2.weight:[154, 244, 3, 3]***layer3.7.bn2.weight:[154]***layer3.7.conv3.weight:[278, 154, 1, 1]***layer3.7.bn3.weight:[278]***layer3.7.se.conv.weight:[1, 1, 5]***layer3.8.conv1.weight:[244, 278, 1, 1]***layer3.8.bn1.weight:[244]***layer3.8.conv2.weight:[159, 244, 3, 3]***layer3.8.bn2.weight:[159]***layer3.8.conv3.weight:[278, 159, 1, 1]***layer3.8.bn3.weight:[278]***layer3.8.se.conv.weight:[1, 1, 5]***layer3.9.conv1.weight:[238, 278, 1, 1]***layer3.9.bn1.weight:[238]***layer3.9.conv2.weight:[97, 238, 3, 3]***layer3.9.bn2.weight:[97]***layer3.9.conv3.weight:[278, 97, 1, 1]***layer3.9.bn3.weight:[278]***layer3.9.se.conv.weight:[1, 1, 5]***layer3.10.conv1.weight:[244, 278, 1, 1]***layer3.10.bn1.weight:[244]***layer3.10.conv2.weight:[149, 244, 3, 3]***layer3.10.bn2.weight:[149]***layer3.10.conv3.weight:[278, 149, 1, 1]***layer3.10.bn3.weight:[278]***layer3.10.se.conv.weight:[1, 1, 5]***layer3.11.conv1.weight:[253, 278, 1, 1]***layer3.11.bn1.weight:[253]***layer3.11.conv2.weight:[181, 253, 3, 3]***layer3.11.bn2.weight:[181]***layer3.11.conv3.weight:[278, 181, 1, 1]***layer3.11.bn3.weight:[278]***layer3.11.se.conv.weight:[1, 1, 5]***layer3.12.conv1.weight:[245, 278, 1, 1]***layer3.12.bn1.weight:[245]***layer3.12.conv2.weight:[119, 245, 3, 3]***layer3.12.bn2.weight:[119]***layer3.12.conv3.weight:[278, 119, 1, 1]***layer3.12.bn3.weight:[278]***layer3.12.se.conv.weight:[1, 1, 5]***layer3.13.conv1.weight:[255, 278, 1, 1]***layer3.13.bn1.weight:[255]***layer3.13.conv2.weight:[216, 255, 3, 3]***layer3.13.bn2.weight:[216]***layer3.13.conv3.weight:[278, 216, 1, 1]***layer3.13.bn3.weight:[278]***layer3.13.se.conv.weight:[1, 1, 5]***layer3.14.conv1.weight:[256, 278, 1, 1]***layer3.14.bn1.weight:[256]***layer3.14.conv2.weight:[201, 256, 3, 3]***layer3.14.bn2.weight:[201]***layer3.14.conv3.weight:[278, 201, 1, 1]***layer3.14.bn3.weight:[278]***layer3.14.se.conv.weight:[1, 1, 5]***layer3.15.conv1.weight:[253, 278, 1, 1]***layer3.15.bn1.weight:[253]***layer3.15.conv2.weight:[149, 253, 3, 3]***layer3.15.bn2.weight:[149]***layer3.15.conv3.weight:[278, 149, 1, 1]***layer3.15.bn3.weight:[278]***layer3.15.se.conv.weight:[1, 1, 5]***layer3.16.conv1.weight:[254, 278, 1, 1]***layer3.16.bn1.weight:[254]***layer3.16.conv2.weight:[141, 254, 3, 3]***layer3.16.bn2.weight:[141]***layer3.16.conv3.weight:[278, 141, 1, 1]***layer3.16.bn3.weight:[278]***layer3.16.se.conv.weight:[1, 1, 5]***layer3.17.conv1.weight:[256, 278, 1, 1]***layer3.17.bn1.weight:[256]***layer3.17.conv2.weight:[190, 256, 3, 3]***layer3.17.bn2.weight:[190]***layer3.17.conv3.weight:[278, 190, 1, 1]***layer3.17.bn3.weight:[278]***layer3.17.se.conv.weight:[1, 1, 5]***layer3.18.conv1.weight:[256, 278, 1, 1]***layer3.18.bn1.weight:[256]***layer3.18.conv2.weight:[217, 256, 3, 3]***layer3.18.bn2.weight:[217]***layer3.18.conv3.weight:[278, 217, 1, 1]***layer3.18.bn3.weight:[278]***layer3.18.se.conv.weight:[1, 1, 5]***layer3.19.conv1.weight:[255, 278, 1, 1]***layer3.19.bn1.weight:[255]***layer3.19.conv2.weight:[156, 255, 3, 3]***layer3.19.bn2.weight:[156]***layer3.19.conv3.weight:[278, 156, 1, 1]***layer3.19.bn3.weight:[278]***layer3.19.se.conv.weight:[1, 1, 5]***layer3.20.conv1.weight:[256, 278, 1, 1]***layer3.20.bn1.weight:[256]***layer3.20.conv2.weight:[155, 256, 3, 3]***layer3.20.bn2.weight:[155]***layer3.20.conv3.weight:[278, 155, 1, 1]***layer3.20.bn3.weight:[278]***layer3.20.se.conv.weight:[1, 1, 5]***layer3.21.conv1.weight:[256, 278, 1, 1]***layer3.21.bn1.weight:[256]***layer3.21.conv2.weight:[232, 256, 3, 3]***layer3.21.bn2.weight:[232]***layer3.21.conv3.weight:[278, 232, 1, 1]***layer3.21.bn3.weight:[278]***layer3.21.se.conv.weight:[1, 1, 5]***layer3.22.conv1.weight:[256, 278, 1, 1]***layer3.22.bn1.weight:[256]***layer3.22.conv2.weight:[214, 256, 3, 3]***layer3.22.bn2.weight:[214]***layer3.22.conv3.weight:[278, 214, 1, 1]***layer3.22.bn3.weight:[278]***layer3.22.se.conv.weight:[1, 1, 5]***layer4.0.conv1.weight:[499, 278, 1, 1]***layer4.0.bn1.weight:[499]***layer4.0.conv2.weight:[289, 499, 3, 3]***layer4.0.bn2.weight:[289]***layer4.0.conv3.weight:[2042, 289, 1, 1]***layer4.0.bn3.weight:[2042]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2042, 278, 1, 1]***layer4.0.downsample.2.weight:[2042]***layer4.1.conv1.weight:[512, 2042, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[512, 512, 3, 3]***layer4.1.bn2.weight:[512]***layer4.1.conv3.weight:[2042, 512, 1, 1]***layer4.1.bn3.weight:[2042]***layer4.1.se.conv.weight:[1, 1, 7]***layer4.2.conv1.weight:[512, 2042, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[502, 512, 3, 3]***layer4.2.bn2.weight:[502]***layer4.2.conv3.weight:[2042, 502, 1, 1]***layer4.2.bn3.weight:[2042]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2042]***layer1_2_conv3_M.weight:[256, 26]***layer2_3_conv3_M.weight:[512, 142]***layer3_22_conv3_M.weight:[1024, 278]***layer4_2_conv3_M.weight:[2048, 2042] \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/timm/models/pruned/ecaresnet50d_pruned.txt b/testbed/huggingface__pytorch-image-models/timm/models/pruned/ecaresnet50d_pruned.txt new file mode 100644 index 0000000000000000000000000000000000000000..9a8b2bf50e0631dce74d66a1a98e26cae10572a7 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/pruned/ecaresnet50d_pruned.txt @@ -0,0 +1 @@ +conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[47, 64, 1, 1]***layer1.0.bn1.weight:[47]***layer1.0.conv2.weight:[18, 47, 3, 3]***layer1.0.bn2.weight:[18]***layer1.0.conv3.weight:[19, 18, 1, 1]***layer1.0.bn3.weight:[19]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[19, 64, 1, 1]***layer1.0.downsample.2.weight:[19]***layer1.1.conv1.weight:[52, 19, 1, 1]***layer1.1.bn1.weight:[52]***layer1.1.conv2.weight:[22, 52, 3, 3]***layer1.1.bn2.weight:[22]***layer1.1.conv3.weight:[19, 22, 1, 1]***layer1.1.bn3.weight:[19]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[64, 19, 1, 1]***layer1.2.bn1.weight:[64]***layer1.2.conv2.weight:[35, 64, 3, 3]***layer1.2.bn2.weight:[35]***layer1.2.conv3.weight:[19, 35, 1, 1]***layer1.2.bn3.weight:[19]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[85, 19, 1, 1]***layer2.0.bn1.weight:[85]***layer2.0.conv2.weight:[37, 85, 3, 3]***layer2.0.bn2.weight:[37]***layer2.0.conv3.weight:[171, 37, 1, 1]***layer2.0.bn3.weight:[171]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[171, 19, 1, 1]***layer2.0.downsample.2.weight:[171]***layer2.1.conv1.weight:[107, 171, 1, 1]***layer2.1.bn1.weight:[107]***layer2.1.conv2.weight:[80, 107, 3, 3]***layer2.1.bn2.weight:[80]***layer2.1.conv3.weight:[171, 80, 1, 1]***layer2.1.bn3.weight:[171]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[120, 171, 1, 1]***layer2.2.bn1.weight:[120]***layer2.2.conv2.weight:[85, 120, 3, 3]***layer2.2.bn2.weight:[85]***layer2.2.conv3.weight:[171, 85, 1, 1]***layer2.2.bn3.weight:[171]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[125, 171, 1, 1]***layer2.3.bn1.weight:[125]***layer2.3.conv2.weight:[87, 125, 3, 3]***layer2.3.bn2.weight:[87]***layer2.3.conv3.weight:[171, 87, 1, 1]***layer2.3.bn3.weight:[171]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[198, 171, 1, 1]***layer3.0.bn1.weight:[198]***layer3.0.conv2.weight:[126, 198, 3, 3]***layer3.0.bn2.weight:[126]***layer3.0.conv3.weight:[818, 126, 1, 1]***layer3.0.bn3.weight:[818]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[818, 171, 1, 1]***layer3.0.downsample.2.weight:[818]***layer3.1.conv1.weight:[255, 818, 1, 1]***layer3.1.bn1.weight:[255]***layer3.1.conv2.weight:[232, 255, 3, 3]***layer3.1.bn2.weight:[232]***layer3.1.conv3.weight:[818, 232, 1, 1]***layer3.1.bn3.weight:[818]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[256, 818, 1, 1]***layer3.2.bn1.weight:[256]***layer3.2.conv2.weight:[233, 256, 3, 3]***layer3.2.bn2.weight:[233]***layer3.2.conv3.weight:[818, 233, 1, 1]***layer3.2.bn3.weight:[818]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[253, 818, 1, 1]***layer3.3.bn1.weight:[253]***layer3.3.conv2.weight:[235, 253, 3, 3]***layer3.3.bn2.weight:[235]***layer3.3.conv3.weight:[818, 235, 1, 1]***layer3.3.bn3.weight:[818]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[256, 818, 1, 1]***layer3.4.bn1.weight:[256]***layer3.4.conv2.weight:[225, 256, 3, 3]***layer3.4.bn2.weight:[225]***layer3.4.conv3.weight:[818, 225, 1, 1]***layer3.4.bn3.weight:[818]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[256, 818, 1, 1]***layer3.5.bn1.weight:[256]***layer3.5.conv2.weight:[239, 256, 3, 3]***layer3.5.bn2.weight:[239]***layer3.5.conv3.weight:[818, 239, 1, 1]***layer3.5.bn3.weight:[818]***layer3.5.se.conv.weight:[1, 1, 5]***layer4.0.conv1.weight:[492, 818, 1, 1]***layer4.0.bn1.weight:[492]***layer4.0.conv2.weight:[237, 492, 3, 3]***layer4.0.bn2.weight:[237]***layer4.0.conv3.weight:[2022, 237, 1, 1]***layer4.0.bn3.weight:[2022]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2022, 818, 1, 1]***layer4.0.downsample.2.weight:[2022]***layer4.1.conv1.weight:[512, 2022, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[500, 512, 3, 3]***layer4.1.bn2.weight:[500]***layer4.1.conv3.weight:[2022, 500, 1, 1]***layer4.1.bn3.weight:[2022]***layer4.1.se.conv.weight:[1, 1, 7]***layer4.2.conv1.weight:[512, 2022, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[490, 512, 3, 3]***layer4.2.bn2.weight:[490]***layer4.2.conv3.weight:[2022, 490, 1, 1]***layer4.2.bn3.weight:[2022]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2022]***layer1_2_conv3_M.weight:[256, 19]***layer2_3_conv3_M.weight:[512, 171]***layer3_5_conv3_M.weight:[1024, 818]***layer4_2_conv3_M.weight:[2048, 2022] \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/timm/models/pruned/efficientnet_b1_pruned.txt b/testbed/huggingface__pytorch-image-models/timm/models/pruned/efficientnet_b1_pruned.txt new file mode 100644 index 0000000000000000000000000000000000000000..0972b527612b283fd242cc5eaeb6e767ea106c66 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/pruned/efficientnet_b1_pruned.txt @@ -0,0 +1 @@ +conv_stem.weight:[32, 3, 3, 3]***bn1.weight:[32]***bn1.bias:[32]***bn1.running_mean:[32]***bn1.running_var:[32]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[32, 1, 3, 3]***blocks.0.0.bn1.weight:[32]***blocks.0.0.bn1.bias:[32]***blocks.0.0.bn1.running_mean:[32]***blocks.0.0.bn1.running_var:[32]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[8, 32, 1, 1]***blocks.0.0.se.conv_reduce.bias:[8]***blocks.0.0.se.conv_expand.weight:[32, 8, 1, 1]***blocks.0.0.se.conv_expand.bias:[32]***blocks.0.0.conv_pw.weight:[16, 32, 1, 1]***blocks.0.0.bn2.weight:[16]***blocks.0.0.bn2.bias:[16]***blocks.0.0.bn2.running_mean:[16]***blocks.0.0.bn2.running_var:[16]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[16, 1, 3, 3]***blocks.0.1.bn1.weight:[16]***blocks.0.1.bn1.bias:[16]***blocks.0.1.bn1.running_mean:[16]***blocks.0.1.bn1.running_var:[16]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[4, 16, 1, 1]***blocks.0.1.se.conv_reduce.bias:[4]***blocks.0.1.se.conv_expand.weight:[16, 4, 1, 1]***blocks.0.1.se.conv_expand.bias:[16]***blocks.0.1.conv_pw.weight:[16, 16, 1, 1]***blocks.0.1.bn2.weight:[16]***blocks.0.1.bn2.bias:[16]***blocks.0.1.bn2.running_mean:[16]***blocks.0.1.bn2.running_var:[16]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[48, 16, 1, 1]***blocks.1.0.bn1.weight:[48]***blocks.1.0.bn1.bias:[48]***blocks.1.0.bn1.running_mean:[48]***blocks.1.0.bn1.running_var:[48]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[48, 1, 3, 3]***blocks.1.0.bn2.weight:[48]***blocks.1.0.bn2.bias:[48]***blocks.1.0.bn2.running_mean:[48]***blocks.1.0.bn2.running_var:[48]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[4, 48, 1, 1]***blocks.1.0.se.conv_reduce.bias:[4]***blocks.1.0.se.conv_expand.weight:[48, 4, 1, 1]***blocks.1.0.se.conv_expand.bias:[48]***blocks.1.0.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.0.bn3.weight:[12]***blocks.1.0.bn3.bias:[12]***blocks.1.0.bn3.running_mean:[12]***blocks.1.0.bn3.running_var:[12]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[62, 12, 1, 1]***blocks.1.1.bn1.weight:[62]***blocks.1.1.bn1.bias:[62]***blocks.1.1.bn1.running_mean:[62]***blocks.1.1.bn1.running_var:[62]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[62, 1, 3, 3]***blocks.1.1.bn2.weight:[62]***blocks.1.1.bn2.bias:[62]***blocks.1.1.bn2.running_mean:[62]***blocks.1.1.bn2.running_var:[62]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[6, 62, 1, 1]***blocks.1.1.se.conv_reduce.bias:[6]***blocks.1.1.se.conv_expand.weight:[62, 6, 1, 1]***blocks.1.1.se.conv_expand.bias:[62]***blocks.1.1.conv_pwl.weight:[12, 62, 1, 1]***blocks.1.1.bn3.weight:[12]***blocks.1.1.bn3.bias:[12]***blocks.1.1.bn3.running_mean:[12]***blocks.1.1.bn3.running_var:[12]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[48, 12, 1, 1]***blocks.1.2.bn1.weight:[48]***blocks.1.2.bn1.bias:[48]***blocks.1.2.bn1.running_mean:[48]***blocks.1.2.bn1.running_var:[48]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[48, 1, 3, 3]***blocks.1.2.bn2.weight:[48]***blocks.1.2.bn2.bias:[48]***blocks.1.2.bn2.running_mean:[48]***blocks.1.2.bn2.running_var:[48]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[6, 48, 1, 1]***blocks.1.2.se.conv_reduce.bias:[6]***blocks.1.2.se.conv_expand.weight:[48, 6, 1, 1]***blocks.1.2.se.conv_expand.bias:[48]***blocks.1.2.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.2.bn3.weight:[12]***blocks.1.2.bn3.bias:[12]***blocks.1.2.bn3.running_mean:[12]***blocks.1.2.bn3.running_var:[12]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[70, 12, 1, 1]***blocks.2.0.bn1.weight:[70]***blocks.2.0.bn1.bias:[70]***blocks.2.0.bn1.running_mean:[70]***blocks.2.0.bn1.running_var:[70]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[70, 1, 5, 5]***blocks.2.0.bn2.weight:[70]***blocks.2.0.bn2.bias:[70]***blocks.2.0.bn2.running_mean:[70]***blocks.2.0.bn2.running_var:[70]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[6, 70, 1, 1]***blocks.2.0.se.conv_reduce.bias:[6]***blocks.2.0.se.conv_expand.weight:[70, 6, 1, 1]***blocks.2.0.se.conv_expand.bias:[70]***blocks.2.0.conv_pwl.weight:[35, 70, 1, 1]***blocks.2.0.bn3.weight:[35]***blocks.2.0.bn3.bias:[35]***blocks.2.0.bn3.running_mean:[35]***blocks.2.0.bn3.running_var:[35]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[61, 35, 1, 1]***blocks.2.1.bn1.weight:[61]***blocks.2.1.bn1.bias:[61]***blocks.2.1.bn1.running_mean:[61]***blocks.2.1.bn1.running_var:[61]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[61, 1, 5, 5]***blocks.2.1.bn2.weight:[61]***blocks.2.1.bn2.bias:[61]***blocks.2.1.bn2.running_mean:[61]***blocks.2.1.bn2.running_var:[61]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[10, 61, 1, 1]***blocks.2.1.se.conv_reduce.bias:[10]***blocks.2.1.se.conv_expand.weight:[61, 10, 1, 1]***blocks.2.1.se.conv_expand.bias:[61]***blocks.2.1.conv_pwl.weight:[35, 61, 1, 1]***blocks.2.1.bn3.weight:[35]***blocks.2.1.bn3.bias:[35]***blocks.2.1.bn3.running_mean:[35]***blocks.2.1.bn3.running_var:[35]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[51, 35, 1, 1]***blocks.2.2.bn1.weight:[51]***blocks.2.2.bn1.bias:[51]***blocks.2.2.bn1.running_mean:[51]***blocks.2.2.bn1.running_var:[51]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[51, 1, 5, 5]***blocks.2.2.bn2.weight:[51]***blocks.2.2.bn2.bias:[51]***blocks.2.2.bn2.running_mean:[51]***blocks.2.2.bn2.running_var:[51]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[10, 51, 1, 1]***blocks.2.2.se.conv_reduce.bias:[10]***blocks.2.2.se.conv_expand.weight:[51, 10, 1, 1]***blocks.2.2.se.conv_expand.bias:[51]***blocks.2.2.conv_pwl.weight:[35, 51, 1, 1]***blocks.2.2.bn3.weight:[35]***blocks.2.2.bn3.bias:[35]***blocks.2.2.bn3.running_mean:[35]***blocks.2.2.bn3.running_var:[35]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[175, 35, 1, 1]***blocks.3.0.bn1.weight:[175]***blocks.3.0.bn1.bias:[175]***blocks.3.0.bn1.running_mean:[175]***blocks.3.0.bn1.running_var:[175]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[175, 1, 3, 3]***blocks.3.0.bn2.weight:[175]***blocks.3.0.bn2.bias:[175]***blocks.3.0.bn2.running_mean:[175]***blocks.3.0.bn2.running_var:[175]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[10, 175, 1, 1]***blocks.3.0.se.conv_reduce.bias:[10]***blocks.3.0.se.conv_expand.weight:[175, 10, 1, 1]***blocks.3.0.se.conv_expand.bias:[175]***blocks.3.0.conv_pwl.weight:[74, 175, 1, 1]***blocks.3.0.bn3.weight:[74]***blocks.3.0.bn3.bias:[74]***blocks.3.0.bn3.running_mean:[74]***blocks.3.0.bn3.running_var:[74]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[188, 74, 1, 1]***blocks.3.1.bn1.weight:[188]***blocks.3.1.bn1.bias:[188]***blocks.3.1.bn1.running_mean:[188]***blocks.3.1.bn1.running_var:[188]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[188, 1, 3, 3]***blocks.3.1.bn2.weight:[188]***blocks.3.1.bn2.bias:[188]***blocks.3.1.bn2.running_mean:[188]***blocks.3.1.bn2.running_var:[188]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[20, 188, 1, 1]***blocks.3.1.se.conv_reduce.bias:[20]***blocks.3.1.se.conv_expand.weight:[188, 20, 1, 1]***blocks.3.1.se.conv_expand.bias:[188]***blocks.3.1.conv_pwl.weight:[74, 188, 1, 1]***blocks.3.1.bn3.weight:[74]***blocks.3.1.bn3.bias:[74]***blocks.3.1.bn3.running_mean:[74]***blocks.3.1.bn3.running_var:[74]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[137, 74, 1, 1]***blocks.3.2.bn1.weight:[137]***blocks.3.2.bn1.bias:[137]***blocks.3.2.bn1.running_mean:[137]***blocks.3.2.bn1.running_var:[137]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[137, 1, 3, 3]***blocks.3.2.bn2.weight:[137]***blocks.3.2.bn2.bias:[137]***blocks.3.2.bn2.running_mean:[137]***blocks.3.2.bn2.running_var:[137]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[20, 137, 1, 1]***blocks.3.2.se.conv_reduce.bias:[20]***blocks.3.2.se.conv_expand.weight:[137, 20, 1, 1]***blocks.3.2.se.conv_expand.bias:[137]***blocks.3.2.conv_pwl.weight:[74, 137, 1, 1]***blocks.3.2.bn3.weight:[74]***blocks.3.2.bn3.bias:[74]***blocks.3.2.bn3.running_mean:[74]***blocks.3.2.bn3.running_var:[74]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[164, 74, 1, 1]***blocks.3.3.bn1.weight:[164]***blocks.3.3.bn1.bias:[164]***blocks.3.3.bn1.running_mean:[164]***blocks.3.3.bn1.running_var:[164]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[164, 1, 3, 3]***blocks.3.3.bn2.weight:[164]***blocks.3.3.bn2.bias:[164]***blocks.3.3.bn2.running_mean:[164]***blocks.3.3.bn2.running_var:[164]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[20, 164, 1, 1]***blocks.3.3.se.conv_reduce.bias:[20]***blocks.3.3.se.conv_expand.weight:[164, 20, 1, 1]***blocks.3.3.se.conv_expand.bias:[164]***blocks.3.3.conv_pwl.weight:[74, 164, 1, 1]***blocks.3.3.bn3.weight:[74]***blocks.3.3.bn3.bias:[74]***blocks.3.3.bn3.running_mean:[74]***blocks.3.3.bn3.running_var:[74]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[399, 74, 1, 1]***blocks.4.0.bn1.weight:[399]***blocks.4.0.bn1.bias:[399]***blocks.4.0.bn1.running_mean:[399]***blocks.4.0.bn1.running_var:[399]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[399, 1, 5, 5]***blocks.4.0.bn2.weight:[399]***blocks.4.0.bn2.bias:[399]***blocks.4.0.bn2.running_mean:[399]***blocks.4.0.bn2.running_var:[399]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[20, 399, 1, 1]***blocks.4.0.se.conv_reduce.bias:[20]***blocks.4.0.se.conv_expand.weight:[399, 20, 1, 1]***blocks.4.0.se.conv_expand.bias:[399]***blocks.4.0.conv_pwl.weight:[67, 399, 1, 1]***blocks.4.0.bn3.weight:[67]***blocks.4.0.bn3.bias:[67]***blocks.4.0.bn3.running_mean:[67]***blocks.4.0.bn3.running_var:[67]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[201, 67, 1, 1]***blocks.4.1.bn1.weight:[201]***blocks.4.1.bn1.bias:[201]***blocks.4.1.bn1.running_mean:[201]***blocks.4.1.bn1.running_var:[201]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[201, 1, 5, 5]***blocks.4.1.bn2.weight:[201]***blocks.4.1.bn2.bias:[201]***blocks.4.1.bn2.running_mean:[201]***blocks.4.1.bn2.running_var:[201]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[28, 201, 1, 1]***blocks.4.1.se.conv_reduce.bias:[28]***blocks.4.1.se.conv_expand.weight:[201, 28, 1, 1]***blocks.4.1.se.conv_expand.bias:[201]***blocks.4.1.conv_pwl.weight:[67, 201, 1, 1]***blocks.4.1.bn3.weight:[67]***blocks.4.1.bn3.bias:[67]***blocks.4.1.bn3.running_mean:[67]***blocks.4.1.bn3.running_var:[67]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[160, 67, 1, 1]***blocks.4.2.bn1.weight:[160]***blocks.4.2.bn1.bias:[160]***blocks.4.2.bn1.running_mean:[160]***blocks.4.2.bn1.running_var:[160]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[160, 1, 5, 5]***blocks.4.2.bn2.weight:[160]***blocks.4.2.bn2.bias:[160]***blocks.4.2.bn2.running_mean:[160]***blocks.4.2.bn2.running_var:[160]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[28, 160, 1, 1]***blocks.4.2.se.conv_reduce.bias:[28]***blocks.4.2.se.conv_expand.weight:[160, 28, 1, 1]***blocks.4.2.se.conv_expand.bias:[160]***blocks.4.2.conv_pwl.weight:[67, 160, 1, 1]***blocks.4.2.bn3.weight:[67]***blocks.4.2.bn3.bias:[67]***blocks.4.2.bn3.running_mean:[67]***blocks.4.2.bn3.running_var:[67]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[213, 67, 1, 1]***blocks.4.3.bn1.weight:[213]***blocks.4.3.bn1.bias:[213]***blocks.4.3.bn1.running_mean:[213]***blocks.4.3.bn1.running_var:[213]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[213, 1, 5, 5]***blocks.4.3.bn2.weight:[213]***blocks.4.3.bn2.bias:[213]***blocks.4.3.bn2.running_mean:[213]***blocks.4.3.bn2.running_var:[213]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[28, 213, 1, 1]***blocks.4.3.se.conv_reduce.bias:[28]***blocks.4.3.se.conv_expand.weight:[213, 28, 1, 1]***blocks.4.3.se.conv_expand.bias:[213]***blocks.4.3.conv_pwl.weight:[67, 213, 1, 1]***blocks.4.3.bn3.weight:[67]***blocks.4.3.bn3.bias:[67]***blocks.4.3.bn3.running_mean:[67]***blocks.4.3.bn3.running_var:[67]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[637, 67, 1, 1]***blocks.5.0.bn1.weight:[637]***blocks.5.0.bn1.bias:[637]***blocks.5.0.bn1.running_mean:[637]***blocks.5.0.bn1.running_var:[637]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[637, 1, 5, 5]***blocks.5.0.bn2.weight:[637]***blocks.5.0.bn2.bias:[637]***blocks.5.0.bn2.running_mean:[637]***blocks.5.0.bn2.running_var:[637]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[27, 637, 1, 1]***blocks.5.0.se.conv_reduce.bias:[27]***blocks.5.0.se.conv_expand.weight:[637, 27, 1, 1]***blocks.5.0.se.conv_expand.bias:[637]***blocks.5.0.conv_pwl.weight:[192, 637, 1, 1]***blocks.5.0.bn3.weight:[192]***blocks.5.0.bn3.bias:[192]***blocks.5.0.bn3.running_mean:[192]***blocks.5.0.bn3.running_var:[192]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[806, 192, 1, 1]***blocks.5.1.bn1.weight:[806]***blocks.5.1.bn1.bias:[806]***blocks.5.1.bn1.running_mean:[806]***blocks.5.1.bn1.running_var:[806]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[806, 1, 5, 5]***blocks.5.1.bn2.weight:[806]***blocks.5.1.bn2.bias:[806]***blocks.5.1.bn2.running_mean:[806]***blocks.5.1.bn2.running_var:[806]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[48, 806, 1, 1]***blocks.5.1.se.conv_reduce.bias:[48]***blocks.5.1.se.conv_expand.weight:[806, 48, 1, 1]***blocks.5.1.se.conv_expand.bias:[806]***blocks.5.1.conv_pwl.weight:[192, 806, 1, 1]***blocks.5.1.bn3.weight:[192]***blocks.5.1.bn3.bias:[192]***blocks.5.1.bn3.running_mean:[192]***blocks.5.1.bn3.running_var:[192]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[798, 192, 1, 1]***blocks.5.2.bn1.weight:[798]***blocks.5.2.bn1.bias:[798]***blocks.5.2.bn1.running_mean:[798]***blocks.5.2.bn1.running_var:[798]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[798, 1, 5, 5]***blocks.5.2.bn2.weight:[798]***blocks.5.2.bn2.bias:[798]***blocks.5.2.bn2.running_mean:[798]***blocks.5.2.bn2.running_var:[798]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[48, 798, 1, 1]***blocks.5.2.se.conv_reduce.bias:[48]***blocks.5.2.se.conv_expand.weight:[798, 48, 1, 1]***blocks.5.2.se.conv_expand.bias:[798]***blocks.5.2.conv_pwl.weight:[192, 798, 1, 1]***blocks.5.2.bn3.weight:[192]***blocks.5.2.bn3.bias:[192]***blocks.5.2.bn3.running_mean:[192]***blocks.5.2.bn3.running_var:[192]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[891, 192, 1, 1]***blocks.5.3.bn1.weight:[891]***blocks.5.3.bn1.bias:[891]***blocks.5.3.bn1.running_mean:[891]***blocks.5.3.bn1.running_var:[891]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[891, 1, 5, 5]***blocks.5.3.bn2.weight:[891]***blocks.5.3.bn2.bias:[891]***blocks.5.3.bn2.running_mean:[891]***blocks.5.3.bn2.running_var:[891]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[48, 891, 1, 1]***blocks.5.3.se.conv_reduce.bias:[48]***blocks.5.3.se.conv_expand.weight:[891, 48, 1, 1]***blocks.5.3.se.conv_expand.bias:[891]***blocks.5.3.conv_pwl.weight:[192, 891, 1, 1]***blocks.5.3.bn3.weight:[192]***blocks.5.3.bn3.bias:[192]***blocks.5.3.bn3.running_mean:[192]***blocks.5.3.bn3.running_var:[192]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[990, 192, 1, 1]***blocks.5.4.bn1.weight:[990]***blocks.5.4.bn1.bias:[990]***blocks.5.4.bn1.running_mean:[990]***blocks.5.4.bn1.running_var:[990]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[990, 1, 5, 5]***blocks.5.4.bn2.weight:[990]***blocks.5.4.bn2.bias:[990]***blocks.5.4.bn2.running_mean:[990]***blocks.5.4.bn2.running_var:[990]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[48, 990, 1, 1]***blocks.5.4.se.conv_reduce.bias:[48]***blocks.5.4.se.conv_expand.weight:[990, 48, 1, 1]***blocks.5.4.se.conv_expand.bias:[990]***blocks.5.4.conv_pwl.weight:[192, 990, 1, 1]***blocks.5.4.bn3.weight:[192]***blocks.5.4.bn3.bias:[192]***blocks.5.4.bn3.running_mean:[192]***blocks.5.4.bn3.running_var:[192]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1152, 192, 1, 1]***blocks.6.0.bn1.weight:[1152]***blocks.6.0.bn1.bias:[1152]***blocks.6.0.bn1.running_mean:[1152]***blocks.6.0.bn1.running_var:[1152]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1152, 1, 3, 3]***blocks.6.0.bn2.weight:[1152]***blocks.6.0.bn2.bias:[1152]***blocks.6.0.bn2.running_mean:[1152]***blocks.6.0.bn2.running_var:[1152]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[48, 1152, 1, 1]***blocks.6.0.se.conv_reduce.bias:[48]***blocks.6.0.se.conv_expand.weight:[1152, 48, 1, 1]***blocks.6.0.se.conv_expand.bias:[1152]***blocks.6.0.conv_pwl.weight:[320, 1152, 1, 1]***blocks.6.0.bn3.weight:[320]***blocks.6.0.bn3.bias:[320]***blocks.6.0.bn3.running_mean:[320]***blocks.6.0.bn3.running_var:[320]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[1912, 320, 1, 1]***blocks.6.1.bn1.weight:[1912]***blocks.6.1.bn1.bias:[1912]***blocks.6.1.bn1.running_mean:[1912]***blocks.6.1.bn1.running_var:[1912]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[1912, 1, 3, 3]***blocks.6.1.bn2.weight:[1912]***blocks.6.1.bn2.bias:[1912]***blocks.6.1.bn2.running_mean:[1912]***blocks.6.1.bn2.running_var:[1912]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[80, 1912, 1, 1]***blocks.6.1.se.conv_reduce.bias:[80]***blocks.6.1.se.conv_expand.weight:[1912, 80, 1, 1]***blocks.6.1.se.conv_expand.bias:[1912]***blocks.6.1.conv_pwl.weight:[320, 1912, 1, 1]***blocks.6.1.bn3.weight:[320]***blocks.6.1.bn3.bias:[320]***blocks.6.1.bn3.running_mean:[320]***blocks.6.1.bn3.running_var:[320]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1280, 320, 1, 1]***bn2.weight:[1280]***bn2.bias:[1280]***bn2.running_mean:[1280]***bn2.running_var:[1280]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1280]***classifier.bias:[1000] \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/timm/models/pruned/efficientnet_b2_pruned.txt b/testbed/huggingface__pytorch-image-models/timm/models/pruned/efficientnet_b2_pruned.txt new file mode 100644 index 0000000000000000000000000000000000000000..6e3fadee3e9f92eaade96afd8691a5e4437551ee --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/pruned/efficientnet_b2_pruned.txt @@ -0,0 +1 @@ +conv_stem.weight:[32, 3, 3, 3]***bn1.weight:[32]***bn1.bias:[32]***bn1.running_mean:[32]***bn1.running_var:[32]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[32, 1, 3, 3]***blocks.0.0.bn1.weight:[32]***blocks.0.0.bn1.bias:[32]***blocks.0.0.bn1.running_mean:[32]***blocks.0.0.bn1.running_var:[32]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[8, 32, 1, 1]***blocks.0.0.se.conv_reduce.bias:[8]***blocks.0.0.se.conv_expand.weight:[32, 8, 1, 1]***blocks.0.0.se.conv_expand.bias:[32]***blocks.0.0.conv_pw.weight:[16, 32, 1, 1]***blocks.0.0.bn2.weight:[16]***blocks.0.0.bn2.bias:[16]***blocks.0.0.bn2.running_mean:[16]***blocks.0.0.bn2.running_var:[16]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[16, 1, 3, 3]***blocks.0.1.bn1.weight:[16]***blocks.0.1.bn1.bias:[16]***blocks.0.1.bn1.running_mean:[16]***blocks.0.1.bn1.running_var:[16]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[4, 16, 1, 1]***blocks.0.1.se.conv_reduce.bias:[4]***blocks.0.1.se.conv_expand.weight:[16, 4, 1, 1]***blocks.0.1.se.conv_expand.bias:[16]***blocks.0.1.conv_pw.weight:[16, 16, 1, 1]***blocks.0.1.bn2.weight:[16]***blocks.0.1.bn2.bias:[16]***blocks.0.1.bn2.running_mean:[16]***blocks.0.1.bn2.running_var:[16]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[54, 16, 1, 1]***blocks.1.0.bn1.weight:[54]***blocks.1.0.bn1.bias:[54]***blocks.1.0.bn1.running_mean:[54]***blocks.1.0.bn1.running_var:[54]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[54, 1, 3, 3]***blocks.1.0.bn2.weight:[54]***blocks.1.0.bn2.bias:[54]***blocks.1.0.bn2.running_mean:[54]***blocks.1.0.bn2.running_var:[54]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[4, 54, 1, 1]***blocks.1.0.se.conv_reduce.bias:[4]***blocks.1.0.se.conv_expand.weight:[54, 4, 1, 1]***blocks.1.0.se.conv_expand.bias:[54]***blocks.1.0.conv_pwl.weight:[17, 54, 1, 1]***blocks.1.0.bn3.weight:[17]***blocks.1.0.bn3.bias:[17]***blocks.1.0.bn3.running_mean:[17]***blocks.1.0.bn3.running_var:[17]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[69, 17, 1, 1]***blocks.1.1.bn1.weight:[69]***blocks.1.1.bn1.bias:[69]***blocks.1.1.bn1.running_mean:[69]***blocks.1.1.bn1.running_var:[69]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[69, 1, 3, 3]***blocks.1.1.bn2.weight:[69]***blocks.1.1.bn2.bias:[69]***blocks.1.1.bn2.running_mean:[69]***blocks.1.1.bn2.running_var:[69]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[6, 69, 1, 1]***blocks.1.1.se.conv_reduce.bias:[6]***blocks.1.1.se.conv_expand.weight:[69, 6, 1, 1]***blocks.1.1.se.conv_expand.bias:[69]***blocks.1.1.conv_pwl.weight:[17, 69, 1, 1]***blocks.1.1.bn3.weight:[17]***blocks.1.1.bn3.bias:[17]***blocks.1.1.bn3.running_mean:[17]***blocks.1.1.bn3.running_var:[17]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[61, 17, 1, 1]***blocks.1.2.bn1.weight:[61]***blocks.1.2.bn1.bias:[61]***blocks.1.2.bn1.running_mean:[61]***blocks.1.2.bn1.running_var:[61]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[61, 1, 3, 3]***blocks.1.2.bn2.weight:[61]***blocks.1.2.bn2.bias:[61]***blocks.1.2.bn2.running_mean:[61]***blocks.1.2.bn2.running_var:[61]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[6, 61, 1, 1]***blocks.1.2.se.conv_reduce.bias:[6]***blocks.1.2.se.conv_expand.weight:[61, 6, 1, 1]***blocks.1.2.se.conv_expand.bias:[61]***blocks.1.2.conv_pwl.weight:[17, 61, 1, 1]***blocks.1.2.bn3.weight:[17]***blocks.1.2.bn3.bias:[17]***blocks.1.2.bn3.running_mean:[17]***blocks.1.2.bn3.running_var:[17]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[86, 17, 1, 1]***blocks.2.0.bn1.weight:[86]***blocks.2.0.bn1.bias:[86]***blocks.2.0.bn1.running_mean:[86]***blocks.2.0.bn1.running_var:[86]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[86, 1, 5, 5]***blocks.2.0.bn2.weight:[86]***blocks.2.0.bn2.bias:[86]***blocks.2.0.bn2.running_mean:[86]***blocks.2.0.bn2.running_var:[86]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[6, 86, 1, 1]***blocks.2.0.se.conv_reduce.bias:[6]***blocks.2.0.se.conv_expand.weight:[86, 6, 1, 1]***blocks.2.0.se.conv_expand.bias:[86]***blocks.2.0.conv_pwl.weight:[42, 86, 1, 1]***blocks.2.0.bn3.weight:[42]***blocks.2.0.bn3.bias:[42]***blocks.2.0.bn3.running_mean:[42]***blocks.2.0.bn3.running_var:[42]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[72, 42, 1, 1]***blocks.2.1.bn1.weight:[72]***blocks.2.1.bn1.bias:[72]***blocks.2.1.bn1.running_mean:[72]***blocks.2.1.bn1.running_var:[72]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[72, 1, 5, 5]***blocks.2.1.bn2.weight:[72]***blocks.2.1.bn2.bias:[72]***blocks.2.1.bn2.running_mean:[72]***blocks.2.1.bn2.running_var:[72]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[12, 72, 1, 1]***blocks.2.1.se.conv_reduce.bias:[12]***blocks.2.1.se.conv_expand.weight:[72, 12, 1, 1]***blocks.2.1.se.conv_expand.bias:[72]***blocks.2.1.conv_pwl.weight:[42, 72, 1, 1]***blocks.2.1.bn3.weight:[42]***blocks.2.1.bn3.bias:[42]***blocks.2.1.bn3.running_mean:[42]***blocks.2.1.bn3.running_var:[42]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[98, 42, 1, 1]***blocks.2.2.bn1.weight:[98]***blocks.2.2.bn1.bias:[98]***blocks.2.2.bn1.running_mean:[98]***blocks.2.2.bn1.running_var:[98]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[98, 1, 5, 5]***blocks.2.2.bn2.weight:[98]***blocks.2.2.bn2.bias:[98]***blocks.2.2.bn2.running_mean:[98]***blocks.2.2.bn2.running_var:[98]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[12, 98, 1, 1]***blocks.2.2.se.conv_reduce.bias:[12]***blocks.2.2.se.conv_expand.weight:[98, 12, 1, 1]***blocks.2.2.se.conv_expand.bias:[98]***blocks.2.2.conv_pwl.weight:[42, 98, 1, 1]***blocks.2.2.bn3.weight:[42]***blocks.2.2.bn3.bias:[42]***blocks.2.2.bn3.running_mean:[42]***blocks.2.2.bn3.running_var:[42]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[245, 42, 1, 1]***blocks.3.0.bn1.weight:[245]***blocks.3.0.bn1.bias:[245]***blocks.3.0.bn1.running_mean:[245]***blocks.3.0.bn1.running_var:[245]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[245, 1, 3, 3]***blocks.3.0.bn2.weight:[245]***blocks.3.0.bn2.bias:[245]***blocks.3.0.bn2.running_mean:[245]***blocks.3.0.bn2.running_var:[245]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[12, 245, 1, 1]***blocks.3.0.se.conv_reduce.bias:[12]***blocks.3.0.se.conv_expand.weight:[245, 12, 1, 1]***blocks.3.0.se.conv_expand.bias:[245]***blocks.3.0.conv_pwl.weight:[85, 245, 1, 1]***blocks.3.0.bn3.weight:[85]***blocks.3.0.bn3.bias:[85]***blocks.3.0.bn3.running_mean:[85]***blocks.3.0.bn3.running_var:[85]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[274, 85, 1, 1]***blocks.3.1.bn1.weight:[274]***blocks.3.1.bn1.bias:[274]***blocks.3.1.bn1.running_mean:[274]***blocks.3.1.bn1.running_var:[274]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[274, 1, 3, 3]***blocks.3.1.bn2.weight:[274]***blocks.3.1.bn2.bias:[274]***blocks.3.1.bn2.running_mean:[274]***blocks.3.1.bn2.running_var:[274]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[22, 274, 1, 1]***blocks.3.1.se.conv_reduce.bias:[22]***blocks.3.1.se.conv_expand.weight:[274, 22, 1, 1]***blocks.3.1.se.conv_expand.bias:[274]***blocks.3.1.conv_pwl.weight:[85, 274, 1, 1]***blocks.3.1.bn3.weight:[85]***blocks.3.1.bn3.bias:[85]***blocks.3.1.bn3.running_mean:[85]***blocks.3.1.bn3.running_var:[85]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[254, 85, 1, 1]***blocks.3.2.bn1.weight:[254]***blocks.3.2.bn1.bias:[254]***blocks.3.2.bn1.running_mean:[254]***blocks.3.2.bn1.running_var:[254]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[254, 1, 3, 3]***blocks.3.2.bn2.weight:[254]***blocks.3.2.bn2.bias:[254]***blocks.3.2.bn2.running_mean:[254]***blocks.3.2.bn2.running_var:[254]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[22, 254, 1, 1]***blocks.3.2.se.conv_reduce.bias:[22]***blocks.3.2.se.conv_expand.weight:[254, 22, 1, 1]***blocks.3.2.se.conv_expand.bias:[254]***blocks.3.2.conv_pwl.weight:[85, 254, 1, 1]***blocks.3.2.bn3.weight:[85]***blocks.3.2.bn3.bias:[85]***blocks.3.2.bn3.running_mean:[85]***blocks.3.2.bn3.running_var:[85]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[292, 85, 1, 1]***blocks.3.3.bn1.weight:[292]***blocks.3.3.bn1.bias:[292]***blocks.3.3.bn1.running_mean:[292]***blocks.3.3.bn1.running_var:[292]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[292, 1, 3, 3]***blocks.3.3.bn2.weight:[292]***blocks.3.3.bn2.bias:[292]***blocks.3.3.bn2.running_mean:[292]***blocks.3.3.bn2.running_var:[292]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[22, 292, 1, 1]***blocks.3.3.se.conv_reduce.bias:[22]***blocks.3.3.se.conv_expand.weight:[292, 22, 1, 1]***blocks.3.3.se.conv_expand.bias:[292]***blocks.3.3.conv_pwl.weight:[85, 292, 1, 1]***blocks.3.3.bn3.weight:[85]***blocks.3.3.bn3.bias:[85]***blocks.3.3.bn3.running_mean:[85]***blocks.3.3.bn3.running_var:[85]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[502, 85, 1, 1]***blocks.4.0.bn1.weight:[502]***blocks.4.0.bn1.bias:[502]***blocks.4.0.bn1.running_mean:[502]***blocks.4.0.bn1.running_var:[502]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[502, 1, 5, 5]***blocks.4.0.bn2.weight:[502]***blocks.4.0.bn2.bias:[502]***blocks.4.0.bn2.running_mean:[502]***blocks.4.0.bn2.running_var:[502]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[22, 502, 1, 1]***blocks.4.0.se.conv_reduce.bias:[22]***blocks.4.0.se.conv_expand.weight:[502, 22, 1, 1]***blocks.4.0.se.conv_expand.bias:[502]***blocks.4.0.conv_pwl.weight:[116, 502, 1, 1]***blocks.4.0.bn3.weight:[116]***blocks.4.0.bn3.bias:[116]***blocks.4.0.bn3.running_mean:[116]***blocks.4.0.bn3.running_var:[116]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[315, 116, 1, 1]***blocks.4.1.bn1.weight:[315]***blocks.4.1.bn1.bias:[315]***blocks.4.1.bn1.running_mean:[315]***blocks.4.1.bn1.running_var:[315]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[315, 1, 5, 5]***blocks.4.1.bn2.weight:[315]***blocks.4.1.bn2.bias:[315]***blocks.4.1.bn2.running_mean:[315]***blocks.4.1.bn2.running_var:[315]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[30, 315, 1, 1]***blocks.4.1.se.conv_reduce.bias:[30]***blocks.4.1.se.conv_expand.weight:[315, 30, 1, 1]***blocks.4.1.se.conv_expand.bias:[315]***blocks.4.1.conv_pwl.weight:[116, 315, 1, 1]***blocks.4.1.bn3.weight:[116]***blocks.4.1.bn3.bias:[116]***blocks.4.1.bn3.running_mean:[116]***blocks.4.1.bn3.running_var:[116]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[354, 116, 1, 1]***blocks.4.2.bn1.weight:[354]***blocks.4.2.bn1.bias:[354]***blocks.4.2.bn1.running_mean:[354]***blocks.4.2.bn1.running_var:[354]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[354, 1, 5, 5]***blocks.4.2.bn2.weight:[354]***blocks.4.2.bn2.bias:[354]***blocks.4.2.bn2.running_mean:[354]***blocks.4.2.bn2.running_var:[354]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[30, 354, 1, 1]***blocks.4.2.se.conv_reduce.bias:[30]***blocks.4.2.se.conv_expand.weight:[354, 30, 1, 1]***blocks.4.2.se.conv_expand.bias:[354]***blocks.4.2.conv_pwl.weight:[116, 354, 1, 1]***blocks.4.2.bn3.weight:[116]***blocks.4.2.bn3.bias:[116]***blocks.4.2.bn3.running_mean:[116]***blocks.4.2.bn3.running_var:[116]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[443, 116, 1, 1]***blocks.4.3.bn1.weight:[443]***blocks.4.3.bn1.bias:[443]***blocks.4.3.bn1.running_mean:[443]***blocks.4.3.bn1.running_var:[443]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[443, 1, 5, 5]***blocks.4.3.bn2.weight:[443]***blocks.4.3.bn2.bias:[443]***blocks.4.3.bn2.running_mean:[443]***blocks.4.3.bn2.running_var:[443]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[30, 443, 1, 1]***blocks.4.3.se.conv_reduce.bias:[30]***blocks.4.3.se.conv_expand.weight:[443, 30, 1, 1]***blocks.4.3.se.conv_expand.bias:[443]***blocks.4.3.conv_pwl.weight:[116, 443, 1, 1]***blocks.4.3.bn3.weight:[116]***blocks.4.3.bn3.bias:[116]***blocks.4.3.bn3.running_mean:[116]***blocks.4.3.bn3.running_var:[116]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[719, 116, 1, 1]***blocks.5.0.bn1.weight:[719]***blocks.5.0.bn1.bias:[719]***blocks.5.0.bn1.running_mean:[719]***blocks.5.0.bn1.running_var:[719]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[719, 1, 5, 5]***blocks.5.0.bn2.weight:[719]***blocks.5.0.bn2.bias:[719]***blocks.5.0.bn2.running_mean:[719]***blocks.5.0.bn2.running_var:[719]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[30, 719, 1, 1]***blocks.5.0.se.conv_reduce.bias:[30]***blocks.5.0.se.conv_expand.weight:[719, 30, 1, 1]***blocks.5.0.se.conv_expand.bias:[719]***blocks.5.0.conv_pwl.weight:[208, 719, 1, 1]***blocks.5.0.bn3.weight:[208]***blocks.5.0.bn3.bias:[208]***blocks.5.0.bn3.running_mean:[208]***blocks.5.0.bn3.running_var:[208]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[1148, 208, 1, 1]***blocks.5.1.bn1.weight:[1148]***blocks.5.1.bn1.bias:[1148]***blocks.5.1.bn1.running_mean:[1148]***blocks.5.1.bn1.running_var:[1148]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[1148, 1, 5, 5]***blocks.5.1.bn2.weight:[1148]***blocks.5.1.bn2.bias:[1148]***blocks.5.1.bn2.running_mean:[1148]***blocks.5.1.bn2.running_var:[1148]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[52, 1148, 1, 1]***blocks.5.1.se.conv_reduce.bias:[52]***blocks.5.1.se.conv_expand.weight:[1148, 52, 1, 1]***blocks.5.1.se.conv_expand.bias:[1148]***blocks.5.1.conv_pwl.weight:[208, 1148, 1, 1]***blocks.5.1.bn3.weight:[208]***blocks.5.1.bn3.bias:[208]***blocks.5.1.bn3.running_mean:[208]***blocks.5.1.bn3.running_var:[208]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[1160, 208, 1, 1]***blocks.5.2.bn1.weight:[1160]***blocks.5.2.bn1.bias:[1160]***blocks.5.2.bn1.running_mean:[1160]***blocks.5.2.bn1.running_var:[1160]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[1160, 1, 5, 5]***blocks.5.2.bn2.weight:[1160]***blocks.5.2.bn2.bias:[1160]***blocks.5.2.bn2.running_mean:[1160]***blocks.5.2.bn2.running_var:[1160]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[52, 1160, 1, 1]***blocks.5.2.se.conv_reduce.bias:[52]***blocks.5.2.se.conv_expand.weight:[1160, 52, 1, 1]***blocks.5.2.se.conv_expand.bias:[1160]***blocks.5.2.conv_pwl.weight:[208, 1160, 1, 1]***blocks.5.2.bn3.weight:[208]***blocks.5.2.bn3.bias:[208]***blocks.5.2.bn3.running_mean:[208]***blocks.5.2.bn3.running_var:[208]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[1182, 208, 1, 1]***blocks.5.3.bn1.weight:[1182]***blocks.5.3.bn1.bias:[1182]***blocks.5.3.bn1.running_mean:[1182]***blocks.5.3.bn1.running_var:[1182]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[1182, 1, 5, 5]***blocks.5.3.bn2.weight:[1182]***blocks.5.3.bn2.bias:[1182]***blocks.5.3.bn2.running_mean:[1182]***blocks.5.3.bn2.running_var:[1182]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[52, 1182, 1, 1]***blocks.5.3.se.conv_reduce.bias:[52]***blocks.5.3.se.conv_expand.weight:[1182, 52, 1, 1]***blocks.5.3.se.conv_expand.bias:[1182]***blocks.5.3.conv_pwl.weight:[208, 1182, 1, 1]***blocks.5.3.bn3.weight:[208]***blocks.5.3.bn3.bias:[208]***blocks.5.3.bn3.running_mean:[208]***blocks.5.3.bn3.running_var:[208]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[1228, 208, 1, 1]***blocks.5.4.bn1.weight:[1228]***blocks.5.4.bn1.bias:[1228]***blocks.5.4.bn1.running_mean:[1228]***blocks.5.4.bn1.running_var:[1228]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[1228, 1, 5, 5]***blocks.5.4.bn2.weight:[1228]***blocks.5.4.bn2.bias:[1228]***blocks.5.4.bn2.running_mean:[1228]***blocks.5.4.bn2.running_var:[1228]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[52, 1228, 1, 1]***blocks.5.4.se.conv_reduce.bias:[52]***blocks.5.4.se.conv_expand.weight:[1228, 52, 1, 1]***blocks.5.4.se.conv_expand.bias:[1228]***blocks.5.4.conv_pwl.weight:[208, 1228, 1, 1]***blocks.5.4.bn3.weight:[208]***blocks.5.4.bn3.bias:[208]***blocks.5.4.bn3.running_mean:[208]***blocks.5.4.bn3.running_var:[208]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1248, 208, 1, 1]***blocks.6.0.bn1.weight:[1248]***blocks.6.0.bn1.bias:[1248]***blocks.6.0.bn1.running_mean:[1248]***blocks.6.0.bn1.running_var:[1248]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1248, 1, 3, 3]***blocks.6.0.bn2.weight:[1248]***blocks.6.0.bn2.bias:[1248]***blocks.6.0.bn2.running_mean:[1248]***blocks.6.0.bn2.running_var:[1248]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[52, 1248, 1, 1]***blocks.6.0.se.conv_reduce.bias:[52]***blocks.6.0.se.conv_expand.weight:[1248, 52, 1, 1]***blocks.6.0.se.conv_expand.bias:[1248]***blocks.6.0.conv_pwl.weight:[352, 1248, 1, 1]***blocks.6.0.bn3.weight:[352]***blocks.6.0.bn3.bias:[352]***blocks.6.0.bn3.running_mean:[352]***blocks.6.0.bn3.running_var:[352]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[2112, 352, 1, 1]***blocks.6.1.bn1.weight:[2112]***blocks.6.1.bn1.bias:[2112]***blocks.6.1.bn1.running_mean:[2112]***blocks.6.1.bn1.running_var:[2112]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[2112, 1, 3, 3]***blocks.6.1.bn2.weight:[2112]***blocks.6.1.bn2.bias:[2112]***blocks.6.1.bn2.running_mean:[2112]***blocks.6.1.bn2.running_var:[2112]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[88, 2112, 1, 1]***blocks.6.1.se.conv_reduce.bias:[88]***blocks.6.1.se.conv_expand.weight:[2112, 88, 1, 1]***blocks.6.1.se.conv_expand.bias:[2112]***blocks.6.1.conv_pwl.weight:[352, 2112, 1, 1]***blocks.6.1.bn3.weight:[352]***blocks.6.1.bn3.bias:[352]***blocks.6.1.bn3.running_mean:[352]***blocks.6.1.bn3.running_var:[352]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1408, 352, 1, 1]***bn2.weight:[1408]***bn2.bias:[1408]***bn2.running_mean:[1408]***bn2.running_var:[1408]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1408]***classifier.bias:[1000] \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/timm/models/pruned/efficientnet_b3_pruned.txt b/testbed/huggingface__pytorch-image-models/timm/models/pruned/efficientnet_b3_pruned.txt new file mode 100644 index 0000000000000000000000000000000000000000..489781736de08e5cf40bf76528a735fff4a3f61c --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/pruned/efficientnet_b3_pruned.txt @@ -0,0 +1 @@ +conv_stem.weight:[40, 3, 3, 3]***bn1.weight:[40]***bn1.bias:[40]***bn1.running_mean:[40]***bn1.running_var:[40]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[40, 1, 3, 3]***blocks.0.0.bn1.weight:[40]***blocks.0.0.bn1.bias:[40]***blocks.0.0.bn1.running_mean:[40]***blocks.0.0.bn1.running_var:[40]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[10, 40, 1, 1]***blocks.0.0.se.conv_reduce.bias:[10]***blocks.0.0.se.conv_expand.weight:[40, 10, 1, 1]***blocks.0.0.se.conv_expand.bias:[40]***blocks.0.0.conv_pw.weight:[24, 40, 1, 1]***blocks.0.0.bn2.weight:[24]***blocks.0.0.bn2.bias:[24]***blocks.0.0.bn2.running_mean:[24]***blocks.0.0.bn2.running_var:[24]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[24, 1, 3, 3]***blocks.0.1.bn1.weight:[24]***blocks.0.1.bn1.bias:[24]***blocks.0.1.bn1.running_mean:[24]***blocks.0.1.bn1.running_var:[24]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[6, 24, 1, 1]***blocks.0.1.se.conv_reduce.bias:[6]***blocks.0.1.se.conv_expand.weight:[24, 6, 1, 1]***blocks.0.1.se.conv_expand.bias:[24]***blocks.0.1.conv_pw.weight:[24, 24, 1, 1]***blocks.0.1.bn2.weight:[24]***blocks.0.1.bn2.bias:[24]***blocks.0.1.bn2.running_mean:[24]***blocks.0.1.bn2.running_var:[24]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[27, 24, 1, 1]***blocks.1.0.bn1.weight:[27]***blocks.1.0.bn1.bias:[27]***blocks.1.0.bn1.running_mean:[27]***blocks.1.0.bn1.running_var:[27]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[27, 1, 3, 3]***blocks.1.0.bn2.weight:[27]***blocks.1.0.bn2.bias:[27]***blocks.1.0.bn2.running_mean:[27]***blocks.1.0.bn2.running_var:[27]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[6, 27, 1, 1]***blocks.1.0.se.conv_reduce.bias:[6]***blocks.1.0.se.conv_expand.weight:[27, 6, 1, 1]***blocks.1.0.se.conv_expand.bias:[27]***blocks.1.0.conv_pwl.weight:[12, 27, 1, 1]***blocks.1.0.bn3.weight:[12]***blocks.1.0.bn3.bias:[12]***blocks.1.0.bn3.running_mean:[12]***blocks.1.0.bn3.running_var:[12]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[49, 12, 1, 1]***blocks.1.1.bn1.weight:[49]***blocks.1.1.bn1.bias:[49]***blocks.1.1.bn1.running_mean:[49]***blocks.1.1.bn1.running_var:[49]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[49, 1, 3, 3]***blocks.1.1.bn2.weight:[49]***blocks.1.1.bn2.bias:[49]***blocks.1.1.bn2.running_mean:[49]***blocks.1.1.bn2.running_var:[49]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[8, 49, 1, 1]***blocks.1.1.se.conv_reduce.bias:[8]***blocks.1.1.se.conv_expand.weight:[49, 8, 1, 1]***blocks.1.1.se.conv_expand.bias:[49]***blocks.1.1.conv_pwl.weight:[12, 49, 1, 1]***blocks.1.1.bn3.weight:[12]***blocks.1.1.bn3.bias:[12]***blocks.1.1.bn3.running_mean:[12]***blocks.1.1.bn3.running_var:[12]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[48, 12, 1, 1]***blocks.1.2.bn1.weight:[48]***blocks.1.2.bn1.bias:[48]***blocks.1.2.bn1.running_mean:[48]***blocks.1.2.bn1.running_var:[48]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[48, 1, 3, 3]***blocks.1.2.bn2.weight:[48]***blocks.1.2.bn2.bias:[48]***blocks.1.2.bn2.running_mean:[48]***blocks.1.2.bn2.running_var:[48]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[8, 48, 1, 1]***blocks.1.2.se.conv_reduce.bias:[8]***blocks.1.2.se.conv_expand.weight:[48, 8, 1, 1]***blocks.1.2.se.conv_expand.bias:[48]***blocks.1.2.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.2.bn3.weight:[12]***blocks.1.2.bn3.bias:[12]***blocks.1.2.bn3.running_mean:[12]***blocks.1.2.bn3.running_var:[12]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[83, 12, 1, 1]***blocks.2.0.bn1.weight:[83]***blocks.2.0.bn1.bias:[83]***blocks.2.0.bn1.running_mean:[83]***blocks.2.0.bn1.running_var:[83]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[83, 1, 5, 5]***blocks.2.0.bn2.weight:[83]***blocks.2.0.bn2.bias:[83]***blocks.2.0.bn2.running_mean:[83]***blocks.2.0.bn2.running_var:[83]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[8, 83, 1, 1]***blocks.2.0.se.conv_reduce.bias:[8]***blocks.2.0.se.conv_expand.weight:[83, 8, 1, 1]***blocks.2.0.se.conv_expand.bias:[83]***blocks.2.0.conv_pwl.weight:[40, 83, 1, 1]***blocks.2.0.bn3.weight:[40]***blocks.2.0.bn3.bias:[40]***blocks.2.0.bn3.running_mean:[40]***blocks.2.0.bn3.running_var:[40]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[90, 40, 1, 1]***blocks.2.1.bn1.weight:[90]***blocks.2.1.bn1.bias:[90]***blocks.2.1.bn1.running_mean:[90]***blocks.2.1.bn1.running_var:[90]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[90, 1, 5, 5]***blocks.2.1.bn2.weight:[90]***blocks.2.1.bn2.bias:[90]***blocks.2.1.bn2.running_mean:[90]***blocks.2.1.bn2.running_var:[90]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[12, 90, 1, 1]***blocks.2.1.se.conv_reduce.bias:[12]***blocks.2.1.se.conv_expand.weight:[90, 12, 1, 1]***blocks.2.1.se.conv_expand.bias:[90]***blocks.2.1.conv_pwl.weight:[40, 90, 1, 1]***blocks.2.1.bn3.weight:[40]***blocks.2.1.bn3.bias:[40]***blocks.2.1.bn3.running_mean:[40]***blocks.2.1.bn3.running_var:[40]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[85, 40, 1, 1]***blocks.2.2.bn1.weight:[85]***blocks.2.2.bn1.bias:[85]***blocks.2.2.bn1.running_mean:[85]***blocks.2.2.bn1.running_var:[85]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[85, 1, 5, 5]***blocks.2.2.bn2.weight:[85]***blocks.2.2.bn2.bias:[85]***blocks.2.2.bn2.running_mean:[85]***blocks.2.2.bn2.running_var:[85]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[12, 85, 1, 1]***blocks.2.2.se.conv_reduce.bias:[12]***blocks.2.2.se.conv_expand.weight:[85, 12, 1, 1]***blocks.2.2.se.conv_expand.bias:[85]***blocks.2.2.conv_pwl.weight:[40, 85, 1, 1]***blocks.2.2.bn3.weight:[40]***blocks.2.2.bn3.bias:[40]***blocks.2.2.bn3.running_mean:[40]***blocks.2.2.bn3.running_var:[40]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[215, 40, 1, 1]***blocks.3.0.bn1.weight:[215]***blocks.3.0.bn1.bias:[215]***blocks.3.0.bn1.running_mean:[215]***blocks.3.0.bn1.running_var:[215]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[215, 1, 3, 3]***blocks.3.0.bn2.weight:[215]***blocks.3.0.bn2.bias:[215]***blocks.3.0.bn2.running_mean:[215]***blocks.3.0.bn2.running_var:[215]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[12, 215, 1, 1]***blocks.3.0.se.conv_reduce.bias:[12]***blocks.3.0.se.conv_expand.weight:[215, 12, 1, 1]***blocks.3.0.se.conv_expand.bias:[215]***blocks.3.0.conv_pwl.weight:[93, 215, 1, 1]***blocks.3.0.bn3.weight:[93]***blocks.3.0.bn3.bias:[93]***blocks.3.0.bn3.running_mean:[93]***blocks.3.0.bn3.running_var:[93]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[261, 93, 1, 1]***blocks.3.1.bn1.weight:[261]***blocks.3.1.bn1.bias:[261]***blocks.3.1.bn1.running_mean:[261]***blocks.3.1.bn1.running_var:[261]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[261, 1, 3, 3]***blocks.3.1.bn2.weight:[261]***blocks.3.1.bn2.bias:[261]***blocks.3.1.bn2.running_mean:[261]***blocks.3.1.bn2.running_var:[261]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[24, 261, 1, 1]***blocks.3.1.se.conv_reduce.bias:[24]***blocks.3.1.se.conv_expand.weight:[261, 24, 1, 1]***blocks.3.1.se.conv_expand.bias:[261]***blocks.3.1.conv_pwl.weight:[93, 261, 1, 1]***blocks.3.1.bn3.weight:[93]***blocks.3.1.bn3.bias:[93]***blocks.3.1.bn3.running_mean:[93]***blocks.3.1.bn3.running_var:[93]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[219, 93, 1, 1]***blocks.3.2.bn1.weight:[219]***blocks.3.2.bn1.bias:[219]***blocks.3.2.bn1.running_mean:[219]***blocks.3.2.bn1.running_var:[219]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[219, 1, 3, 3]***blocks.3.2.bn2.weight:[219]***blocks.3.2.bn2.bias:[219]***blocks.3.2.bn2.running_mean:[219]***blocks.3.2.bn2.running_var:[219]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[24, 219, 1, 1]***blocks.3.2.se.conv_reduce.bias:[24]***blocks.3.2.se.conv_expand.weight:[219, 24, 1, 1]***blocks.3.2.se.conv_expand.bias:[219]***blocks.3.2.conv_pwl.weight:[93, 219, 1, 1]***blocks.3.2.bn3.weight:[93]***blocks.3.2.bn3.bias:[93]***blocks.3.2.bn3.running_mean:[93]***blocks.3.2.bn3.running_var:[93]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[254, 93, 1, 1]***blocks.3.3.bn1.weight:[254]***blocks.3.3.bn1.bias:[254]***blocks.3.3.bn1.running_mean:[254]***blocks.3.3.bn1.running_var:[254]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[254, 1, 3, 3]***blocks.3.3.bn2.weight:[254]***blocks.3.3.bn2.bias:[254]***blocks.3.3.bn2.running_mean:[254]***blocks.3.3.bn2.running_var:[254]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[24, 254, 1, 1]***blocks.3.3.se.conv_reduce.bias:[24]***blocks.3.3.se.conv_expand.weight:[254, 24, 1, 1]***blocks.3.3.se.conv_expand.bias:[254]***blocks.3.3.conv_pwl.weight:[93, 254, 1, 1]***blocks.3.3.bn3.weight:[93]***blocks.3.3.bn3.bias:[93]***blocks.3.3.bn3.running_mean:[93]***blocks.3.3.bn3.running_var:[93]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.3.4.conv_pw.weight:[236, 93, 1, 1]***blocks.3.4.bn1.weight:[236]***blocks.3.4.bn1.bias:[236]***blocks.3.4.bn1.running_mean:[236]***blocks.3.4.bn1.running_var:[236]***blocks.3.4.bn1.num_batches_tracked:[]***blocks.3.4.conv_dw.weight:[236, 1, 3, 3]***blocks.3.4.bn2.weight:[236]***blocks.3.4.bn2.bias:[236]***blocks.3.4.bn2.running_mean:[236]***blocks.3.4.bn2.running_var:[236]***blocks.3.4.bn2.num_batches_tracked:[]***blocks.3.4.se.conv_reduce.weight:[24, 236, 1, 1]***blocks.3.4.se.conv_reduce.bias:[24]***blocks.3.4.se.conv_expand.weight:[236, 24, 1, 1]***blocks.3.4.se.conv_expand.bias:[236]***blocks.3.4.conv_pwl.weight:[93, 236, 1, 1]***blocks.3.4.bn3.weight:[93]***blocks.3.4.bn3.bias:[93]***blocks.3.4.bn3.running_mean:[93]***blocks.3.4.bn3.running_var:[93]***blocks.3.4.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[480, 93, 1, 1]***blocks.4.0.bn1.weight:[480]***blocks.4.0.bn1.bias:[480]***blocks.4.0.bn1.running_mean:[480]***blocks.4.0.bn1.running_var:[480]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[480, 1, 5, 5]***blocks.4.0.bn2.weight:[480]***blocks.4.0.bn2.bias:[480]***blocks.4.0.bn2.running_mean:[480]***blocks.4.0.bn2.running_var:[480]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[24, 480, 1, 1]***blocks.4.0.se.conv_reduce.bias:[24]***blocks.4.0.se.conv_expand.weight:[480, 24, 1, 1]***blocks.4.0.se.conv_expand.bias:[480]***blocks.4.0.conv_pwl.weight:[120, 480, 1, 1]***blocks.4.0.bn3.weight:[120]***blocks.4.0.bn3.bias:[120]***blocks.4.0.bn3.running_mean:[120]***blocks.4.0.bn3.running_var:[120]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[235, 120, 1, 1]***blocks.4.1.bn1.weight:[235]***blocks.4.1.bn1.bias:[235]***blocks.4.1.bn1.running_mean:[235]***blocks.4.1.bn1.running_var:[235]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[235, 1, 5, 5]***blocks.4.1.bn2.weight:[235]***blocks.4.1.bn2.bias:[235]***blocks.4.1.bn2.running_mean:[235]***blocks.4.1.bn2.running_var:[235]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[34, 235, 1, 1]***blocks.4.1.se.conv_reduce.bias:[34]***blocks.4.1.se.conv_expand.weight:[235, 34, 1, 1]***blocks.4.1.se.conv_expand.bias:[235]***blocks.4.1.conv_pwl.weight:[120, 235, 1, 1]***blocks.4.1.bn3.weight:[120]***blocks.4.1.bn3.bias:[120]***blocks.4.1.bn3.running_mean:[120]***blocks.4.1.bn3.running_var:[120]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[217, 120, 1, 1]***blocks.4.2.bn1.weight:[217]***blocks.4.2.bn1.bias:[217]***blocks.4.2.bn1.running_mean:[217]***blocks.4.2.bn1.running_var:[217]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[217, 1, 5, 5]***blocks.4.2.bn2.weight:[217]***blocks.4.2.bn2.bias:[217]***blocks.4.2.bn2.running_mean:[217]***blocks.4.2.bn2.running_var:[217]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[34, 217, 1, 1]***blocks.4.2.se.conv_reduce.bias:[34]***blocks.4.2.se.conv_expand.weight:[217, 34, 1, 1]***blocks.4.2.se.conv_expand.bias:[217]***blocks.4.2.conv_pwl.weight:[120, 217, 1, 1]***blocks.4.2.bn3.weight:[120]***blocks.4.2.bn3.bias:[120]***blocks.4.2.bn3.running_mean:[120]***blocks.4.2.bn3.running_var:[120]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[226, 120, 1, 1]***blocks.4.3.bn1.weight:[226]***blocks.4.3.bn1.bias:[226]***blocks.4.3.bn1.running_mean:[226]***blocks.4.3.bn1.running_var:[226]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[226, 1, 5, 5]***blocks.4.3.bn2.weight:[226]***blocks.4.3.bn2.bias:[226]***blocks.4.3.bn2.running_mean:[226]***blocks.4.3.bn2.running_var:[226]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[33, 226, 1, 1]***blocks.4.3.se.conv_reduce.bias:[33]***blocks.4.3.se.conv_expand.weight:[226, 33, 1, 1]***blocks.4.3.se.conv_expand.bias:[226]***blocks.4.3.conv_pwl.weight:[120, 226, 1, 1]***blocks.4.3.bn3.weight:[120]***blocks.4.3.bn3.bias:[120]***blocks.4.3.bn3.running_mean:[120]***blocks.4.3.bn3.running_var:[120]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.4.4.conv_pw.weight:[340, 120, 1, 1]***blocks.4.4.bn1.weight:[340]***blocks.4.4.bn1.bias:[340]***blocks.4.4.bn1.running_mean:[340]***blocks.4.4.bn1.running_var:[340]***blocks.4.4.bn1.num_batches_tracked:[]***blocks.4.4.conv_dw.weight:[340, 1, 5, 5]***blocks.4.4.bn2.weight:[340]***blocks.4.4.bn2.bias:[340]***blocks.4.4.bn2.running_mean:[340]***blocks.4.4.bn2.running_var:[340]***blocks.4.4.bn2.num_batches_tracked:[]***blocks.4.4.se.conv_reduce.weight:[34, 340, 1, 1]***blocks.4.4.se.conv_reduce.bias:[34]***blocks.4.4.se.conv_expand.weight:[340, 34, 1, 1]***blocks.4.4.se.conv_expand.bias:[340]***blocks.4.4.conv_pwl.weight:[120, 340, 1, 1]***blocks.4.4.bn3.weight:[120]***blocks.4.4.bn3.bias:[120]***blocks.4.4.bn3.running_mean:[120]***blocks.4.4.bn3.running_var:[120]***blocks.4.4.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[802, 120, 1, 1]***blocks.5.0.bn1.weight:[802]***blocks.5.0.bn1.bias:[802]***blocks.5.0.bn1.running_mean:[802]***blocks.5.0.bn1.running_var:[802]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[802, 1, 5, 5]***blocks.5.0.bn2.weight:[802]***blocks.5.0.bn2.bias:[802]***blocks.5.0.bn2.running_mean:[802]***blocks.5.0.bn2.running_var:[802]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[34, 802, 1, 1]***blocks.5.0.se.conv_reduce.bias:[34]***blocks.5.0.se.conv_expand.weight:[802, 34, 1, 1]***blocks.5.0.se.conv_expand.bias:[802]***blocks.5.0.conv_pwl.weight:[232, 802, 1, 1]***blocks.5.0.bn3.weight:[232]***blocks.5.0.bn3.bias:[232]***blocks.5.0.bn3.running_mean:[232]***blocks.5.0.bn3.running_var:[232]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[1030, 232, 1, 1]***blocks.5.1.bn1.weight:[1030]***blocks.5.1.bn1.bias:[1030]***blocks.5.1.bn1.running_mean:[1030]***blocks.5.1.bn1.running_var:[1030]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[1030, 1, 5, 5]***blocks.5.1.bn2.weight:[1030]***blocks.5.1.bn2.bias:[1030]***blocks.5.1.bn2.running_mean:[1030]***blocks.5.1.bn2.running_var:[1030]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[58, 1030, 1, 1]***blocks.5.1.se.conv_reduce.bias:[58]***blocks.5.1.se.conv_expand.weight:[1030, 58, 1, 1]***blocks.5.1.se.conv_expand.bias:[1030]***blocks.5.1.conv_pwl.weight:[232, 1030, 1, 1]***blocks.5.1.bn3.weight:[232]***blocks.5.1.bn3.bias:[232]***blocks.5.1.bn3.running_mean:[232]***blocks.5.1.bn3.running_var:[232]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[924, 232, 1, 1]***blocks.5.2.bn1.weight:[924]***blocks.5.2.bn1.bias:[924]***blocks.5.2.bn1.running_mean:[924]***blocks.5.2.bn1.running_var:[924]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[924, 1, 5, 5]***blocks.5.2.bn2.weight:[924]***blocks.5.2.bn2.bias:[924]***blocks.5.2.bn2.running_mean:[924]***blocks.5.2.bn2.running_var:[924]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[58, 924, 1, 1]***blocks.5.2.se.conv_reduce.bias:[58]***blocks.5.2.se.conv_expand.weight:[924, 58, 1, 1]***blocks.5.2.se.conv_expand.bias:[924]***blocks.5.2.conv_pwl.weight:[232, 924, 1, 1]***blocks.5.2.bn3.weight:[232]***blocks.5.2.bn3.bias:[232]***blocks.5.2.bn3.running_mean:[232]***blocks.5.2.bn3.running_var:[232]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[1016, 232, 1, 1]***blocks.5.3.bn1.weight:[1016]***blocks.5.3.bn1.bias:[1016]***blocks.5.3.bn1.running_mean:[1016]***blocks.5.3.bn1.running_var:[1016]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[1016, 1, 5, 5]***blocks.5.3.bn2.weight:[1016]***blocks.5.3.bn2.bias:[1016]***blocks.5.3.bn2.running_mean:[1016]***blocks.5.3.bn2.running_var:[1016]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[58, 1016, 1, 1]***blocks.5.3.se.conv_reduce.bias:[58]***blocks.5.3.se.conv_expand.weight:[1016, 58, 1, 1]***blocks.5.3.se.conv_expand.bias:[1016]***blocks.5.3.conv_pwl.weight:[232, 1016, 1, 1]***blocks.5.3.bn3.weight:[232]***blocks.5.3.bn3.bias:[232]***blocks.5.3.bn3.running_mean:[232]***blocks.5.3.bn3.running_var:[232]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[1130, 232, 1, 1]***blocks.5.4.bn1.weight:[1130]***blocks.5.4.bn1.bias:[1130]***blocks.5.4.bn1.running_mean:[1130]***blocks.5.4.bn1.running_var:[1130]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[1130, 1, 5, 5]***blocks.5.4.bn2.weight:[1130]***blocks.5.4.bn2.bias:[1130]***blocks.5.4.bn2.running_mean:[1130]***blocks.5.4.bn2.running_var:[1130]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[58, 1130, 1, 1]***blocks.5.4.se.conv_reduce.bias:[58]***blocks.5.4.se.conv_expand.weight:[1130, 58, 1, 1]***blocks.5.4.se.conv_expand.bias:[1130]***blocks.5.4.conv_pwl.weight:[232, 1130, 1, 1]***blocks.5.4.bn3.weight:[232]***blocks.5.4.bn3.bias:[232]***blocks.5.4.bn3.running_mean:[232]***blocks.5.4.bn3.running_var:[232]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.5.5.conv_pw.weight:[1266, 232, 1, 1]***blocks.5.5.bn1.weight:[1266]***blocks.5.5.bn1.bias:[1266]***blocks.5.5.bn1.running_mean:[1266]***blocks.5.5.bn1.running_var:[1266]***blocks.5.5.bn1.num_batches_tracked:[]***blocks.5.5.conv_dw.weight:[1266, 1, 5, 5]***blocks.5.5.bn2.weight:[1266]***blocks.5.5.bn2.bias:[1266]***blocks.5.5.bn2.running_mean:[1266]***blocks.5.5.bn2.running_var:[1266]***blocks.5.5.bn2.num_batches_tracked:[]***blocks.5.5.se.conv_reduce.weight:[58, 1266, 1, 1]***blocks.5.5.se.conv_reduce.bias:[58]***blocks.5.5.se.conv_expand.weight:[1266, 58, 1, 1]***blocks.5.5.se.conv_expand.bias:[1266]***blocks.5.5.conv_pwl.weight:[232, 1266, 1, 1]***blocks.5.5.bn3.weight:[232]***blocks.5.5.bn3.bias:[232]***blocks.5.5.bn3.running_mean:[232]***blocks.5.5.bn3.running_var:[232]***blocks.5.5.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1392, 232, 1, 1]***blocks.6.0.bn1.weight:[1392]***blocks.6.0.bn1.bias:[1392]***blocks.6.0.bn1.running_mean:[1392]***blocks.6.0.bn1.running_var:[1392]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1392, 1, 3, 3]***blocks.6.0.bn2.weight:[1392]***blocks.6.0.bn2.bias:[1392]***blocks.6.0.bn2.running_mean:[1392]***blocks.6.0.bn2.running_var:[1392]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[58, 1392, 1, 1]***blocks.6.0.se.conv_reduce.bias:[58]***blocks.6.0.se.conv_expand.weight:[1392, 58, 1, 1]***blocks.6.0.se.conv_expand.bias:[1392]***blocks.6.0.conv_pwl.weight:[384, 1392, 1, 1]***blocks.6.0.bn3.weight:[384]***blocks.6.0.bn3.bias:[384]***blocks.6.0.bn3.running_mean:[384]***blocks.6.0.bn3.running_var:[384]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[2301, 384, 1, 1]***blocks.6.1.bn1.weight:[2301]***blocks.6.1.bn1.bias:[2301]***blocks.6.1.bn1.running_mean:[2301]***blocks.6.1.bn1.running_var:[2301]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[2301, 1, 3, 3]***blocks.6.1.bn2.weight:[2301]***blocks.6.1.bn2.bias:[2301]***blocks.6.1.bn2.running_mean:[2301]***blocks.6.1.bn2.running_var:[2301]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[96, 2301, 1, 1]***blocks.6.1.se.conv_reduce.bias:[96]***blocks.6.1.se.conv_expand.weight:[2301, 96, 1, 1]***blocks.6.1.se.conv_expand.bias:[2301]***blocks.6.1.conv_pwl.weight:[384, 2301, 1, 1]***blocks.6.1.bn3.weight:[384]***blocks.6.1.bn3.bias:[384]***blocks.6.1.bn3.running_mean:[384]***blocks.6.1.bn3.running_var:[384]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1536, 384, 1, 1]***bn2.weight:[1536]***bn2.bias:[1536]***bn2.running_mean:[1536]***bn2.running_var:[1536]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1536]***classifier.bias:[1000] \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/timm/models/registry.py b/testbed/huggingface__pytorch-image-models/timm/models/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..f92219b218228baf09ef7ee596c0b1f360347d47 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/registry.py @@ -0,0 +1,149 @@ +""" Model Registry +Hacked together by / Copyright 2020 Ross Wightman +""" + +import sys +import re +import fnmatch +from collections import defaultdict +from copy import deepcopy + +__all__ = ['list_models', 'is_model', 'model_entrypoint', 'list_modules', 'is_model_in_modules', + 'is_model_default_key', 'has_model_default_key', 'get_model_default_value', 'is_model_pretrained'] + +_module_to_models = defaultdict(set) # dict of sets to check membership of model in module +_model_to_module = {} # mapping of model names to module names +_model_entrypoints = {} # mapping of model names to entrypoint fns +_model_has_pretrained = set() # set of model names that have pretrained weight url present +_model_default_cfgs = dict() # central repo for model default_cfgs + + +def register_model(fn): + # lookup containing module + mod = sys.modules[fn.__module__] + module_name_split = fn.__module__.split('.') + module_name = module_name_split[-1] if len(module_name_split) else '' + + # add model to __all__ in module + model_name = fn.__name__ + if hasattr(mod, '__all__'): + mod.__all__.append(model_name) + else: + mod.__all__ = [model_name] + + # add entries to registry dict/sets + _model_entrypoints[model_name] = fn + _model_to_module[model_name] = module_name + _module_to_models[module_name].add(model_name) + has_pretrained = False # check if model has a pretrained url to allow filtering on this + if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs: + # this will catch all models that have entrypoint matching cfg key, but miss any aliasing + # entrypoints or non-matching combos + has_pretrained = 'url' in mod.default_cfgs[model_name] and 'http' in mod.default_cfgs[model_name]['url'] + _model_default_cfgs[model_name] = deepcopy(mod.default_cfgs[model_name]) + if has_pretrained: + _model_has_pretrained.add(model_name) + return fn + + +def _natural_key(string_): + return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] + + +def list_models(filter='', module='', pretrained=False, exclude_filters='', name_matches_cfg=False): + """ Return list of available model names, sorted alphabetically + + Args: + filter (str) - Wildcard filter string that works with fnmatch + module (str) - Limit model selection to a specific sub-module (ie 'gen_efficientnet') + pretrained (bool) - Include only models with pretrained weights if True + exclude_filters (str or list[str]) - Wildcard filters to exclude models after including them with filter + name_matches_cfg (bool) - Include only models w/ model_name matching default_cfg name (excludes some aliases) + + Example: + model_list('gluon_resnet*') -- returns all models starting with 'gluon_resnet' + model_list('*resnext*, 'resnet') -- returns all models with 'resnext' in 'resnet' module + """ + if module: + all_models = list(_module_to_models[module]) + else: + all_models = _model_entrypoints.keys() + if filter: + models = [] + include_filters = filter if isinstance(filter, (tuple, list)) else [filter] + for f in include_filters: + include_models = fnmatch.filter(all_models, f) # include these models + if len(include_models): + models = set(models).union(include_models) + else: + models = all_models + if exclude_filters: + if not isinstance(exclude_filters, (tuple, list)): + exclude_filters = [exclude_filters] + for xf in exclude_filters: + exclude_models = fnmatch.filter(models, xf) # exclude these models + if len(exclude_models): + models = set(models).difference(exclude_models) + if pretrained: + models = _model_has_pretrained.intersection(models) + if name_matches_cfg: + models = set(_model_default_cfgs).intersection(models) + return list(sorted(models, key=_natural_key)) + + +def is_model(model_name): + """ Check if a model name exists + """ + return model_name in _model_entrypoints + + +def model_entrypoint(model_name): + """Fetch a model entrypoint for specified model name + """ + return _model_entrypoints[model_name] + + +def list_modules(): + """ Return list of module names that contain models / model entrypoints + """ + modules = _module_to_models.keys() + return list(sorted(modules)) + + +def is_model_in_modules(model_name, module_names): + """Check if a model exists within a subset of modules + Args: + model_name (str) - name of model to check + module_names (tuple, list, set) - names of modules to search in + """ + assert isinstance(module_names, (tuple, list, set)) + return any(model_name in _module_to_models[n] for n in module_names) + + +def has_model_default_key(model_name, cfg_key): + """ Query model default_cfgs for existence of a specific key. + """ + if model_name in _model_default_cfgs and cfg_key in _model_default_cfgs[model_name]: + return True + return False + + +def is_model_default_key(model_name, cfg_key): + """ Return truthy value for specified model default_cfg key, False if does not exist. + """ + if model_name in _model_default_cfgs and _model_default_cfgs[model_name].get(cfg_key, False): + return True + return False + + +def get_model_default_value(model_name, cfg_key): + """ Get a specific model default_cfg value by key. None if it doesn't exist. + """ + if model_name in _model_default_cfgs: + return _model_default_cfgs[model_name].get(cfg_key, None) + else: + return None + + +def is_model_pretrained(model_name): + return model_name in _model_has_pretrained diff --git a/testbed/huggingface__pytorch-image-models/timm/models/regnet.py b/testbed/huggingface__pytorch-image-models/timm/models/regnet.py new file mode 100644 index 0000000000000000000000000000000000000000..6a38107467d22e195230663f5eeb03b38c82c125 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/regnet.py @@ -0,0 +1,494 @@ +"""RegNet + +Paper: `Designing Network Design Spaces` - https://arxiv.org/abs/2003.13678 +Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py + +Based on original PyTorch impl linked above, but re-wrote to use my own blocks (adapted from ResNet here) +and cleaned up with more descriptive variable names. + +Weights from original impl have been modified +* first layer from BGR -> RGB as most PyTorch models are +* removed training specific dict entries from checkpoints and keep model state_dict only +* remap names to match the ones here + +Hacked together by / Copyright 2020 Ross Wightman +""" +import numpy as np +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import ClassifierHead, AvgPool2dSame, ConvBnAct, SEModule, DropPath +from .registry import register_model + + +def _mcfg(**kwargs): + cfg = dict(se_ratio=0., bottle_ratio=1., stem_width=32) + cfg.update(**kwargs) + return cfg + + +# Model FLOPS = three trailing digits * 10^8 +model_cfgs = dict( + regnetx_002=_mcfg(w0=24, wa=36.44, wm=2.49, group_w=8, depth=13), + regnetx_004=_mcfg(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22), + regnetx_006=_mcfg(w0=48, wa=36.97, wm=2.24, group_w=24, depth=16), + regnetx_008=_mcfg(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16), + regnetx_016=_mcfg(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18), + regnetx_032=_mcfg(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25), + regnetx_040=_mcfg(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23), + regnetx_064=_mcfg(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17), + regnetx_080=_mcfg(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23), + regnetx_120=_mcfg(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19), + regnetx_160=_mcfg(w0=216, wa=55.59, wm=2.1, group_w=128, depth=22), + regnetx_320=_mcfg(w0=320, wa=69.86, wm=2.0, group_w=168, depth=23), + regnety_002=_mcfg(w0=24, wa=36.44, wm=2.49, group_w=8, depth=13, se_ratio=0.25), + regnety_004=_mcfg(w0=48, wa=27.89, wm=2.09, group_w=8, depth=16, se_ratio=0.25), + regnety_006=_mcfg(w0=48, wa=32.54, wm=2.32, group_w=16, depth=15, se_ratio=0.25), + regnety_008=_mcfg(w0=56, wa=38.84, wm=2.4, group_w=16, depth=14, se_ratio=0.25), + regnety_016=_mcfg(w0=48, wa=20.71, wm=2.65, group_w=24, depth=27, se_ratio=0.25), + regnety_032=_mcfg(w0=80, wa=42.63, wm=2.66, group_w=24, depth=21, se_ratio=0.25), + regnety_040=_mcfg(w0=96, wa=31.41, wm=2.24, group_w=64, depth=22, se_ratio=0.25), + regnety_064=_mcfg(w0=112, wa=33.22, wm=2.27, group_w=72, depth=25, se_ratio=0.25), + regnety_080=_mcfg(w0=192, wa=76.82, wm=2.19, group_w=56, depth=17, se_ratio=0.25), + regnety_120=_mcfg(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, se_ratio=0.25), + regnety_160=_mcfg(w0=200, wa=106.23, wm=2.48, group_w=112, depth=18, se_ratio=0.25), + regnety_320=_mcfg(w0=232, wa=115.89, wm=2.53, group_w=232, depth=20, se_ratio=0.25), +) + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = dict( + regnetx_002=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_002-e7e85e5c.pth'), + regnetx_004=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_004-7d0e9424.pth'), + regnetx_006=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_006-85ec1baa.pth'), + regnetx_008=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_008-d8b470eb.pth'), + regnetx_016=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_016-65ca972a.pth'), + regnetx_032=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_032-ed0c7f7e.pth'), + regnetx_040=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_040-73c2a654.pth'), + regnetx_064=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_064-29278baa.pth'), + regnetx_080=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_080-7c7fcab1.pth'), + regnetx_120=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_120-65d5521e.pth'), + regnetx_160=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_160-c98c4112.pth'), + regnetx_320=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_320-8ea38b93.pth'), + regnety_002=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_002-e68ca334.pth'), + regnety_004=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_004-0db870e6.pth'), + regnety_006=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_006-c67e57ec.pth'), + regnety_008=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_008-dc900dbe.pth'), + regnety_016=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_016-54367f74.pth'), + regnety_032=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/regnety_032_ra-7f2439f9.pth', + crop_pct=1.0, test_input_size=(3, 288, 288)), + regnety_040=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_040-f0d569f9.pth'), + regnety_064=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_064-0a48325c.pth'), + regnety_080=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_080-e7f3eb93.pth'), + regnety_120=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_120-721ba79a.pth'), + regnety_160=_cfg( + url='https://dl.fbaipublicfiles.com/deit/regnety_160-a5fe301d.pth', # from Facebook DeiT GitHub repository + crop_pct=1.0, test_input_size=(3, 288, 288)), + regnety_320=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_320-ba464b29.pth'), +) + + +def quantize_float(f, q): + """Converts a float to closest non-zero int divisible by q.""" + return int(round(f / q) * q) + + +def adjust_widths_groups_comp(widths, bottle_ratios, groups): + """Adjusts the compatibility of widths and groups.""" + bottleneck_widths = [int(w * b) for w, b in zip(widths, bottle_ratios)] + groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_widths)] + bottleneck_widths = [quantize_float(w_bot, g) for w_bot, g in zip(bottleneck_widths, groups)] + widths = [int(w_bot / b) for w_bot, b in zip(bottleneck_widths, bottle_ratios)] + return widths, groups + + +def generate_regnet(width_slope, width_initial, width_mult, depth, q=8): + """Generates per block widths from RegNet parameters.""" + assert width_slope >= 0 and width_initial > 0 and width_mult > 1 and width_initial % q == 0 + widths_cont = np.arange(depth) * width_slope + width_initial + width_exps = np.round(np.log(widths_cont / width_initial) / np.log(width_mult)) + widths = width_initial * np.power(width_mult, width_exps) + widths = np.round(np.divide(widths, q)) * q + num_stages, max_stage = len(np.unique(widths)), width_exps.max() + 1 + widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist() + return widths, num_stages, max_stage, widths_cont + + +class Bottleneck(nn.Module): + """ RegNet Bottleneck + + This is almost exactly the same as a ResNet Bottlneck. The main difference is the SE block is moved from + after conv3 to after conv2. Otherwise, it's just redefining the arguments for groups/bottleneck channels. + """ + + def __init__(self, in_chs, out_chs, stride=1, dilation=1, bottleneck_ratio=1, group_width=1, se_ratio=0.25, + downsample=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, + drop_block=None, drop_path=None): + super(Bottleneck, self).__init__() + bottleneck_chs = int(round(out_chs * bottleneck_ratio)) + groups = bottleneck_chs // group_width + + cargs = dict(act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_block=drop_block) + self.conv1 = ConvBnAct(in_chs, bottleneck_chs, kernel_size=1, **cargs) + self.conv2 = ConvBnAct( + bottleneck_chs, bottleneck_chs, kernel_size=3, stride=stride, dilation=dilation, + groups=groups, **cargs) + if se_ratio: + se_channels = int(round(in_chs * se_ratio)) + self.se = SEModule(bottleneck_chs, rd_channels=se_channels) + else: + self.se = None + cargs['act_layer'] = None + self.conv3 = ConvBnAct(bottleneck_chs, out_chs, kernel_size=1, **cargs) + self.act3 = act_layer(inplace=True) + self.downsample = downsample + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv3.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + if self.se is not None: + x = self.se(x) + x = self.conv3(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act3(x) + return x + + +def downsample_conv( + in_chs, out_chs, kernel_size, stride=1, dilation=1, norm_layer=None): + norm_layer = norm_layer or nn.BatchNorm2d + kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size + dilation = dilation if kernel_size > 1 else 1 + return ConvBnAct( + in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, norm_layer=norm_layer, act_layer=None) + + +def downsample_avg( + in_chs, out_chs, kernel_size, stride=1, dilation=1, norm_layer=None): + """ AvgPool Downsampling as in 'D' ResNet variants. This is not in RegNet space but I might experiment.""" + norm_layer = norm_layer or nn.BatchNorm2d + avg_stride = stride if dilation == 1 else 1 + pool = nn.Identity() + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + return nn.Sequential(*[ + pool, ConvBnAct(in_chs, out_chs, 1, stride=1, norm_layer=norm_layer, act_layer=None)]) + + +class RegStage(nn.Module): + """Stage (sequence of blocks w/ the same output shape).""" + + def __init__(self, in_chs, out_chs, stride, dilation, depth, bottle_ratio, group_width, + block_fn=Bottleneck, se_ratio=0., drop_path_rates=None, drop_block=None): + super(RegStage, self).__init__() + block_kwargs = {} # FIXME setup to pass various aa, norm, act layer common args + first_dilation = 1 if dilation in (1, 2) else 2 + for i in range(depth): + block_stride = stride if i == 0 else 1 + block_in_chs = in_chs if i == 0 else out_chs + block_dilation = first_dilation if i == 0 else dilation + if drop_path_rates is not None and drop_path_rates[i] > 0.: + drop_path = DropPath(drop_path_rates[i]) + else: + drop_path = None + if (block_in_chs != out_chs) or (block_stride != 1): + proj_block = downsample_conv(block_in_chs, out_chs, 1, block_stride, block_dilation) + else: + proj_block = None + + name = "b{}".format(i + 1) + self.add_module( + name, block_fn( + block_in_chs, out_chs, block_stride, block_dilation, bottle_ratio, group_width, se_ratio, + downsample=proj_block, drop_block=drop_block, drop_path=drop_path, **block_kwargs) + ) + + def forward(self, x): + for block in self.children(): + x = block(x) + return x + + +class RegNet(nn.Module): + """RegNet model. + + Paper: https://arxiv.org/abs/2003.13678 + Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py + """ + + def __init__(self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0., + drop_path_rate=0., zero_init_last_bn=True): + super().__init__() + # TODO add drop block, drop path, anti-aliasing, custom bn/act args + self.num_classes = num_classes + self.drop_rate = drop_rate + assert output_stride in (8, 16, 32) + + # Construct the stem + stem_width = cfg['stem_width'] + self.stem = ConvBnAct(in_chans, stem_width, 3, stride=2) + self.feature_info = [dict(num_chs=stem_width, reduction=2, module='stem')] + + # Construct the stages + prev_width = stem_width + curr_stride = 2 + stage_params = self._get_stage_params(cfg, output_stride=output_stride, drop_path_rate=drop_path_rate) + se_ratio = cfg['se_ratio'] + for i, stage_args in enumerate(stage_params): + stage_name = "s{}".format(i + 1) + self.add_module(stage_name, RegStage(prev_width, **stage_args, se_ratio=se_ratio)) + prev_width = stage_args['out_chs'] + curr_stride *= stage_args['stride'] + self.feature_info += [dict(num_chs=prev_width, reduction=curr_stride, module=stage_name)] + + # Construct the head + self.num_features = prev_width + self.head = ClassifierHead( + in_chs=prev_width, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, mean=0.0, std=0.01) + nn.init.zeros_(m.bias) + if zero_init_last_bn: + for m in self.modules(): + if hasattr(m, 'zero_init_last_bn'): + m.zero_init_last_bn() + + def _get_stage_params(self, cfg, default_stride=2, output_stride=32, drop_path_rate=0.): + # Generate RegNet ws per block + w_a, w_0, w_m, d = cfg['wa'], cfg['w0'], cfg['wm'], cfg['depth'] + widths, num_stages, _, _ = generate_regnet(w_a, w_0, w_m, d) + + # Convert to per stage format + stage_widths, stage_depths = np.unique(widths, return_counts=True) + + # Use the same group width, bottleneck mult and stride for each stage + stage_groups = [cfg['group_w'] for _ in range(num_stages)] + stage_bottle_ratios = [cfg['bottle_ratio'] for _ in range(num_stages)] + stage_strides = [] + stage_dilations = [] + net_stride = 2 + dilation = 1 + for _ in range(num_stages): + if net_stride >= output_stride: + dilation *= default_stride + stride = 1 + else: + stride = default_stride + net_stride *= stride + stage_strides.append(stride) + stage_dilations.append(dilation) + stage_dpr = np.split(np.linspace(0, drop_path_rate, d), np.cumsum(stage_depths[:-1])) + + # Adjust the compatibility of ws and gws + stage_widths, stage_groups = adjust_widths_groups_comp(stage_widths, stage_bottle_ratios, stage_groups) + param_names = ['out_chs', 'stride', 'dilation', 'depth', 'bottle_ratio', 'group_width', 'drop_path_rates'] + stage_params = [ + dict(zip(param_names, params)) for params in + zip(stage_widths, stage_strides, stage_dilations, stage_depths, stage_bottle_ratios, stage_groups, + stage_dpr)] + return stage_params + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + for block in list(self.children())[:-1]: + x = block(x) + return x + + def forward(self, x): + for block in self.children(): + x = block(x) + return x + + +def _filter_fn(state_dict): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + if 'model' in state_dict: + # For DeiT trained regnety_160 pretraiend model + state_dict = state_dict['model'] + return state_dict + + +def _create_regnet(variant, pretrained, **kwargs): + return build_model_with_cfg( + RegNet, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=model_cfgs[variant], + pretrained_filter_fn=_filter_fn, + **kwargs) + + +@register_model +def regnetx_002(pretrained=False, **kwargs): + """RegNetX-200MF""" + return _create_regnet('regnetx_002', pretrained, **kwargs) + + +@register_model +def regnetx_004(pretrained=False, **kwargs): + """RegNetX-400MF""" + return _create_regnet('regnetx_004', pretrained, **kwargs) + + +@register_model +def regnetx_006(pretrained=False, **kwargs): + """RegNetX-600MF""" + return _create_regnet('regnetx_006', pretrained, **kwargs) + + +@register_model +def regnetx_008(pretrained=False, **kwargs): + """RegNetX-800MF""" + return _create_regnet('regnetx_008', pretrained, **kwargs) + + +@register_model +def regnetx_016(pretrained=False, **kwargs): + """RegNetX-1.6GF""" + return _create_regnet('regnetx_016', pretrained, **kwargs) + + +@register_model +def regnetx_032(pretrained=False, **kwargs): + """RegNetX-3.2GF""" + return _create_regnet('regnetx_032', pretrained, **kwargs) + + +@register_model +def regnetx_040(pretrained=False, **kwargs): + """RegNetX-4.0GF""" + return _create_regnet('regnetx_040', pretrained, **kwargs) + + +@register_model +def regnetx_064(pretrained=False, **kwargs): + """RegNetX-6.4GF""" + return _create_regnet('regnetx_064', pretrained, **kwargs) + + +@register_model +def regnetx_080(pretrained=False, **kwargs): + """RegNetX-8.0GF""" + return _create_regnet('regnetx_080', pretrained, **kwargs) + + +@register_model +def regnetx_120(pretrained=False, **kwargs): + """RegNetX-12GF""" + return _create_regnet('regnetx_120', pretrained, **kwargs) + + +@register_model +def regnetx_160(pretrained=False, **kwargs): + """RegNetX-16GF""" + return _create_regnet('regnetx_160', pretrained, **kwargs) + + +@register_model +def regnetx_320(pretrained=False, **kwargs): + """RegNetX-32GF""" + return _create_regnet('regnetx_320', pretrained, **kwargs) + + +@register_model +def regnety_002(pretrained=False, **kwargs): + """RegNetY-200MF""" + return _create_regnet('regnety_002', pretrained, **kwargs) + + +@register_model +def regnety_004(pretrained=False, **kwargs): + """RegNetY-400MF""" + return _create_regnet('regnety_004', pretrained, **kwargs) + + +@register_model +def regnety_006(pretrained=False, **kwargs): + """RegNetY-600MF""" + return _create_regnet('regnety_006', pretrained, **kwargs) + + +@register_model +def regnety_008(pretrained=False, **kwargs): + """RegNetY-800MF""" + return _create_regnet('regnety_008', pretrained, **kwargs) + + +@register_model +def regnety_016(pretrained=False, **kwargs): + """RegNetY-1.6GF""" + return _create_regnet('regnety_016', pretrained, **kwargs) + + +@register_model +def regnety_032(pretrained=False, **kwargs): + """RegNetY-3.2GF""" + return _create_regnet('regnety_032', pretrained, **kwargs) + + +@register_model +def regnety_040(pretrained=False, **kwargs): + """RegNetY-4.0GF""" + return _create_regnet('regnety_040', pretrained, **kwargs) + + +@register_model +def regnety_064(pretrained=False, **kwargs): + """RegNetY-6.4GF""" + return _create_regnet('regnety_064', pretrained, **kwargs) + + +@register_model +def regnety_080(pretrained=False, **kwargs): + """RegNetY-8.0GF""" + return _create_regnet('regnety_080', pretrained, **kwargs) + + +@register_model +def regnety_120(pretrained=False, **kwargs): + """RegNetY-12GF""" + return _create_regnet('regnety_120', pretrained, **kwargs) + + +@register_model +def regnety_160(pretrained=False, **kwargs): + """RegNetY-16GF""" + return _create_regnet('regnety_160', pretrained, **kwargs) + + +@register_model +def regnety_320(pretrained=False, **kwargs): + """RegNetY-32GF""" + return _create_regnet('regnety_320', pretrained, **kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/res2net.py b/testbed/huggingface__pytorch-image-models/timm/models/res2net.py new file mode 100644 index 0000000000000000000000000000000000000000..282baba3b04f7805b16ffeaef55dd2b19b434f0c --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/res2net.py @@ -0,0 +1,216 @@ +""" Res2Net and Res2NeXt +Adapted from Official Pytorch impl at: https://github.com/gasvn/Res2Net/ +Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 +""" +import math + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .registry import register_model +from .resnet import ResNet + +__all__ = [] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'res2net50_26w_4s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_4s-06e79181.pth'), + 'res2net50_48w_2s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_48w_2s-afed724a.pth'), + 'res2net50_14w_8s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_14w_8s-6527dddc.pth'), + 'res2net50_26w_6s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_6s-19041792.pth'), + 'res2net50_26w_8s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_8s-2c7c9f12.pth'), + 'res2net101_26w_4s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net101_26w_4s-02a759a1.pth'), + 'res2next50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next50_4s-6ef7e7bf.pth'), +} + + +class Bottle2neck(nn.Module): + """ Res2Net/Res2NeXT Bottleneck + Adapted from https://github.com/gasvn/Res2Net/blob/master/res2net.py + """ + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, + cardinality=1, base_width=26, scale=4, dilation=1, first_dilation=None, + act_layer=nn.ReLU, norm_layer=None, attn_layer=None, **_): + super(Bottle2neck, self).__init__() + self.scale = scale + self.is_first = stride > 1 or downsample is not None + self.num_scales = max(1, scale - 1) + width = int(math.floor(planes * (base_width / 64.0))) * cardinality + self.width = width + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + + self.conv1 = nn.Conv2d(inplanes, width * scale, kernel_size=1, bias=False) + self.bn1 = norm_layer(width * scale) + + convs = [] + bns = [] + for i in range(self.num_scales): + convs.append(nn.Conv2d( + width, width, kernel_size=3, stride=stride, padding=first_dilation, + dilation=first_dilation, groups=cardinality, bias=False)) + bns.append(norm_layer(width)) + self.convs = nn.ModuleList(convs) + self.bns = nn.ModuleList(bns) + if self.is_first: + # FIXME this should probably have count_include_pad=False, but hurts original weights + self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) + else: + self.pool = None + + self.conv3 = nn.Conv2d(width * scale, outplanes, kernel_size=1, bias=False) + self.bn3 = norm_layer(outplanes) + self.se = attn_layer(outplanes) if attn_layer is not None else None + + self.relu = act_layer(inplace=True) + self.downsample = downsample + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn3.weight) + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + spx = torch.split(out, self.width, 1) + spo = [] + sp = spx[0] # redundant, for torchscript + for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): + if i == 0 or self.is_first: + sp = spx[i] + else: + sp = sp + spx[i] + sp = conv(sp) + sp = bn(sp) + sp = self.relu(sp) + spo.append(sp) + if self.scale > 1: + if self.pool is not None: + # self.is_first == True, None check for torchscript + spo.append(self.pool(spx[-1])) + else: + spo.append(spx[-1]) + out = torch.cat(spo, 1) + + out = self.conv3(out) + out = self.bn3(out) + + if self.se is not None: + out = self.se(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out += shortcut + out = self.relu(out) + + return out + + +def _create_res2net(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def res2net50_26w_4s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 26w4s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=4), **kwargs) + return _create_res2net('res2net50_26w_4s', pretrained, **model_args) + + +@register_model +def res2net101_26w_4s(pretrained=False, **kwargs): + """Constructs a Res2Net-101 26w4s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, block_args=dict(scale=4), **kwargs) + return _create_res2net('res2net101_26w_4s', pretrained, **model_args) + + +@register_model +def res2net50_26w_6s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 26w6s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=6), **kwargs) + return _create_res2net('res2net50_26w_6s', pretrained, **model_args) + + +@register_model +def res2net50_26w_8s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 26w8s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=8), **kwargs) + return _create_res2net('res2net50_26w_8s', pretrained, **model_args) + + +@register_model +def res2net50_48w_2s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 48w2s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=48, block_args=dict(scale=2), **kwargs) + return _create_res2net('res2net50_48w_2s', pretrained, **model_args) + + +@register_model +def res2net50_14w_8s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 14w8s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=14, block_args=dict(scale=8), **kwargs) + return _create_res2net('res2net50_14w_8s', pretrained, **model_args) + + +@register_model +def res2next50(pretrained=False, **kwargs): + """Construct Res2NeXt-50 4s + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=4, cardinality=8, block_args=dict(scale=4), **kwargs) + return _create_res2net('res2next50', pretrained, **model_args) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/resnest.py b/testbed/huggingface__pytorch-image-models/timm/models/resnest.py new file mode 100644 index 0000000000000000000000000000000000000000..31eebd8092a75e949a7592833f00f05c0a5a9be7 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/resnest.py @@ -0,0 +1,237 @@ +""" ResNeSt Models + +Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955 + +Adapted from original PyTorch impl w/ weights at https://github.com/zhanghang1989/ResNeSt by Hang Zhang + +Modified for torchscript compat, and consistency with timm by Ross Wightman +""" +import torch +from torch import nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import SplitAttn +from .registry import register_model +from .resnet import ResNet + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1.0', 'classifier': 'fc', + **kwargs + } + +default_cfgs = { + 'resnest14d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest14-9c8fe254.pth'), + 'resnest26d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest26-50eb607c.pth'), + 'resnest50d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50-528c19ca.pth'), + 'resnest101e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest101-22405ba7.pth', + input_size=(3, 256, 256), pool_size=(8, 8)), + 'resnest200e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest200-75117900.pth', + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.909, interpolation='bicubic'), + 'resnest269e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest269-0cc87c48.pth', + input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.928, interpolation='bicubic'), + 'resnest50d_4s2x40d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_4s2x40d-41d14ed0.pth', + interpolation='bicubic'), + 'resnest50d_1s4x24d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_1s4x24d-d4a4f76f.pth', + interpolation='bicubic') +} + + +class ResNestBottleneck(nn.Module): + """ResNet Bottleneck + """ + # pylint: disable=unused-argument + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, + radix=1, cardinality=1, base_width=64, avd=False, avd_first=False, is_first=False, + reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(ResNestBottleneck, self).__init__() + assert reduce_first == 1 # not supported + assert attn_layer is None # not supported + assert aa_layer is None # TODO not yet supported + assert drop_path is None # TODO not yet supported + + group_width = int(planes * (base_width / 64.)) * cardinality + first_dilation = first_dilation or dilation + if avd and (stride > 1 or is_first): + avd_stride = stride + stride = 1 + else: + avd_stride = 0 + self.radix = radix + self.drop_block = drop_block + + self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False) + self.bn1 = norm_layer(group_width) + self.act1 = act_layer(inplace=True) + self.avd_first = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and avd_first else None + + if self.radix >= 1: + self.conv2 = SplitAttn( + group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, + dilation=first_dilation, groups=cardinality, radix=radix, norm_layer=norm_layer, drop_block=drop_block) + self.bn2 = nn.Identity() + self.act2 = nn.Identity() + else: + self.conv2 = nn.Conv2d( + group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, + dilation=first_dilation, groups=cardinality, bias=False) + self.bn2 = norm_layer(group_width) + self.act2 = act_layer(inplace=True) + self.avd_last = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and not avd_first else None + + self.conv3 = nn.Conv2d(group_width, planes * 4, kernel_size=1, bias=False) + self.bn3 = norm_layer(planes*4) + self.act3 = act_layer(inplace=True) + self.downsample = downsample + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn3.weight) + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + if self.drop_block is not None: + out = self.drop_block(out) + out = self.act1(out) + + if self.avd_first is not None: + out = self.avd_first(out) + + out = self.conv2(out) + out = self.bn2(out) + if self.drop_block is not None: + out = self.drop_block(out) + out = self.act2(out) + + if self.avd_last is not None: + out = self.avd_last(out) + + out = self.conv3(out) + out = self.bn3(out) + if self.drop_block is not None: + out = self.drop_block(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out += shortcut + out = self.act3(out) + return out + + +def _create_resnest(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def resnest14d(pretrained=False, **kwargs): + """ ResNeSt-14d model. Weights ported from GluonCV. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[1, 1, 1, 1], + stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest14d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest26d(pretrained=False, **kwargs): + """ ResNeSt-26d model. Weights ported from GluonCV. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[2, 2, 2, 2], + stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest26d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest50d(pretrained=False, **kwargs): + """ ResNeSt-50d model. Matches paper ResNeSt-50 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'd' for deep stem, stem_width 32, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 6, 3], + stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest50d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest101e(pretrained=False, **kwargs): + """ ResNeSt-101e model. Matches paper ResNeSt-101 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 23, 3], + stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest101e', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest200e(pretrained=False, **kwargs): + """ ResNeSt-200e model. Matches paper ResNeSt-200 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 24, 36, 3], + stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest200e', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest269e(pretrained=False, **kwargs): + """ ResNeSt-269e model. Matches paper ResNeSt-269 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 30, 48, 8], + stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest269e', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest50d_4s2x40d(pretrained=False, **kwargs): + """ResNeSt-50 4s2x40d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 6, 3], + stem_type='deep', stem_width=32, avg_down=True, base_width=40, cardinality=2, + block_args=dict(radix=4, avd=True, avd_first=True), **kwargs) + return _create_resnest('resnest50d_4s2x40d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest50d_1s4x24d(pretrained=False, **kwargs): + """ResNeSt-50 1s4x24d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 6, 3], + stem_type='deep', stem_width=32, avg_down=True, base_width=24, cardinality=4, + block_args=dict(radix=1, avd=True, avd_first=True), **kwargs) + return _create_resnest('resnest50d_1s4x24d', pretrained=pretrained, **model_kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/resnet.py b/testbed/huggingface__pytorch-image-models/timm/models/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..dad42f38a064068c48f401622ab13a8a28ffdf57 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/resnet.py @@ -0,0 +1,1455 @@ +"""PyTorch ResNet + +This started as a copy of https://github.com/pytorch/vision 'resnet.py' (BSD-3-Clause) with +additional dropout and dynamic global avg/max pool. + +ResNeXt, SE-ResNeXt, SENet, and MXNet Gluon stem/downsample variants, tiered stems added by Ross Wightman +Copyright 2020 Ross Wightman +""" +import math +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import DropBlock2d, DropPath, AvgPool2dSame, BlurPool2d, create_attn, get_attn, create_classifier +from .registry import register_model + +__all__ = ['ResNet', 'BasicBlock', 'Bottleneck'] # model_registry will add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + # ResNet and Wide ResNet + 'resnet18': _cfg(url='https://download.pytorch.org/models/resnet18-5c106cde.pth'), + 'resnet18d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet18d_ra2-48a79e06.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet34': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth'), + 'resnet34d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34d_ra2-f8dcfcaf.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet26': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth', + interpolation='bicubic'), + 'resnet26d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26d-69e92c46.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet26t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet26t_256_ra2-6f6fa748.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8)), + 'resnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50_ram-a26f946b.pth', + interpolation='bicubic'), + 'resnet50d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50d_ra2-464e36ba.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet50t': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet101': _cfg(url='', interpolation='bicubic'), + 'resnet101d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=1.0, test_input_size=(3, 320, 320)), + 'resnet152': _cfg(url='', interpolation='bicubic'), + 'resnet152d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet152d_ra2-5cac0439.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=1.0, test_input_size=(3, 320, 320)), + 'resnet200': _cfg(url='', interpolation='bicubic'), + 'resnet200d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet200d_ra2-bdba9bf9.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=1.0, test_input_size=(3, 320, 320)), + 'tv_resnet34': _cfg(url='https://download.pytorch.org/models/resnet34-333f7ec4.pth'), + 'tv_resnet50': _cfg(url='https://download.pytorch.org/models/resnet50-19c8e357.pth'), + 'tv_resnet101': _cfg(url='https://download.pytorch.org/models/resnet101-5d3b4d8f.pth'), + 'tv_resnet152': _cfg(url='https://download.pytorch.org/models/resnet152-b121ed2d.pth'), + 'wide_resnet50_2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/wide_resnet50_racm-8234f177.pth', + interpolation='bicubic'), + 'wide_resnet101_2': _cfg(url='https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth'), + + # ResNeXt + 'resnext50_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50_32x4d_ra-d733960d.pth', + interpolation='bicubic'), + 'resnext50d_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'resnext101_32x4d': _cfg(url=''), + 'resnext101_32x8d': _cfg(url='https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth'), + 'resnext101_64x4d': _cfg(url=''), + 'tv_resnext50_32x4d': _cfg(url='https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth'), + + # ResNeXt models - Weakly Supervised Pretraining on Instagram Hashtags + # from https://github.com/facebookresearch/WSL-Images + # Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. + 'ig_resnext101_32x8d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth'), + 'ig_resnext101_32x16d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth'), + 'ig_resnext101_32x32d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth'), + 'ig_resnext101_32x48d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth'), + + # Semi-Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models + # Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. + 'ssl_resnet18': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth'), + 'ssl_resnet50': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth'), + 'ssl_resnext50_32x4d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext50_32x4-ddb3e555.pth'), + 'ssl_resnext101_32x4d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x4-dc43570a.pth'), + 'ssl_resnext101_32x8d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x8-2cfe2f8b.pth'), + 'ssl_resnext101_32x16d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x16-15fffa57.pth'), + + # Semi-Weakly Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models + # Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. + 'swsl_resnet18': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet18-118f1556.pth'), + 'swsl_resnet50': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet50-16a12f1b.pth'), + 'swsl_resnext50_32x4d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth'), + 'swsl_resnext101_32x4d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth'), + 'swsl_resnext101_32x8d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth'), + 'swsl_resnext101_32x16d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth'), + + # Squeeze-Excitation ResNets, to eventually replace the models in senet.py + 'seresnet18': _cfg( + url='', + interpolation='bicubic'), + 'seresnet34': _cfg( + url='', + interpolation='bicubic'), + 'seresnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet50_ra_224-8efdb4bb.pth', + interpolation='bicubic'), + 'seresnet50t': _cfg( + url='', + interpolation='bicubic', + first_conv='conv1.0'), + 'seresnet101': _cfg( + url='', + interpolation='bicubic'), + 'seresnet152': _cfg( + url='', + interpolation='bicubic'), + 'seresnet152d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet152d_ra2-04464dd2.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=1.0, test_input_size=(3, 320, 320) + ), + 'seresnet200d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)), + 'seresnet269d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)), + + + # Squeeze-Excitation ResNeXts, to eventually replace the models in senet.py + 'seresnext26d_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26d_32x4d-80fa48a3.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'seresnext26t_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26tn_32x4d-569cb627.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'seresnext50_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext50_32x4d_racm-a304a460.pth', + interpolation='bicubic'), + 'seresnext101_32x4d': _cfg( + url='', + interpolation='bicubic'), + 'seresnext101_32x8d': _cfg( + url='', + interpolation='bicubic'), + 'senet154': _cfg( + url='', + interpolation='bicubic', + first_conv='conv1.0'), + + # Efficient Channel Attention ResNets + 'ecaresnet26t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet26t_ra2-46609757.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=0.95, test_input_size=(3, 320, 320)), + 'ecaresnetlight': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNetLight_4f34b35b.pth', + interpolation='bicubic'), + 'ecaresnet50d': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet50D_833caf58.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'ecaresnet50d_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45899/outputs/ECAResNet50D_P_9c67f710.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'ecaresnet50t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet50t_ra2-f7ac63c4.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=0.95, test_input_size=(3, 320, 320)), + 'ecaresnet101d': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet101D_281c5844.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'ecaresnet101d_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45610/outputs/ECAResNet101D_P_75a3370e.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'ecaresnet200d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)), + 'ecaresnet269d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet269d_320_ra2-7baa55cb.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 320, 320), pool_size=(10, 10), + crop_pct=1.0, test_input_size=(3, 352, 352)), + + # Efficient Channel Attention ResNeXts + 'ecaresnext26t_32x4d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + 'ecaresnext50t_32x4d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + + # ResNets with anti-aliasing blur pool + 'resnetblur18': _cfg( + interpolation='bicubic'), + 'resnetblur50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth', + interpolation='bicubic'), + + # ResNet-RS models + 'resnetrs50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs50_ema-6b53758b.pth', + input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.91, test_input_size=(3, 224, 224), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs101': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs101_i192_ema-1509bbf6.pth', + input_size=(3, 192, 192), pool_size=(6, 6), crop_pct=0.94, test_input_size=(3, 288, 288), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs152': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs152_i256_ema-a9aff7f9.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs200': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs200_ema-623d2f59.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs270': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs270_ema-b40e674c.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 352, 352), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs350': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs350_i256_ema-5a1aa8f1.pth', + input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, test_input_size=(3, 384, 384), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs420': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs420_ema-972dee69.pth', + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, test_input_size=(3, 416, 416), + interpolation='bicubic', first_conv='conv1.0'), +} + + +def get_padding(kernel_size, stride, dilation=1): + padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 + return padding + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, + reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(BasicBlock, self).__init__() + + assert cardinality == 1, 'BasicBlock only supports cardinality of 1' + assert base_width == 64, 'BasicBlock does not support changing base width' + first_planes = planes // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) + + self.conv1 = nn.Conv2d( + inplanes, first_planes, kernel_size=3, stride=1 if use_aa else stride, padding=first_dilation, + dilation=first_dilation, bias=False) + self.bn1 = norm_layer(first_planes) + self.act1 = act_layer(inplace=True) + self.aa = aa_layer(channels=first_planes, stride=stride) if use_aa else None + + self.conv2 = nn.Conv2d( + first_planes, outplanes, kernel_size=3, padding=dilation, dilation=dilation, bias=False) + self.bn2 = norm_layer(outplanes) + + self.se = create_attn(attn_layer, outplanes) + + self.act2 = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_block = drop_block + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn2.weight) + + def forward(self, x): + shortcut = x + + x = self.conv1(x) + x = self.bn1(x) + if self.drop_block is not None: + x = self.drop_block(x) + x = self.act1(x) + if self.aa is not None: + x = self.aa(x) + + x = self.conv2(x) + x = self.bn2(x) + if self.drop_block is not None: + x = self.drop_block(x) + + if self.se is not None: + x = self.se(x) + + if self.drop_path is not None: + x = self.drop_path(x) + + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act2(x) + + return x + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, + reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(Bottleneck, self).__init__() + + width = int(math.floor(planes * (base_width / 64)) * cardinality) + first_planes = width // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) + + self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=1, bias=False) + self.bn1 = norm_layer(first_planes) + self.act1 = act_layer(inplace=True) + + self.conv2 = nn.Conv2d( + first_planes, width, kernel_size=3, stride=1 if use_aa else stride, + padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False) + self.bn2 = norm_layer(width) + self.act2 = act_layer(inplace=True) + self.aa = aa_layer(channels=width, stride=stride) if use_aa else None + + self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False) + self.bn3 = norm_layer(outplanes) + + self.se = create_attn(attn_layer, outplanes) + + self.act3 = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_block = drop_block + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn3.weight) + + def forward(self, x): + shortcut = x + + x = self.conv1(x) + x = self.bn1(x) + if self.drop_block is not None: + x = self.drop_block(x) + x = self.act1(x) + + x = self.conv2(x) + x = self.bn2(x) + if self.drop_block is not None: + x = self.drop_block(x) + x = self.act2(x) + if self.aa is not None: + x = self.aa(x) + + x = self.conv3(x) + x = self.bn3(x) + if self.drop_block is not None: + x = self.drop_block(x) + + if self.se is not None: + x = self.se(x) + + if self.drop_path is not None: + x = self.drop_path(x) + + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act3(x) + + return x + + +def downsample_conv( + in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None): + norm_layer = norm_layer or nn.BatchNorm2d + kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size + first_dilation = (first_dilation or dilation) if kernel_size > 1 else 1 + p = get_padding(kernel_size, stride, first_dilation) + + return nn.Sequential(*[ + nn.Conv2d( + in_channels, out_channels, kernel_size, stride=stride, padding=p, dilation=first_dilation, bias=False), + norm_layer(out_channels) + ]) + + +def downsample_avg( + in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None): + norm_layer = norm_layer or nn.BatchNorm2d + avg_stride = stride if dilation == 1 else 1 + if stride == 1 and dilation == 1: + pool = nn.Identity() + else: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + + return nn.Sequential(*[ + pool, + nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False), + norm_layer(out_channels) + ]) + + +def drop_blocks(drop_block_rate=0.): + return [ + None, None, + DropBlock2d(drop_block_rate, 5, 0.25) if drop_block_rate else None, + DropBlock2d(drop_block_rate, 3, 1.00) if drop_block_rate else None] + + +def make_blocks( + block_fn, channels, block_repeats, inplanes, reduce_first=1, output_stride=32, + down_kernel_size=1, avg_down=False, drop_block_rate=0., drop_path_rate=0., **kwargs): + stages = [] + feature_info = [] + net_num_blocks = sum(block_repeats) + net_block_idx = 0 + net_stride = 4 + dilation = prev_dilation = 1 + for stage_idx, (planes, num_blocks, db) in enumerate(zip(channels, block_repeats, drop_blocks(drop_block_rate))): + stage_name = f'layer{stage_idx + 1}' # never liked this name, but weight compat requires it + stride = 1 if stage_idx == 0 else 2 + if net_stride >= output_stride: + dilation *= stride + stride = 1 + else: + net_stride *= stride + + downsample = None + if stride != 1 or inplanes != planes * block_fn.expansion: + down_kwargs = dict( + in_channels=inplanes, out_channels=planes * block_fn.expansion, kernel_size=down_kernel_size, + stride=stride, dilation=dilation, first_dilation=prev_dilation, norm_layer=kwargs.get('norm_layer')) + downsample = downsample_avg(**down_kwargs) if avg_down else downsample_conv(**down_kwargs) + + block_kwargs = dict(reduce_first=reduce_first, dilation=dilation, drop_block=db, **kwargs) + blocks = [] + for block_idx in range(num_blocks): + downsample = downsample if block_idx == 0 else None + stride = stride if block_idx == 0 else 1 + block_dpr = drop_path_rate * net_block_idx / (net_num_blocks - 1) # stochastic depth linear decay rule + blocks.append(block_fn( + inplanes, planes, stride, downsample, first_dilation=prev_dilation, + drop_path=DropPath(block_dpr) if block_dpr > 0. else None, **block_kwargs)) + prev_dilation = dilation + inplanes = planes * block_fn.expansion + net_block_idx += 1 + + stages.append((stage_name, nn.Sequential(*blocks))) + feature_info.append(dict(num_chs=inplanes, reduction=net_stride, module=stage_name)) + + return stages, feature_info + + +class ResNet(nn.Module): + """ResNet / ResNeXt / SE-ResNeXt / SE-Net + + This class implements all variants of ResNet, ResNeXt, SE-ResNeXt, and SENet that + * have > 1 stride in the 3x3 conv layer of bottleneck + * have conv-bn-act ordering + + This ResNet impl supports a number of stem and downsample options based on the v1c, v1d, v1e, and v1s + variants included in the MXNet Gluon ResNetV1b model. The C and D variants are also discussed in the + 'Bag of Tricks' paper: https://arxiv.org/pdf/1812.01187. The B variant is equivalent to torchvision default. + + ResNet variants (the same modifications can be used in SE/ResNeXt models as well): + * normal, b - 7x7 stem, stem_width = 64, same as torchvision ResNet, NVIDIA ResNet 'v1.5', Gluon v1b + * c - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64) + * d - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64), average pool in downsample + * e - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128), average pool in downsample + * s - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128) + * t - 3 layer deep 3x3 stem, stem width = 32 (24, 48, 64), average pool in downsample + * tn - 3 layer deep 3x3 stem, stem width = 32 (24, 32, 64), average pool in downsample + + ResNeXt + * normal - 7x7 stem, stem_width = 64, standard cardinality and base widths + * same c,d, e, s variants as ResNet can be enabled + + SE-ResNeXt + * normal - 7x7 stem, stem_width = 64 + * same c, d, e, s variants as ResNet can be enabled + + SENet-154 - 3 layer deep 3x3 stem (same as v1c-v1s), stem_width = 64, cardinality=64, + reduction by 2 on width of first bottleneck convolution, 3x3 downsample convs after first block + + Parameters + ---------- + block : Block + Class for the residual block. Options are BasicBlockGl, BottleneckGl. + layers : list of int + Numbers of layers in each block + num_classes : int, default 1000 + Number of classification classes. + in_chans : int, default 3 + Number of input (color) channels. + cardinality : int, default 1 + Number of convolution groups for 3x3 conv in Bottleneck. + base_width : int, default 64 + Factor determining bottleneck channels. `planes * base_width / 64 * cardinality` + stem_width : int, default 64 + Number of channels in stem convolutions + stem_type : str, default '' + The type of stem: + * '', default - a single 7x7 conv with a width of stem_width + * 'deep' - three 3x3 convolution layers of widths stem_width, stem_width, stem_width * 2 + * 'deep_tiered' - three 3x3 conv layers of widths stem_width//4 * 3, stem_width, stem_width * 2 + block_reduce_first: int, default 1 + Reduction factor for first convolution output width of residual blocks, + 1 for all archs except senets, where 2 + down_kernel_size: int, default 1 + Kernel size of residual block downsampling path, 1x1 for most archs, 3x3 for senets + avg_down : bool, default False + Whether to use average pooling for projection skip connection between stages/downsample. + output_stride : int, default 32 + Set the output stride of the network, 32, 16, or 8. Typically used in segmentation. + act_layer : nn.Module, activation layer + norm_layer : nn.Module, normalization layer + aa_layer : nn.Module, anti-aliasing layer + drop_rate : float, default 0. + Dropout probability before classifier, for training + global_pool : str, default 'avg' + Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax' + """ + + def __init__(self, block, layers, num_classes=1000, in_chans=3, + cardinality=1, base_width=64, stem_width=64, stem_type='', replace_stem_pool=False, + output_stride=32, block_reduce_first=1, down_kernel_size=1, avg_down=False, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_rate=0.0, drop_path_rate=0., + drop_block_rate=0., global_pool='avg', zero_init_last_bn=True, block_args=None): + block_args = block_args or dict() + assert output_stride in (8, 16, 32) + self.num_classes = num_classes + self.drop_rate = drop_rate + super(ResNet, self).__init__() + + # Stem + deep_stem = 'deep' in stem_type + inplanes = stem_width * 2 if deep_stem else 64 + if deep_stem: + stem_chs = (stem_width, stem_width) + if 'tiered' in stem_type: + stem_chs = (3 * (stem_width // 4), stem_width) + self.conv1 = nn.Sequential(*[ + nn.Conv2d(in_chans, stem_chs[0], 3, stride=2, padding=1, bias=False), + norm_layer(stem_chs[0]), + act_layer(inplace=True), + nn.Conv2d(stem_chs[0], stem_chs[1], 3, stride=1, padding=1, bias=False), + norm_layer(stem_chs[1]), + act_layer(inplace=True), + nn.Conv2d(stem_chs[1], inplanes, 3, stride=1, padding=1, bias=False)]) + else: + self.conv1 = nn.Conv2d(in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False) + self.bn1 = norm_layer(inplanes) + self.act1 = act_layer(inplace=True) + self.feature_info = [dict(num_chs=inplanes, reduction=2, module='act1')] + + # Stem Pooling + if replace_stem_pool: + self.maxpool = nn.Sequential(*filter(None, [ + nn.Conv2d(inplanes, inplanes, 3, stride=1 if aa_layer else 2, padding=1, bias=False), + aa_layer(channels=inplanes, stride=2) if aa_layer else None, + norm_layer(inplanes), + act_layer(inplace=True) + ])) + else: + if aa_layer is not None: + self.maxpool = nn.Sequential(*[ + nn.MaxPool2d(kernel_size=3, stride=1, padding=1), + aa_layer(channels=inplanes, stride=2)]) + else: + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + # Feature Blocks + channels = [64, 128, 256, 512] + stage_modules, stage_feature_info = make_blocks( + block, channels, layers, inplanes, cardinality=cardinality, base_width=base_width, + output_stride=output_stride, reduce_first=block_reduce_first, avg_down=avg_down, + down_kernel_size=down_kernel_size, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, + drop_block_rate=drop_block_rate, drop_path_rate=drop_path_rate, **block_args) + for stage in stage_modules: + self.add_module(*stage) # layer1, layer2, etc + self.feature_info.extend(stage_feature_info) + + # Head (Pooling and Classifier) + self.num_features = 512 * block.expansion + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + self.init_weights(zero_init_last_bn=zero_init_last_bn) + + def init_weights(self, zero_init_last_bn=True): + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + if zero_init_last_bn: + for m in self.modules(): + if hasattr(m, 'zero_init_last_bn'): + m.zero_init_last_bn() + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate: + x = F.dropout(x, p=float(self.drop_rate), training=self.training) + x = self.fc(x) + return x + + +def _create_resnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def resnet18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('resnet18', pretrained, **model_args) + + +@register_model +def resnet18d(pretrained=False, **kwargs): + """Constructs a ResNet-18-D model. + """ + model_args = dict( + block=BasicBlock, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet18d', pretrained, **model_args) + + +@register_model +def resnet34(pretrained=False, **kwargs): + """Constructs a ResNet-34 model. + """ + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('resnet34', pretrained, **model_args) + + +@register_model +def resnet34d(pretrained=False, **kwargs): + """Constructs a ResNet-34-D model. + """ + model_args = dict( + block=BasicBlock, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet34d', pretrained, **model_args) + + +@register_model +def resnet26(pretrained=False, **kwargs): + """Constructs a ResNet-26 model. + """ + model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('resnet26', pretrained, **model_args) + + +@register_model +def resnet26t(pretrained=False, **kwargs): + """Constructs a ResNet-26-T model. + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs) + return _create_resnet('resnet26t', pretrained, **model_args) + + +@register_model +def resnet26d(pretrained=False, **kwargs): + """Constructs a ResNet-26-D model. + """ + model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet26d', pretrained, **model_args) + + +@register_model +def resnet50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('resnet50', pretrained, **model_args) + + +@register_model +def resnet50d(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet50d', pretrained, **model_args) + + +@register_model +def resnet50t(pretrained=False, **kwargs): + """Constructs a ResNet-50-T model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs) + return _create_resnet('resnet50t', pretrained, **model_args) + + +@register_model +def resnet101(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs) + return _create_resnet('resnet101', pretrained, **model_args) + + +@register_model +def resnet101d(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet101d', pretrained, **model_args) + + +@register_model +def resnet152(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs) + return _create_resnet('resnet152', pretrained, **model_args) + + +@register_model +def resnet152d(pretrained=False, **kwargs): + """Constructs a ResNet-152-D model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet152d', pretrained, **model_args) + + +@register_model +def resnet200(pretrained=False, **kwargs): + """Constructs a ResNet-200 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 24, 36, 3], **kwargs) + return _create_resnet('resnet200', pretrained, **model_args) + + +@register_model +def resnet200d(pretrained=False, **kwargs): + """Constructs a ResNet-200-D model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet200d', pretrained, **model_args) + + +@register_model +def tv_resnet34(pretrained=False, **kwargs): + """Constructs a ResNet-34 model with original Torchvision weights. + """ + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('tv_resnet34', pretrained, **model_args) + + +@register_model +def tv_resnet50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model with original Torchvision weights. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('tv_resnet50', pretrained, **model_args) + + +@register_model +def tv_resnet101(pretrained=False, **kwargs): + """Constructs a ResNet-101 model w/ Torchvision pretrained weights. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs) + return _create_resnet('tv_resnet101', pretrained, **model_args) + + +@register_model +def tv_resnet152(pretrained=False, **kwargs): + """Constructs a ResNet-152 model w/ Torchvision pretrained weights. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs) + return _create_resnet('tv_resnet152', pretrained, **model_args) + + +@register_model +def wide_resnet50_2(pretrained=False, **kwargs): + """Constructs a Wide ResNet-50-2 model. + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 + channels, and in Wide ResNet-50-2 has 2048-1024-2048. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], base_width=128, **kwargs) + return _create_resnet('wide_resnet50_2', pretrained, **model_args) + + +@register_model +def wide_resnet101_2(pretrained=False, **kwargs): + """Constructs a Wide ResNet-101-2 model. + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], base_width=128, **kwargs) + return _create_resnet('wide_resnet101_2', pretrained, **model_args) + + +@register_model +def resnext50_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50-32x4d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('resnext50_32x4d', pretrained, **model_args) + + +@register_model +def resnext50d_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50d-32x4d model. ResNext50 w/ deep stem & avg pool downsample + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnext50d_32x4d', pretrained, **model_args) + + +@register_model +def resnext101_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 32x4d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('resnext101_32x4d', pretrained, **model_args) + + +@register_model +def resnext101_32x8d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 32x8d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('resnext101_32x8d', pretrained, **model_args) + + +@register_model +def resnext101_64x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt101-64x4d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, **kwargs) + return _create_resnet('resnext101_64x4d', pretrained, **model_args) + + +@register_model +def tv_resnext50_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50-32x4d model with original Torchvision weights. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('tv_resnext50_32x4d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x8d(pretrained=True, **kwargs): + """Constructs a ResNeXt-101 32x8 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('ig_resnext101_32x8d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x16d(pretrained=True, **kwargs): + """Constructs a ResNeXt-101 32x16 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs) + return _create_resnet('ig_resnext101_32x16d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x32d(pretrained=True, **kwargs): + """Constructs a ResNeXt-101 32x32 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=32, **kwargs) + return _create_resnet('ig_resnext101_32x32d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x48d(pretrained=True, **kwargs): + """Constructs a ResNeXt-101 32x48 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=48, **kwargs) + return _create_resnet('ig_resnext101_32x48d', pretrained, **model_args) + + +@register_model +def ssl_resnet18(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNet-18 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('ssl_resnet18', pretrained, **model_args) + + +@register_model +def ssl_resnet50(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNet-50 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('ssl_resnet50', pretrained, **model_args) + + +@register_model +def ssl_resnext50_32x4d(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNeXt-50 32x4 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('ssl_resnext50_32x4d', pretrained, **model_args) + + +@register_model +def ssl_resnext101_32x4d(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNeXt-101 32x4 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('ssl_resnext101_32x4d', pretrained, **model_args) + + +@register_model +def ssl_resnext101_32x8d(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNeXt-101 32x8 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('ssl_resnext101_32x8d', pretrained, **model_args) + + +@register_model +def ssl_resnext101_32x16d(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNeXt-101 32x16 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs) + return _create_resnet('ssl_resnext101_32x16d', pretrained, **model_args) + + +@register_model +def swsl_resnet18(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised Resnet-18 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('swsl_resnet18', pretrained, **model_args) + + +@register_model +def swsl_resnet50(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised ResNet-50 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('swsl_resnet50', pretrained, **model_args) + + +@register_model +def swsl_resnext50_32x4d(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised ResNeXt-50 32x4 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('swsl_resnext50_32x4d', pretrained, **model_args) + + +@register_model +def swsl_resnext101_32x4d(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised ResNeXt-101 32x4 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('swsl_resnext101_32x4d', pretrained, **model_args) + + +@register_model +def swsl_resnext101_32x8d(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised ResNeXt-101 32x8 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('swsl_resnext101_32x8d', pretrained, **model_args) + + +@register_model +def swsl_resnext101_32x16d(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised ResNeXt-101 32x16 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs) + return _create_resnet('swsl_resnext101_32x16d', pretrained, **model_args) + + +@register_model +def ecaresnet26t(pretrained=False, **kwargs): + """Constructs an ECA-ResNeXt-26-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem and ECA attn. + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet26t', pretrained, **model_args) + + +@register_model +def ecaresnet50d(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model with eca. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet50d', pretrained, **model_args) + + +@register_model +def resnetrs50(pretrained=False, **kwargs): + """Constructs a ResNet-RS-50 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs50', pretrained, **model_args) + + +@register_model +def resnetrs101(pretrained=False, **kwargs): + """Constructs a ResNet-RS-101 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs101', pretrained, **model_args) + + +@register_model +def resnetrs152(pretrained=False, **kwargs): + """Constructs a ResNet-RS-152 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs152', pretrained, **model_args) + + +@register_model +def resnetrs200(pretrained=False, **kwargs): + """Constructs a ResNet-RS-200 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs200', pretrained, **model_args) + + +@register_model +def resnetrs270(pretrained=False, **kwargs): + """Constructs a ResNet-RS-270 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[4, 29, 53, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs270', pretrained, **model_args) + + + +@register_model +def resnetrs350(pretrained=False, **kwargs): + """Constructs a ResNet-RS-350 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[4, 36, 72, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs350', pretrained, **model_args) + + +@register_model +def resnetrs420(pretrained=False, **kwargs): + """Constructs a ResNet-RS-420 model + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[4, 44, 87, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs420', pretrained, **model_args) + + +@register_model +def ecaresnet50d_pruned(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model pruned with eca. + The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet50d_pruned', pretrained, pruned=True, **model_args) + + +@register_model +def ecaresnet50t(pretrained=False, **kwargs): + """Constructs an ECA-ResNet-50-T model. + Like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem and ECA attn. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet50t', pretrained, **model_args) + + +@register_model +def ecaresnetlight(pretrained=False, **kwargs): + """Constructs a ResNet-50-D light model with eca. + """ + model_args = dict( + block=Bottleneck, layers=[1, 1, 11, 3], stem_width=32, avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnetlight', pretrained, **model_args) + + +@register_model +def ecaresnet101d(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model with eca. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet101d', pretrained, **model_args) + + +@register_model +def ecaresnet101d_pruned(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model pruned with eca. + The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet101d_pruned', pretrained, pruned=True, **model_args) + + +@register_model +def ecaresnet200d(pretrained=False, **kwargs): + """Constructs a ResNet-200-D model with ECA. + """ + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet200d', pretrained, **model_args) + + +@register_model +def ecaresnet269d(pretrained=False, **kwargs): + """Constructs a ResNet-269-D model with ECA. + """ + model_args = dict( + block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet269d', pretrained, **model_args) + + +@register_model +def ecaresnext26t_32x4d(pretrained=False, **kwargs): + """Constructs an ECA-ResNeXt-26-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem. This model replaces SE module with the ECA module + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnext26t_32x4d', pretrained, **model_args) + + +@register_model +def ecaresnext50t_32x4d(pretrained=False, **kwargs): + """Constructs an ECA-ResNeXt-50-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem. This model replaces SE module with the ECA module + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnext50t_32x4d', pretrained, **model_args) + + +@register_model +def resnetblur18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model with blur anti-aliasing + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], aa_layer=BlurPool2d, **kwargs) + return _create_resnet('resnetblur18', pretrained, **model_args) + + +@register_model +def resnetblur50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model with blur anti-aliasing + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=BlurPool2d, **kwargs) + return _create_resnet('resnetblur50', pretrained, **model_args) + + +@register_model +def seresnet18(pretrained=False, **kwargs): + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet18', pretrained, **model_args) + + +@register_model +def seresnet34(pretrained=False, **kwargs): + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet34', pretrained, **model_args) + + +@register_model +def seresnet50(pretrained=False, **kwargs): + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet50', pretrained, **model_args) + + +@register_model +def seresnet50t(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet50t', pretrained, **model_args) + + +@register_model +def seresnet101(pretrained=False, **kwargs): + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet101', pretrained, **model_args) + + +@register_model +def seresnet152(pretrained=False, **kwargs): + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet152', pretrained, **model_args) + + +@register_model +def seresnet152d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet152d', pretrained, **model_args) + + +@register_model +def seresnet200d(pretrained=False, **kwargs): + """Constructs a ResNet-200-D model with SE attn. + """ + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet200d', pretrained, **model_args) + + +@register_model +def seresnet269d(pretrained=False, **kwargs): + """Constructs a ResNet-269-D model with SE attn. + """ + model_args = dict( + block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet269d', pretrained, **model_args) + + +@register_model +def seresnext26d_32x4d(pretrained=False, **kwargs): + """Constructs a SE-ResNeXt-26-D model.` + This is technically a 28 layer ResNet, using the 'D' modifier from Gluon / bag-of-tricks for + combination of deep stem and avg_pool in downsample. + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext26d_32x4d', pretrained, **model_args) + + +@register_model +def seresnext26t_32x4d(pretrained=False, **kwargs): + """Constructs a SE-ResNet-26-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem. + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext26t_32x4d', pretrained, **model_args) + + +@register_model +def seresnext26tn_32x4d(pretrained=False, **kwargs): + """Constructs a SE-ResNeXt-26-T model. + NOTE I deprecated previous 't' model defs and replaced 't' with 'tn', this was the only tn model of note + so keeping this def for backwards compat with any uses out there. Old 't' model is lost. + """ + return seresnext26t_32x4d(pretrained=pretrained, **kwargs) + + +@register_model +def seresnext50_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext50_32x4d', pretrained, **model_args) + + +@register_model +def seresnext101_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext101_32x4d', pretrained, **model_args) + + +@register_model +def seresnext101_32x8d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext101_32x8d', pretrained, **model_args) + + +@register_model +def senet154(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], cardinality=64, base_width=4, stem_type='deep', + down_kernel_size=3, block_reduce_first=2, block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('senet154', pretrained, **model_args) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/resnetv2.py b/testbed/huggingface__pytorch-image-models/timm/models/resnetv2.py new file mode 100644 index 0000000000000000000000000000000000000000..2ff4da8c41f0de88e64a6404fd5fada8a5545957 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/resnetv2.py @@ -0,0 +1,655 @@ +"""Pre-Activation ResNet v2 with GroupNorm and Weight Standardization. + +A PyTorch implementation of ResNetV2 adapted from the Google Big-Transfoer (BiT) source code +at https://github.com/google-research/big_transfer to match timm interfaces. The BiT weights have +been included here as pretrained models from their original .NPZ checkpoints. + +Additionally, supports non pre-activation bottleneck for use as a backbone for Vision Transfomers (ViT) and +extra padding support to allow porting of official Hybrid ResNet pretrained weights from +https://github.com/google-research/vision_transformer + +Thanks to the Google team for the above two repositories and associated papers: +* Big Transfer (BiT): General Visual Representation Learning - https://arxiv.org/abs/1912.11370 +* An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale - https://arxiv.org/abs/2010.11929 +* Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 + +Original copyright of Google code below, modifications by Ross Wightman, Copyright 2020. +""" +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import OrderedDict # pylint: disable=g-importing-member + +import torch +import torch.nn as nn +from functools import partial + +from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg, named_apply, adapt_input_conv +from .registry import register_model +from .layers import GroupNormAct, BatchNormAct2d, EvoNormBatch2d, EvoNormSample2d,\ + ClassifierHead, DropPath, AvgPool2dSame, create_pool2d, StdConv2d, create_conv2d + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + # pretrained on imagenet21k, finetuned on imagenet1k + 'resnetv2_50x1_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x1-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_50x3_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x3-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_101x1_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x1-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_101x3_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x3-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_152x2_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x2-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_152x4_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x4-ILSVRC2012.npz', + input_size=(3, 480, 480), pool_size=(15, 15), crop_pct=1.0), # only one at 480x480? + + # trained on imagenet-21k + 'resnetv2_50x1_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x1.npz', + num_classes=21843), + 'resnetv2_50x3_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x3.npz', + num_classes=21843), + 'resnetv2_101x1_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x1.npz', + num_classes=21843), + 'resnetv2_101x3_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x3.npz', + num_classes=21843), + 'resnetv2_152x2_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x2.npz', + num_classes=21843), + 'resnetv2_152x4_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x4.npz', + num_classes=21843), + + 'resnetv2_50x1_bit_distilled': _cfg( + url='https://storage.googleapis.com/bit_models/distill/R50x1_224.npz', + interpolation='bicubic'), + 'resnetv2_152x2_bit_teacher': _cfg( + url='https://storage.googleapis.com/bit_models/distill/R152x2_T_224.npz', + interpolation='bicubic'), + 'resnetv2_152x2_bit_teacher_384': _cfg( + url='https://storage.googleapis.com/bit_models/distill/R152x2_T_384.npz', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, interpolation='bicubic'), + + 'resnetv2_50': _cfg( + interpolation='bicubic'), + 'resnetv2_50d': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_50t': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_101': _cfg( + interpolation='bicubic'), + 'resnetv2_101d': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_152': _cfg( + interpolation='bicubic'), + 'resnetv2_152d': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), +} + + +def make_div(v, divisor=8): + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class PreActBottleneck(nn.Module): + """Pre-activation (v2) bottleneck block. + + Follows the implementation of "Identity Mappings in Deep Residual Networks": + https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua + + Except it puts the stride on 3x3 conv when available. + """ + + def __init__( + self, in_chs, out_chs=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, + act_layer=None, conv_layer=None, norm_layer=None, proj_layer=None, drop_path_rate=0.): + super().__init__() + first_dilation = first_dilation or dilation + conv_layer = conv_layer or StdConv2d + norm_layer = norm_layer or partial(GroupNormAct, num_groups=32) + out_chs = out_chs or in_chs + mid_chs = make_div(out_chs * bottle_ratio) + + if proj_layer is not None: + self.downsample = proj_layer( + in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, preact=True, + conv_layer=conv_layer, norm_layer=norm_layer) + else: + self.downsample = None + + self.norm1 = norm_layer(in_chs) + self.conv1 = conv_layer(in_chs, mid_chs, 1) + self.norm2 = norm_layer(mid_chs) + self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) + self.norm3 = norm_layer(mid_chs) + self.conv3 = conv_layer(mid_chs, out_chs, 1) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + + def zero_init_last(self): + nn.init.zeros_(self.conv3.weight) + + def forward(self, x): + x_preact = self.norm1(x) + + # shortcut branch + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(x_preact) + + # residual branch + x = self.conv1(x_preact) + x = self.conv2(self.norm2(x)) + x = self.conv3(self.norm3(x)) + x = self.drop_path(x) + return x + shortcut + + +class Bottleneck(nn.Module): + """Non Pre-activation bottleneck block, equiv to V1.5/V1b Bottleneck. Used for ViT. + """ + def __init__( + self, in_chs, out_chs=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, + act_layer=None, conv_layer=None, norm_layer=None, proj_layer=None, drop_path_rate=0.): + super().__init__() + first_dilation = first_dilation or dilation + act_layer = act_layer or nn.ReLU + conv_layer = conv_layer or StdConv2d + norm_layer = norm_layer or partial(GroupNormAct, num_groups=32) + out_chs = out_chs or in_chs + mid_chs = make_div(out_chs * bottle_ratio) + + if proj_layer is not None: + self.downsample = proj_layer( + in_chs, out_chs, stride=stride, dilation=dilation, preact=False, + conv_layer=conv_layer, norm_layer=norm_layer) + else: + self.downsample = None + + self.conv1 = conv_layer(in_chs, mid_chs, 1) + self.norm1 = norm_layer(mid_chs) + self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) + self.norm2 = norm_layer(mid_chs) + self.conv3 = conv_layer(mid_chs, out_chs, 1) + self.norm3 = norm_layer(out_chs, apply_act=False) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.act3 = act_layer(inplace=True) + + def zero_init_last(self): + nn.init.zeros_(self.norm3.weight) + + def forward(self, x): + # shortcut branch + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(x) + + # residual + x = self.conv1(x) + x = self.norm1(x) + x = self.conv2(x) + x = self.norm2(x) + x = self.conv3(x) + x = self.norm3(x) + x = self.drop_path(x) + x = self.act3(x + shortcut) + return x + + +class DownsampleConv(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, preact=True, + conv_layer=None, norm_layer=None): + super(DownsampleConv, self).__init__() + self.conv = conv_layer(in_chs, out_chs, 1, stride=stride) + self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False) + + def forward(self, x): + return self.norm(self.conv(x)) + + +class DownsampleAvg(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, + preact=True, conv_layer=None, norm_layer=None): + """ AvgPool Downsampling as in 'D' ResNet variants. This is not in RegNet space but I might experiment.""" + super(DownsampleAvg, self).__init__() + avg_stride = stride if dilation == 1 else 1 + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + else: + self.pool = nn.Identity() + self.conv = conv_layer(in_chs, out_chs, 1, stride=1) + self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False) + + def forward(self, x): + return self.norm(self.conv(self.pool(x))) + + +class ResNetStage(nn.Module): + """ResNet Stage.""" + def __init__(self, in_chs, out_chs, stride, dilation, depth, bottle_ratio=0.25, groups=1, + avg_down=False, block_dpr=None, block_fn=PreActBottleneck, + act_layer=None, conv_layer=None, norm_layer=None, **block_kwargs): + super(ResNetStage, self).__init__() + first_dilation = 1 if dilation in (1, 2) else 2 + layer_kwargs = dict(act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer) + proj_layer = DownsampleAvg if avg_down else DownsampleConv + prev_chs = in_chs + self.blocks = nn.Sequential() + for block_idx in range(depth): + drop_path_rate = block_dpr[block_idx] if block_dpr else 0. + stride = stride if block_idx == 0 else 1 + self.blocks.add_module(str(block_idx), block_fn( + prev_chs, out_chs, stride=stride, dilation=dilation, bottle_ratio=bottle_ratio, groups=groups, + first_dilation=first_dilation, proj_layer=proj_layer, drop_path_rate=drop_path_rate, + **layer_kwargs, **block_kwargs)) + prev_chs = out_chs + first_dilation = dilation + proj_layer = None + + def forward(self, x): + x = self.blocks(x) + return x + + +def is_stem_deep(stem_type): + return any([s in stem_type for s in ('deep', 'tiered')]) + + +def create_resnetv2_stem( + in_chs, out_chs=64, stem_type='', preact=True, + conv_layer=StdConv2d, norm_layer=partial(GroupNormAct, num_groups=32)): + stem = OrderedDict() + assert stem_type in ('', 'fixed', 'same', 'deep', 'deep_fixed', 'deep_same', 'tiered') + + # NOTE conv padding mode can be changed by overriding the conv_layer def + if is_stem_deep(stem_type): + # A 3 deep 3x3 conv stack as in ResNet V1D models + if 'tiered' in stem_type: + stem_chs = (3 * out_chs // 8, out_chs // 2) # 'T' resnets in resnet.py + else: + stem_chs = (out_chs // 2, out_chs // 2) # 'D' ResNets + stem['conv1'] = conv_layer(in_chs, stem_chs[0], kernel_size=3, stride=2) + stem['norm1'] = norm_layer(stem_chs[0]) + stem['conv2'] = conv_layer(stem_chs[0], stem_chs[1], kernel_size=3, stride=1) + stem['norm2'] = norm_layer(stem_chs[1]) + stem['conv3'] = conv_layer(stem_chs[1], out_chs, kernel_size=3, stride=1) + if not preact: + stem['norm3'] = norm_layer(out_chs) + else: + # The usual 7x7 stem conv + stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2) + if not preact: + stem['norm'] = norm_layer(out_chs) + + if 'fixed' in stem_type: + # 'fixed' SAME padding approximation that is used in BiT models + stem['pad'] = nn.ConstantPad2d(1, 0.) + stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=0) + elif 'same' in stem_type: + # full, input size based 'SAME' padding, used in ViT Hybrid model + stem['pool'] = create_pool2d('max', kernel_size=3, stride=2, padding='same') + else: + # the usual PyTorch symmetric padding + stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + return nn.Sequential(stem) + + +class ResNetV2(nn.Module): + """Implementation of Pre-activation (v2) ResNet mode. + """ + + def __init__( + self, layers, channels=(256, 512, 1024, 2048), + num_classes=1000, in_chans=3, global_pool='avg', output_stride=32, + width_factor=1, stem_chs=64, stem_type='', avg_down=False, preact=True, + act_layer=nn.ReLU, conv_layer=StdConv2d, norm_layer=partial(GroupNormAct, num_groups=32), + drop_rate=0., drop_path_rate=0., zero_init_last=True): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + wf = width_factor + + self.feature_info = [] + stem_chs = make_div(stem_chs * wf) + self.stem = create_resnetv2_stem( + in_chans, stem_chs, stem_type, preact, conv_layer=conv_layer, norm_layer=norm_layer) + stem_feat = ('stem.conv3' if is_stem_deep(stem_type) else 'stem.conv') if preact else 'stem.norm' + self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=stem_feat)) + + prev_chs = stem_chs + curr_stride = 4 + dilation = 1 + block_dprs = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)] + block_fn = PreActBottleneck if preact else Bottleneck + self.stages = nn.Sequential() + for stage_idx, (d, c, bdpr) in enumerate(zip(layers, channels, block_dprs)): + out_chs = make_div(c * wf) + stride = 1 if stage_idx == 0 else 2 + if curr_stride >= output_stride: + dilation *= stride + stride = 1 + stage = ResNetStage( + prev_chs, out_chs, stride=stride, dilation=dilation, depth=d, avg_down=avg_down, + act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer, block_dpr=bdpr, block_fn=block_fn) + prev_chs = out_chs + curr_stride *= stride + self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{stage_idx}')] + self.stages.add_module(str(stage_idx), stage) + + self.num_features = prev_chs + self.norm = norm_layer(self.num_features) if preact else nn.Identity() + self.head = ClassifierHead( + self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, use_conv=True) + + self.init_weights(zero_init_last=zero_init_last) + + def init_weights(self, zero_init_last=True): + named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) + + @torch.jit.ignore() + def load_pretrained(self, checkpoint_path, prefix='resnet/'): + _load_weights(self, checkpoint_path, prefix) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.head = ClassifierHead( + self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, use_conv=True) + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.norm(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _init_weights(module: nn.Module, name: str = '', zero_init_last=True): + if isinstance(module, nn.Linear) or ('head.fc' in name and isinstance(module, nn.Conv2d)): + nn.init.normal_(module.weight, mean=0.0, std=0.01) + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu') + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.BatchNorm2d, nn.LayerNorm, nn.GroupNorm)): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif zero_init_last and hasattr(module, 'zero_init_last'): + module.zero_init_last() + + +@torch.no_grad() +def _load_weights(model: nn.Module, checkpoint_path: str, prefix: str = 'resnet/'): + import numpy as np + + def t2p(conv_weights): + """Possibly convert HWIO to OIHW.""" + if conv_weights.ndim == 4: + conv_weights = conv_weights.transpose([3, 2, 0, 1]) + return torch.from_numpy(conv_weights) + + weights = np.load(checkpoint_path) + stem_conv_w = adapt_input_conv( + model.stem.conv.weight.shape[1], t2p(weights[f'{prefix}root_block/standardized_conv2d/kernel'])) + model.stem.conv.weight.copy_(stem_conv_w) + model.norm.weight.copy_(t2p(weights[f'{prefix}group_norm/gamma'])) + model.norm.bias.copy_(t2p(weights[f'{prefix}group_norm/beta'])) + if isinstance(getattr(model.head, 'fc', None), nn.Conv2d) and \ + model.head.fc.weight.shape[0] == weights[f'{prefix}head/conv2d/kernel'].shape[-1]: + model.head.fc.weight.copy_(t2p(weights[f'{prefix}head/conv2d/kernel'])) + model.head.fc.bias.copy_(t2p(weights[f'{prefix}head/conv2d/bias'])) + for i, (sname, stage) in enumerate(model.stages.named_children()): + for j, (bname, block) in enumerate(stage.blocks.named_children()): + cname = 'standardized_conv2d' + block_prefix = f'{prefix}block{i + 1}/unit{j + 1:02d}/' + block.conv1.weight.copy_(t2p(weights[f'{block_prefix}a/{cname}/kernel'])) + block.conv2.weight.copy_(t2p(weights[f'{block_prefix}b/{cname}/kernel'])) + block.conv3.weight.copy_(t2p(weights[f'{block_prefix}c/{cname}/kernel'])) + block.norm1.weight.copy_(t2p(weights[f'{block_prefix}a/group_norm/gamma'])) + block.norm2.weight.copy_(t2p(weights[f'{block_prefix}b/group_norm/gamma'])) + block.norm3.weight.copy_(t2p(weights[f'{block_prefix}c/group_norm/gamma'])) + block.norm1.bias.copy_(t2p(weights[f'{block_prefix}a/group_norm/beta'])) + block.norm2.bias.copy_(t2p(weights[f'{block_prefix}b/group_norm/beta'])) + block.norm3.bias.copy_(t2p(weights[f'{block_prefix}c/group_norm/beta'])) + if block.downsample is not None: + w = weights[f'{block_prefix}a/proj/{cname}/kernel'] + block.downsample.conv.weight.copy_(t2p(w)) + + +def _create_resnetv2(variant, pretrained=False, **kwargs): + feature_cfg = dict(flatten_sequential=True) + return build_model_with_cfg( + ResNetV2, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=feature_cfg, + pretrained_custom_load=True, + **kwargs) + + +def _create_resnetv2_bit(variant, pretrained=False, **kwargs): + return _create_resnetv2( + variant, pretrained=pretrained, stem_type='fixed', conv_layer=partial(StdConv2d, eps=1e-8), **kwargs) + + +@register_model +def resnetv2_50x1_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_50x1_bitm', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_50x3_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_50x3_bitm', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_101x1_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_101x1_bitm', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_101x3_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_101x3_bitm', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_152x2_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_152x2_bitm', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs) + + +@register_model +def resnetv2_152x4_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_152x4_bitm', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=4, **kwargs) + + +@register_model +def resnetv2_50x1_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_50x1_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 4, 6, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_50x3_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_50x3_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 4, 6, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_101x1_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_101x1_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 4, 23, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_101x3_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_101x3_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 4, 23, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_152x2_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_152x2_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 8, 36, 3], width_factor=2, **kwargs) + + +@register_model +def resnetv2_152x4_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_152x4_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 8, 36, 3], width_factor=4, **kwargs) + + +@register_model +def resnetv2_50x1_bit_distilled(pretrained=False, **kwargs): + """ ResNetV2-50x1-BiT Distilled + Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 + """ + return _create_resnetv2_bit( + 'resnetv2_50x1_bit_distilled', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_152x2_bit_teacher(pretrained=False, **kwargs): + """ ResNetV2-152x2-BiT Teacher + Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 + """ + return _create_resnetv2_bit( + 'resnetv2_152x2_bit_teacher', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs) + + +@register_model +def resnetv2_152x2_bit_teacher_384(pretrained=False, **kwargs): + """ ResNetV2-152xx-BiT Teacher @ 384x384 + Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 + """ + return _create_resnetv2_bit( + 'resnetv2_152x2_bit_teacher_384', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs) + + +@register_model +def resnetv2_50(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, **kwargs) + + +@register_model +def resnetv2_50d(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50d', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='deep', avg_down=True, **kwargs) + + +@register_model +def resnetv2_50t(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50t', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='tiered', avg_down=True, **kwargs) + + +@register_model +def resnetv2_101(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_101', pretrained=pretrained, + layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, **kwargs) + + +@register_model +def resnetv2_101d(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_101d', pretrained=pretrained, + layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='deep', avg_down=True, **kwargs) + + +@register_model +def resnetv2_152(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_152', pretrained=pretrained, + layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, **kwargs) + + +@register_model +def resnetv2_152d(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_152d', pretrained=pretrained, + layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='deep', avg_down=True, **kwargs) + + +# @register_model +# def resnetv2_50ebd(pretrained=False, **kwargs): +# # FIXME for testing w/ TPU + PyTorch XLA +# return _create_resnetv2( +# 'resnetv2_50d', pretrained=pretrained, +# layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=EvoNormBatch2d, +# stem_type='deep', avg_down=True, **kwargs) +# +# +# @register_model +# def resnetv2_50esd(pretrained=False, **kwargs): +# # FIXME for testing w/ TPU + PyTorch XLA +# return _create_resnetv2( +# 'resnetv2_50d', pretrained=pretrained, +# layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=EvoNormSample2d, +# stem_type='deep', avg_down=True, **kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/rexnet.py b/testbed/huggingface__pytorch-image-models/timm/models/rexnet.py new file mode 100644 index 0000000000000000000000000000000000000000..279780beb6c5cf05d6d89073fbc5d99f1676eebf --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/rexnet.py @@ -0,0 +1,238 @@ +""" ReXNet + +A PyTorch impl of `ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network` - +https://arxiv.org/abs/2007.00992 + +Adapted from original impl at https://github.com/clovaai/rexnet +Copyright (c) 2020-present NAVER Corp. MIT license + +Changes for timm, feature extraction, and rounded channel variant hacked together by Ross Wightman +Copyright 2020 Ross Wightman +""" + +import torch.nn as nn +from functools import partial +from math import ceil + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import ClassifierHead, create_act_layer, ConvBnAct, DropPath, make_divisible, SEModule +from .registry import register_model +from .efficientnet_builder import efficientnet_init_weights + + +def _cfg(url=''): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + } + + +default_cfgs = dict( + rexnet_100=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_100-1b4dddf4.pth'), + rexnet_130=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_130-590d768e.pth'), + rexnet_150=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_150-bd1a6aa8.pth'), + rexnet_200=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_200-8c0b7f2d.pth'), + rexnetr_100=_cfg( + url=''), + rexnetr_130=_cfg( + url=''), + rexnetr_150=_cfg( + url=''), + rexnetr_200=_cfg( + url=''), +) + +SEWithNorm = partial(SEModule, norm_layer=nn.BatchNorm2d) + + +class LinearBottleneck(nn.Module): + def __init__(self, in_chs, out_chs, stride, exp_ratio=1.0, se_ratio=0., ch_div=1, + act_layer='swish', dw_act_layer='relu6', drop_path=None): + super(LinearBottleneck, self).__init__() + self.use_shortcut = stride == 1 and in_chs <= out_chs + self.in_channels = in_chs + self.out_channels = out_chs + + if exp_ratio != 1.: + dw_chs = make_divisible(round(in_chs * exp_ratio), divisor=ch_div) + self.conv_exp = ConvBnAct(in_chs, dw_chs, act_layer=act_layer) + else: + dw_chs = in_chs + self.conv_exp = None + + self.conv_dw = ConvBnAct(dw_chs, dw_chs, 3, stride=stride, groups=dw_chs, apply_act=False) + if se_ratio > 0: + self.se = SEWithNorm(dw_chs, rd_channels=make_divisible(int(dw_chs * se_ratio), ch_div)) + else: + self.se = None + self.act_dw = create_act_layer(dw_act_layer) + + self.conv_pwl = ConvBnAct(dw_chs, out_chs, 1, apply_act=False) + self.drop_path = drop_path + + def feat_channels(self, exp=False): + return self.conv_dw.out_channels if exp else self.out_channels + + def forward(self, x): + shortcut = x + if self.conv_exp is not None: + x = self.conv_exp(x) + x = self.conv_dw(x) + if self.se is not None: + x = self.se(x) + x = self.act_dw(x) + x = self.conv_pwl(x) + if self.use_shortcut: + if self.drop_path is not None: + x = self.drop_path(x) + x[:, 0:self.in_channels] += shortcut + return x + + +def _block_cfg(width_mult=1.0, depth_mult=1.0, initial_chs=16, final_chs=180, se_ratio=0., ch_div=1): + layers = [1, 2, 2, 3, 3, 5] + strides = [1, 2, 2, 2, 1, 2] + layers = [ceil(element * depth_mult) for element in layers] + strides = sum([[element] + [1] * (layers[idx] - 1) for idx, element in enumerate(strides)], []) + exp_ratios = [1] * layers[0] + [6] * sum(layers[1:]) + depth = sum(layers[:]) * 3 + base_chs = initial_chs / width_mult if width_mult < 1.0 else initial_chs + + # The following channel configuration is a simple instance to make each layer become an expand layer. + out_chs_list = [] + for i in range(depth // 3): + out_chs_list.append(make_divisible(round(base_chs * width_mult), divisor=ch_div)) + base_chs += final_chs / (depth // 3 * 1.0) + + se_ratios = [0.] * (layers[0] + layers[1]) + [se_ratio] * sum(layers[2:]) + + return list(zip(out_chs_list, exp_ratios, strides, se_ratios)) + + +def _build_blocks( + block_cfg, prev_chs, width_mult, ch_div=1, act_layer='swish', dw_act_layer='relu6', drop_path_rate=0.): + feat_chs = [prev_chs] + feature_info = [] + curr_stride = 2 + features = [] + num_blocks = len(block_cfg) + for block_idx, (chs, exp_ratio, stride, se_ratio) in enumerate(block_cfg): + if stride > 1: + fname = 'stem' if block_idx == 0 else f'features.{block_idx - 1}' + feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=fname)] + curr_stride *= stride + block_dpr = drop_path_rate * block_idx / (num_blocks - 1) # stochastic depth linear decay rule + drop_path = DropPath(block_dpr) if block_dpr > 0. else None + features.append(LinearBottleneck( + in_chs=prev_chs, out_chs=chs, exp_ratio=exp_ratio, stride=stride, se_ratio=se_ratio, + ch_div=ch_div, act_layer=act_layer, dw_act_layer=dw_act_layer, drop_path=drop_path)) + prev_chs = chs + feat_chs += [features[-1].feat_channels()] + pen_chs = make_divisible(1280 * width_mult, divisor=ch_div) + feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=f'features.{len(features) - 1}')] + features.append(ConvBnAct(prev_chs, pen_chs, act_layer=act_layer)) + return features, feature_info + + +class ReXNetV1(nn.Module): + def __init__(self, in_chans=3, num_classes=1000, global_pool='avg', output_stride=32, + initial_chs=16, final_chs=180, width_mult=1.0, depth_mult=1.0, se_ratio=1/12., + ch_div=1, act_layer='swish', dw_act_layer='relu6', drop_rate=0.2, drop_path_rate=0.): + super(ReXNetV1, self).__init__() + self.drop_rate = drop_rate + self.num_classes = num_classes + + assert output_stride == 32 # FIXME support dilation + stem_base_chs = 32 / width_mult if width_mult < 1.0 else 32 + stem_chs = make_divisible(round(stem_base_chs * width_mult), divisor=ch_div) + self.stem = ConvBnAct(in_chans, stem_chs, 3, stride=2, act_layer=act_layer) + + block_cfg = _block_cfg(width_mult, depth_mult, initial_chs, final_chs, se_ratio, ch_div) + features, self.feature_info = _build_blocks( + block_cfg, stem_chs, width_mult, ch_div, act_layer, dw_act_layer, drop_path_rate) + self.num_features = features[-1].out_channels + self.features = nn.Sequential(*features) + + self.head = ClassifierHead(self.num_features, num_classes, global_pool, drop_rate) + + efficientnet_init_weights(self) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.features(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_rexnet(variant, pretrained, **kwargs): + feature_cfg = dict(flatten_sequential=True) + return build_model_with_cfg( + ReXNetV1, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=feature_cfg, + **kwargs) + + +@register_model +def rexnet_100(pretrained=False, **kwargs): + """ReXNet V1 1.0x""" + return _create_rexnet('rexnet_100', pretrained, **kwargs) + + +@register_model +def rexnet_130(pretrained=False, **kwargs): + """ReXNet V1 1.3x""" + return _create_rexnet('rexnet_130', pretrained, width_mult=1.3, **kwargs) + + +@register_model +def rexnet_150(pretrained=False, **kwargs): + """ReXNet V1 1.5x""" + return _create_rexnet('rexnet_150', pretrained, width_mult=1.5, **kwargs) + + +@register_model +def rexnet_200(pretrained=False, **kwargs): + """ReXNet V1 2.0x""" + return _create_rexnet('rexnet_200', pretrained, width_mult=2.0, **kwargs) + + +@register_model +def rexnetr_100(pretrained=False, **kwargs): + """ReXNet V1 1.0x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_100', pretrained, ch_div=8, **kwargs) + + +@register_model +def rexnetr_130(pretrained=False, **kwargs): + """ReXNet V1 1.3x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_130', pretrained, width_mult=1.3, ch_div=8, **kwargs) + + +@register_model +def rexnetr_150(pretrained=False, **kwargs): + """ReXNet V1 1.5x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_150', pretrained, width_mult=1.5, ch_div=8, **kwargs) + + +@register_model +def rexnetr_200(pretrained=False, **kwargs): + """ReXNet V1 2.0x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_200', pretrained, width_mult=2.0, ch_div=8, **kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/selecsls.py b/testbed/huggingface__pytorch-image-models/timm/models/selecsls.py new file mode 100644 index 0000000000000000000000000000000000000000..1f3379db3da5e5303d9af2c78098b9a5424a5fde --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/selecsls.py @@ -0,0 +1,362 @@ +"""PyTorch SelecSLS Net example for ImageNet Classification +License: CC BY 4.0 (https://creativecommons.org/licenses/by/4.0/legalcode) +Author: Dushyant Mehta (@mehtadushy) + +SelecSLS (core) Network Architecture as proposed in "XNect: Real-time Multi-person 3D +Human Pose Estimation with a Single RGB Camera, Mehta et al." +https://arxiv.org/abs/1907.00837 + +Based on ResNet implementation in https://github.com/rwightman/pytorch-image-models +and SelecSLS Net implementation in https://github.com/mehtadushy/SelecSLS-Pytorch +""" +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['SelecSLS'] # model_registry will add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (4, 4), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'selecsls42': _cfg( + url='', + interpolation='bicubic'), + 'selecsls42b': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls42b-8af30141.pth', + interpolation='bicubic'), + 'selecsls60': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60-bbf87526.pth', + interpolation='bicubic'), + 'selecsls60b': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60b-94e619b5.pth', + interpolation='bicubic'), + 'selecsls84': _cfg( + url='', + interpolation='bicubic'), +} + + +class SequentialList(nn.Sequential): + + def __init__(self, *args): + super(SequentialList, self).__init__(*args) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (List[torch.Tensor]) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> (List[torch.Tensor]) + pass + + def forward(self, x) -> List[torch.Tensor]: + for module in self: + x = module(x) + return x + + +class SelectSeq(nn.Module): + def __init__(self, mode='index', index=0): + super(SelectSeq, self).__init__() + self.mode = mode + self.index = index + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (torch.Tensor) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (Tuple[torch.Tensor]) -> (torch.Tensor) + pass + + def forward(self, x) -> torch.Tensor: + if self.mode == 'index': + return x[self.index] + else: + return torch.cat(x, dim=1) + + +def conv_bn(in_chs, out_chs, k=3, stride=1, padding=None, dilation=1): + if padding is None: + padding = ((stride - 1) + dilation * (k - 1)) // 2 + return nn.Sequential( + nn.Conv2d(in_chs, out_chs, k, stride, padding=padding, dilation=dilation, bias=False), + nn.BatchNorm2d(out_chs), + nn.ReLU(inplace=True) + ) + + +class SelecSLSBlock(nn.Module): + def __init__(self, in_chs, skip_chs, mid_chs, out_chs, is_first, stride, dilation=1): + super(SelecSLSBlock, self).__init__() + self.stride = stride + self.is_first = is_first + assert stride in [1, 2] + + # Process input with 4 conv blocks with the same number of input and output channels + self.conv1 = conv_bn(in_chs, mid_chs, 3, stride, dilation=dilation) + self.conv2 = conv_bn(mid_chs, mid_chs, 1) + self.conv3 = conv_bn(mid_chs, mid_chs // 2, 3) + self.conv4 = conv_bn(mid_chs // 2, mid_chs, 1) + self.conv5 = conv_bn(mid_chs, mid_chs // 2, 3) + self.conv6 = conv_bn(2 * mid_chs + (0 if is_first else skip_chs), out_chs, 1) + + def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: + if not isinstance(x, list): + x = [x] + assert len(x) in [1, 2] + + d1 = self.conv1(x[0]) + d2 = self.conv3(self.conv2(d1)) + d3 = self.conv5(self.conv4(d2)) + if self.is_first: + out = self.conv6(torch.cat([d1, d2, d3], 1)) + return [out, out] + else: + return [self.conv6(torch.cat([d1, d2, d3, x[1]], 1)), x[1]] + + +class SelecSLS(nn.Module): + """SelecSLS42 / SelecSLS60 / SelecSLS84 + + Parameters + ---------- + cfg : network config dictionary specifying block type, feature, and head args + num_classes : int, default 1000 + Number of classification classes. + in_chans : int, default 3 + Number of input (color) channels. + drop_rate : float, default 0. + Dropout probability before classifier, for training + global_pool : str, default 'avg' + Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax' + """ + + def __init__(self, cfg, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'): + self.num_classes = num_classes + self.drop_rate = drop_rate + super(SelecSLS, self).__init__() + + self.stem = conv_bn(in_chans, 32, stride=2) + self.features = SequentialList(*[cfg['block'](*block_args) for block_args in cfg['features']]) + self.from_seq = SelectSeq() # from List[tensor] -> Tensor in module compatible way + self.head = nn.Sequential(*[conv_bn(*conv_args) for conv_args in cfg['head']]) + self.num_features = cfg['num_features'] + self.feature_info = cfg['feature_info'] + + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.stem(x) + x = self.features(x) + x = self.head(self.from_seq(x)) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.fc(x) + return x + + +def _create_selecsls(variant, pretrained, **kwargs): + cfg = {} + feature_info = [dict(num_chs=32, reduction=2, module='stem.2')] + if variant.startswith('selecsls42'): + cfg['block'] = SelecSLSBlock + # Define configuration of the network after the initial neck + cfg['features'] = [ + # in_chs, skip_chs, mid_chs, out_chs, is_first, stride + (32, 0, 64, 64, True, 2), + (64, 64, 64, 128, False, 1), + (128, 0, 144, 144, True, 2), + (144, 144, 144, 288, False, 1), + (288, 0, 304, 304, True, 2), + (304, 304, 304, 480, False, 1), + ] + feature_info.extend([ + dict(num_chs=128, reduction=4, module='features.1'), + dict(num_chs=288, reduction=8, module='features.3'), + dict(num_chs=480, reduction=16, module='features.5'), + ]) + # Head can be replaced with alternative configurations depending on the problem + feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) + if variant == 'selecsls42b': + cfg['head'] = [ + (480, 960, 3, 2), + (960, 1024, 3, 1), + (1024, 1280, 3, 2), + (1280, 1024, 1, 1), + ] + feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) + cfg['num_features'] = 1024 + else: + cfg['head'] = [ + (480, 960, 3, 2), + (960, 1024, 3, 1), + (1024, 1024, 3, 2), + (1024, 1280, 1, 1), + ] + feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) + cfg['num_features'] = 1280 + + elif variant.startswith('selecsls60'): + cfg['block'] = SelecSLSBlock + # Define configuration of the network after the initial neck + cfg['features'] = [ + # in_chs, skip_chs, mid_chs, out_chs, is_first, stride + (32, 0, 64, 64, True, 2), + (64, 64, 64, 128, False, 1), + (128, 0, 128, 128, True, 2), + (128, 128, 128, 128, False, 1), + (128, 128, 128, 288, False, 1), + (288, 0, 288, 288, True, 2), + (288, 288, 288, 288, False, 1), + (288, 288, 288, 288, False, 1), + (288, 288, 288, 416, False, 1), + ] + feature_info.extend([ + dict(num_chs=128, reduction=4, module='features.1'), + dict(num_chs=288, reduction=8, module='features.4'), + dict(num_chs=416, reduction=16, module='features.8'), + ]) + # Head can be replaced with alternative configurations depending on the problem + feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) + if variant == 'selecsls60b': + cfg['head'] = [ + (416, 756, 3, 2), + (756, 1024, 3, 1), + (1024, 1280, 3, 2), + (1280, 1024, 1, 1), + ] + feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) + cfg['num_features'] = 1024 + else: + cfg['head'] = [ + (416, 756, 3, 2), + (756, 1024, 3, 1), + (1024, 1024, 3, 2), + (1024, 1280, 1, 1), + ] + feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) + cfg['num_features'] = 1280 + + elif variant == 'selecsls84': + cfg['block'] = SelecSLSBlock + # Define configuration of the network after the initial neck + cfg['features'] = [ + # in_chs, skip_chs, mid_chs, out_chs, is_first, stride + (32, 0, 64, 64, True, 2), + (64, 64, 64, 144, False, 1), + (144, 0, 144, 144, True, 2), + (144, 144, 144, 144, False, 1), + (144, 144, 144, 144, False, 1), + (144, 144, 144, 144, False, 1), + (144, 144, 144, 304, False, 1), + (304, 0, 304, 304, True, 2), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 512, False, 1), + ] + feature_info.extend([ + dict(num_chs=144, reduction=4, module='features.1'), + dict(num_chs=304, reduction=8, module='features.6'), + dict(num_chs=512, reduction=16, module='features.12'), + ]) + # Head can be replaced with alternative configurations depending on the problem + cfg['head'] = [ + (512, 960, 3, 2), + (960, 1024, 3, 1), + (1024, 1024, 3, 2), + (1024, 1280, 3, 1), + ] + cfg['num_features'] = 1280 + feature_info.extend([ + dict(num_chs=1024, reduction=32, module='head.1'), + dict(num_chs=1280, reduction=64, module='head.3') + ]) + else: + raise ValueError('Invalid net configuration ' + variant + ' !!!') + cfg['feature_info'] = feature_info + + # this model can do 6 feature levels by default, unlike most others, leave as 0-4 to avoid surprises? + return build_model_with_cfg( + SelecSLS, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=cfg, + feature_cfg=dict(out_indices=(0, 1, 2, 3, 4), flatten_sequential=True), + **kwargs) + + +@register_model +def selecsls42(pretrained=False, **kwargs): + """Constructs a SelecSLS42 model. + """ + return _create_selecsls('selecsls42', pretrained, **kwargs) + + +@register_model +def selecsls42b(pretrained=False, **kwargs): + """Constructs a SelecSLS42_B model. + """ + return _create_selecsls('selecsls42b', pretrained, **kwargs) + + +@register_model +def selecsls60(pretrained=False, **kwargs): + """Constructs a SelecSLS60 model. + """ + return _create_selecsls('selecsls60', pretrained, **kwargs) + + +@register_model +def selecsls60b(pretrained=False, **kwargs): + """Constructs a SelecSLS60_B model. + """ + return _create_selecsls('selecsls60b', pretrained, **kwargs) + + +@register_model +def selecsls84(pretrained=False, **kwargs): + """Constructs a SelecSLS84 model. + """ + return _create_selecsls('selecsls84', pretrained, **kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/senet.py b/testbed/huggingface__pytorch-image-models/timm/models/senet.py new file mode 100644 index 0000000000000000000000000000000000000000..3d0ba7b3ee573523523c3af574c835ccdf502a32 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/senet.py @@ -0,0 +1,467 @@ +""" +SEResNet implementation from Cadene's pretrained models +https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py +Additional credit to https://github.com/creafz + +Original model: https://github.com/hujie-frank/SENet + +ResNet code gently borrowed from +https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py + +FIXME I'm deprecating this model and moving them to ResNet as I don't want to maintain duplicate +support for extras like dilation, switchable BN/activations, feature extraction, etc that don't exist here. +""" +import math +from collections import OrderedDict + +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['SENet'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'layer0.conv1', 'classifier': 'last_linear', + **kwargs + } + + +default_cfgs = { + 'legacy_senet154': + _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth'), + 'legacy_seresnet18': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth', + interpolation='bicubic'), + 'legacy_seresnet34': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth'), + 'legacy_seresnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth'), + 'legacy_seresnet101': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth'), + 'legacy_seresnet152': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth'), + 'legacy_seresnext26_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth', + interpolation='bicubic'), + 'legacy_seresnext50_32x4d': + _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth'), + 'legacy_seresnext101_32x4d': + _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth'), +} + + +def _weight_init(m): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) + + +class SEModule(nn.Module): + + def __init__(self, channels, reduction): + super(SEModule, self).__init__() + self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1) + self.relu = nn.ReLU(inplace=True) + self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + module_input = x + x = x.mean((2, 3), keepdim=True) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.sigmoid(x) + return module_input * x + + +class Bottleneck(nn.Module): + """ + Base class for bottlenecks that implements `forward()` method. + """ + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out = self.se_module(out) + shortcut + out = self.relu(out) + + return out + + +class SEBottleneck(Bottleneck): + """ + Bottleneck for SENet154. + """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, + downsample=None): + super(SEBottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes * 2) + self.conv2 = nn.Conv2d( + planes * 2, planes * 4, kernel_size=3, stride=stride, + padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(planes * 4) + self.conv3 = nn.Conv2d( + planes * 4, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNetBottleneck(Bottleneck): + """ + ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe + implementation and uses `stride=stride` in `conv1` and not in `conv2` + (the latter is used in the torchvision implementation of ResNet). + """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, + downsample=None): + super(SEResNetBottleneck, self).__init__() + self.conv1 = nn.Conv2d( + inplanes, planes, kernel_size=1, bias=False, stride=stride) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNeXtBottleneck(Bottleneck): + """ + ResNeXt bottleneck type C with a Squeeze-and-Excitation module. + """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, + downsample=None, base_width=4): + super(SEResNeXtBottleneck, self).__init__() + width = math.floor(planes * (base_width / 64)) * groups + self.conv1 = nn.Conv2d( + inplanes, width, kernel_size=1, bias=False, stride=1) + self.bn1 = nn.BatchNorm2d(width) + self.conv2 = nn.Conv2d( + width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(width) + self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNetBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): + super(SEResNetBlock, self).__init__() + self.conv1 = nn.Conv2d( + inplanes, planes, kernel_size=3, padding=1, stride=stride, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes, reduction=reduction) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out = self.se_module(out) + shortcut + out = self.relu(out) + + return out + + +class SENet(nn.Module): + + def __init__(self, block, layers, groups, reduction, drop_rate=0.2, + in_chans=3, inplanes=64, input_3x3=False, downsample_kernel_size=1, + downsample_padding=0, num_classes=1000, global_pool='avg'): + """ + Parameters + ---------- + block (nn.Module): Bottleneck class. + - For SENet154: SEBottleneck + - For SE-ResNet models: SEResNetBottleneck + - For SE-ResNeXt models: SEResNeXtBottleneck + layers (list of ints): Number of residual blocks for 4 layers of the + network (layer1...layer4). + groups (int): Number of groups for the 3x3 convolution in each + bottleneck block. + - For SENet154: 64 + - For SE-ResNet models: 1 + - For SE-ResNeXt models: 32 + reduction (int): Reduction ratio for Squeeze-and-Excitation modules. + - For all models: 16 + dropout_p (float or None): Drop probability for the Dropout layer. + If `None` the Dropout layer is not used. + - For SENet154: 0.2 + - For SE-ResNet models: None + - For SE-ResNeXt models: None + inplanes (int): Number of input channels for layer1. + - For SENet154: 128 + - For SE-ResNet models: 64 + - For SE-ResNeXt models: 64 + input_3x3 (bool): If `True`, use three 3x3 convolutions instead of + a single 7x7 convolution in layer0. + - For SENet154: True + - For SE-ResNet models: False + - For SE-ResNeXt models: False + downsample_kernel_size (int): Kernel size for downsampling convolutions + in layer2, layer3 and layer4. + - For SENet154: 3 + - For SE-ResNet models: 1 + - For SE-ResNeXt models: 1 + downsample_padding (int): Padding for downsampling convolutions in + layer2, layer3 and layer4. + - For SENet154: 1 + - For SE-ResNet models: 0 + - For SE-ResNeXt models: 0 + num_classes (int): Number of outputs in `last_linear` layer. + - For all models: 1000 + """ + super(SENet, self).__init__() + self.inplanes = inplanes + self.num_classes = num_classes + self.drop_rate = drop_rate + if input_3x3: + layer0_modules = [ + ('conv1', nn.Conv2d(in_chans, 64, 3, stride=2, padding=1, bias=False)), + ('bn1', nn.BatchNorm2d(64)), + ('relu1', nn.ReLU(inplace=True)), + ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)), + ('bn2', nn.BatchNorm2d(64)), + ('relu2', nn.ReLU(inplace=True)), + ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)), + ('bn3', nn.BatchNorm2d(inplanes)), + ('relu3', nn.ReLU(inplace=True)), + ] + else: + layer0_modules = [ + ('conv1', nn.Conv2d( + in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)), + ('bn1', nn.BatchNorm2d(inplanes)), + ('relu1', nn.ReLU(inplace=True)), + ] + self.layer0 = nn.Sequential(OrderedDict(layer0_modules)) + # To preserve compatibility with Caffe weights `ceil_mode=True` is used instead of `padding=1`. + self.pool0 = nn.MaxPool2d(3, stride=2, ceil_mode=True) + self.feature_info = [dict(num_chs=inplanes, reduction=2, module='layer0')] + self.layer1 = self._make_layer( + block, + planes=64, + blocks=layers[0], + groups=groups, + reduction=reduction, + downsample_kernel_size=1, + downsample_padding=0 + ) + self.feature_info += [dict(num_chs=64 * block.expansion, reduction=4, module='layer1')] + self.layer2 = self._make_layer( + block, + planes=128, + blocks=layers[1], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.feature_info += [dict(num_chs=128 * block.expansion, reduction=8, module='layer2')] + self.layer3 = self._make_layer( + block, + planes=256, + blocks=layers[2], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.feature_info += [dict(num_chs=256 * block.expansion, reduction=16, module='layer3')] + self.layer4 = self._make_layer( + block, + planes=512, + blocks=layers[3], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.feature_info += [dict(num_chs=512 * block.expansion, reduction=32, module='layer4')] + self.num_features = 512 * block.expansion + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + for m in self.modules(): + _weight_init(m) + + def _make_layer(self, block, planes, blocks, groups, reduction, stride=1, + downsample_kernel_size=1, downsample_padding=0): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size, + stride=stride, padding=downsample_padding, bias=False), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [block(self.inplanes, planes, groups, reduction, stride, downsample)] + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes, groups, reduction)) + + return nn.Sequential(*layers) + + def get_classifier(self): + return self.last_linear + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.layer0(x) + x = self.pool0(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x + + def logits(self, x): + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.last_linear(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.logits(x) + return x + + +def _create_senet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + SENet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def legacy_seresnet18(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBlock, layers=[2, 2, 2, 2], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet18', pretrained, **model_args) + + +@register_model +def legacy_seresnet34(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBlock, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet34', pretrained, **model_args) + + +@register_model +def legacy_seresnet50(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet50', pretrained, **model_args) + + +@register_model +def legacy_seresnet101(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBottleneck, layers=[3, 4, 23, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet101', pretrained, **model_args) + + +@register_model +def legacy_seresnet152(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet152', pretrained, **model_args) + + +@register_model +def legacy_senet154(pretrained=False, **kwargs): + model_args = dict( + block=SEBottleneck, layers=[3, 8, 36, 3], groups=64, reduction=16, + downsample_kernel_size=3, downsample_padding=1, inplanes=128, input_3x3=True, **kwargs) + return _create_senet('legacy_senet154', pretrained, **model_args) + + +@register_model +def legacy_seresnext26_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=SEResNeXtBottleneck, layers=[2, 2, 2, 2], groups=32, reduction=16, **kwargs) + return _create_senet('legacy_seresnext26_32x4d', pretrained, **model_args) + + +@register_model +def legacy_seresnext50_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=SEResNeXtBottleneck, layers=[3, 4, 6, 3], groups=32, reduction=16, **kwargs) + return _create_senet('legacy_seresnext50_32x4d', pretrained, **model_args) + + +@register_model +def legacy_seresnext101_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, **kwargs) + return _create_senet('legacy_seresnext101_32x4d', pretrained, **model_args) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/sknet.py b/testbed/huggingface__pytorch-image-models/timm/models/sknet.py new file mode 100644 index 0000000000000000000000000000000000000000..4dc2aa534c1c9d27c7a988b72f9c4f5a1f172e95 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/sknet.py @@ -0,0 +1,215 @@ +""" Selective Kernel Networks (ResNet base) + +Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) + +This was inspired by reading 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268) +and a streamlined impl at https://github.com/clovaai/assembled-cnn but I ended up building something closer +to the original paper with some modifications of my own to better balance param count vs accuracy. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math + +from torch import nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import SelectiveKernel, ConvBnAct, create_attn +from .registry import register_model +from .resnet import ResNet + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'skresnet18': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet18_ra-4eec2804.pth'), + 'skresnet34': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet34_ra-bdc0ccde.pth'), + 'skresnet50': _cfg(), + 'skresnet50d': _cfg( + first_conv='conv1.0'), + 'skresnext50_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnext50_ra-f40e40bf.pth'), +} + + +class SelectiveKernelBasic(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, + sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(SelectiveKernelBasic, self).__init__() + + sk_kwargs = sk_kwargs or {} + conv_kwargs = dict(drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer) + assert cardinality == 1, 'BasicBlock only supports cardinality of 1' + assert base_width == 64, 'BasicBlock doest not support changing base width' + first_planes = planes // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + + self.conv1 = SelectiveKernel( + inplanes, first_planes, stride=stride, dilation=first_dilation, **conv_kwargs, **sk_kwargs) + conv_kwargs['act_layer'] = None + self.conv2 = ConvBnAct( + first_planes, outplanes, kernel_size=3, dilation=dilation, **conv_kwargs) + self.se = create_attn(attn_layer, outplanes) + self.act = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_block = drop_block + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv2.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + if self.se is not None: + x = self.se(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act(x) + return x + + +class SelectiveKernelBottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, + cardinality=1, base_width=64, sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, + drop_block=None, drop_path=None): + super(SelectiveKernelBottleneck, self).__init__() + + sk_kwargs = sk_kwargs or {} + conv_kwargs = dict(drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer) + width = int(math.floor(planes * (base_width / 64)) * cardinality) + first_planes = width // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + + self.conv1 = ConvBnAct(inplanes, first_planes, kernel_size=1, **conv_kwargs) + self.conv2 = SelectiveKernel( + first_planes, width, stride=stride, dilation=first_dilation, groups=cardinality, + **conv_kwargs, **sk_kwargs) + conv_kwargs['act_layer'] = None + self.conv3 = ConvBnAct(width, outplanes, kernel_size=1, **conv_kwargs) + self.se = create_attn(attn_layer, outplanes) + self.act = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_block = drop_block + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv3.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + x = self.conv3(x) + if self.se is not None: + x = self.se(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act(x) + return x + + +def _create_skresnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def skresnet18(pretrained=False, **kwargs): + """Constructs a Selective Kernel ResNet-18 model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) + model_args = dict( + block=SelectiveKernelBasic, layers=[2, 2, 2, 2], block_args=dict(sk_kwargs=sk_kwargs), + zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnet18', pretrained, **model_args) + + +@register_model +def skresnet34(pretrained=False, **kwargs): + """Constructs a Selective Kernel ResNet-34 model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) + model_args = dict( + block=SelectiveKernelBasic, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), + zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnet34', pretrained, **model_args) + + +@register_model +def skresnet50(pretrained=False, **kwargs): + """Constructs a Select Kernel ResNet-50 model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(split_input=True) + model_args = dict( + block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), + zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnet50', pretrained, **model_args) + + +@register_model +def skresnet50d(pretrained=False, **kwargs): + """Constructs a Select Kernel ResNet-50-D model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(split_input=True) + model_args = dict( + block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnet50d', pretrained, **model_args) + + +@register_model +def skresnext50_32x4d(pretrained=False, **kwargs): + """Constructs a Select Kernel ResNeXt50-32x4d model. This should be equivalent to + the SKNet-50 model in the Select Kernel Paper + """ + sk_kwargs = dict(rd_ratio=1/16, rd_divisor=32, split_input=False) + model_args = dict( + block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnext50_32x4d', pretrained, **model_args) + diff --git a/testbed/huggingface__pytorch-image-models/timm/models/swin_transformer.py b/testbed/huggingface__pytorch-image-models/timm/models/swin_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..2ee106d287c7e56c7ac509fcc03c6d1abdf89994 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/swin_transformer.py @@ -0,0 +1,652 @@ +""" Swin Transformer +A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` + - https://arxiv.org/pdf/2103.14030 + +Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below + +""" +# -------------------------------------------------------- +# Swin Transformer +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# -------------------------------------------------------- +import logging +import math +from copy import deepcopy +from typing import Optional + +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_ +from .registry import register_model +from .vision_transformer import checkpoint_filter_fn, _init_vit_weights + +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # patch models (my experiments) + 'swin_base_patch4_window12_384': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + + 'swin_base_patch4_window7_224': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth', + ), + + 'swin_large_patch4_window12_384': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + + 'swin_large_patch4_window7_224': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth', + ), + + 'swin_small_patch4_window7_224': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth', + ), + + 'swin_tiny_patch4_window7_224': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth', + ), + + 'swin_base_patch4_window12_384_in22k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth', + input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841), + + 'swin_base_patch4_window7_224_in22k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth', + num_classes=21841), + + 'swin_large_patch4_window12_384_in22k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth', + input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841), + + 'swin_large_patch4_window7_224_in22k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth', + num_classes=21841), + +} + + +def window_partition(x, window_size: int): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +def window_reverse(windows, window_size: int, H: int, W: int): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + + Returns: + x: (B, H, W, C) + """ + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class WindowAttention(nn.Module): + r""" Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. Default: 0.0 + """ + + def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + trunc_normal_(self.relative_position_bias_table, std=.02) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask: Optional[torch.Tensor] = None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class SwinTransformerBlock(nn.Module): + r""" Swin Transformer Block. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resulotion. + num_heads (int): Number of attention heads. + window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0, + mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + if min(self.input_resolution) <= self.window_size: + # if window size is larger than input resolution, we don't partition windows + self.shift_size = 0 + self.window_size = min(self.input_resolution) + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, + attn_drop=attn_drop, proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if self.shift_size > 0: + # calculate attention mask for SW-MSA + H, W = self.input_resolution + img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + else: + attn_mask = None + + self.register_buffer("attn_mask", attn_mask) + + def forward(self, x): + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + else: + shifted_x = x + + # partition windows + x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + +class PatchMerging(nn.Module): + r""" Patch Merging Layer. + + Args: + input_resolution (tuple[int]): Resolution of input feature. + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even." + + x = x.view(B, H, W, C) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C + + x = self.norm(x) + x = self.reduction(x) + + return x + + def extra_repr(self) -> str: + return f"input_resolution={self.input_resolution}, dim={self.dim}" + + def flops(self): + H, W = self.input_resolution + flops = H * W * self.dim + flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim + return flops + + +class BasicLayer(nn.Module): + """ A basic Swin Transformer layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + """ + + def __init__(self, dim, input_resolution, depth, num_heads, window_size, + mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList([ + SwinTransformerBlock( + dim=dim, input_resolution=input_resolution, num_heads=num_heads, window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer) + for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x): + for blk in self.blocks: + if not torch.jit.is_scripting() and self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" + + +class SwinTransformer(nn.Module): + r""" Swin Transformer + A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/pdf/2103.14030 + + Args: + img_size (int | tuple(int)): Input image size. Default 224 + patch_size (int | tuple(int)): Patch size. Default: 4 + in_chans (int): Number of input image channels. Default: 3 + num_classes (int): Number of classes for classification head. Default: 1000 + embed_dim (int): Patch embedding dimension. Default: 96 + depths (tuple(int)): Depth of each Swin Transformer layer. + num_heads (tuple(int)): Number of attention heads in different layers. + window_size (int): Window size. Default: 7 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + drop_rate (float): Dropout rate. Default: 0 + attn_drop_rate (float): Attention dropout rate. Default: 0 + drop_path_rate (float): Stochastic depth rate. Default: 0.1 + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False + patch_norm (bool): If True, add normalization after patch embedding. Default: True + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False + """ + + def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, + embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), + window_size=7, mlp_ratio=4., qkv_bias=True, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, + norm_layer=nn.LayerNorm, ape=False, patch_norm=True, + use_checkpoint=False, weight_init='', **kwargs): + super().__init__() + + self.num_classes = num_classes + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) + self.mlp_ratio = mlp_ratio + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + num_patches = self.patch_embed.num_patches + self.patch_grid = self.patch_embed.grid_size + + # absolute position embedding + if self.ape: + self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + trunc_normal_(self.absolute_pos_embed, std=.02) + else: + self.absolute_pos_embed = None + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + + # build layers + layers = [] + for i_layer in range(self.num_layers): + layers += [BasicLayer( + dim=int(embed_dim * 2 ** i_layer), + input_resolution=(self.patch_grid[0] // (2 ** i_layer), self.patch_grid[1] // (2 ** i_layer)), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint) + ] + self.layers = nn.Sequential(*layers) + + self.norm = norm_layer(self.num_features) + self.avgpool = nn.AdaptiveAvgPool1d(1) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + assert weight_init in ('jax', 'jax_nlhb', 'nlhb', '') + head_bias = -math.log(self.num_classes) if 'nlhb' in weight_init else 0. + if weight_init.startswith('jax'): + for n, m in self.named_modules(): + _init_vit_weights(m, n, head_bias=head_bias, jax_impl=True) + else: + self.apply(_init_vit_weights) + + @torch.jit.ignore + def no_weight_decay(self): + return {'absolute_pos_embed'} + + @torch.jit.ignore + def no_weight_decay_keywords(self): + return {'relative_position_bias_table'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + if self.absolute_pos_embed is not None: + x = x + self.absolute_pos_embed + x = self.pos_drop(x) + x = self.layers(x) + x = self.norm(x) # B L C + x = self.avgpool(x.transpose(1, 2)) # B C 1 + x = torch.flatten(x, 1) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_swin_transformer(variant, pretrained=False, default_cfg=None, **kwargs): + if default_cfg is None: + default_cfg = deepcopy(default_cfgs[variant]) + overlay_external_default_cfg(default_cfg, kwargs) + default_num_classes = default_cfg['num_classes'] + default_img_size = default_cfg['input_size'][-2:] + + num_classes = kwargs.pop('num_classes', default_num_classes) + img_size = kwargs.pop('img_size', default_img_size) + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + SwinTransformer, variant, pretrained, + default_cfg=default_cfg, + img_size=img_size, + num_classes=num_classes, + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + + return model + + + +@register_model +def swin_base_patch4_window12_384(pretrained=False, **kwargs): + """ Swin-B @ 384x384, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer('swin_base_patch4_window12_384', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_base_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-B @ 224x224, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer('swin_base_patch4_window7_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_large_patch4_window12_384(pretrained=False, **kwargs): + """ Swin-L @ 384x384, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer('swin_large_patch4_window12_384', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_large_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-L @ 224x224, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer('swin_large_patch4_window7_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_small_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-S @ 224x224, trained ImageNet-1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), **kwargs) + return _create_swin_transformer('swin_small_patch4_window7_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_tiny_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-T @ 224x224, trained ImageNet-1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), **kwargs) + return _create_swin_transformer('swin_tiny_patch4_window7_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_base_patch4_window12_384_in22k(pretrained=False, **kwargs): + """ Swin-B @ 384x384, trained ImageNet-22k + """ + model_kwargs = dict( + patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer('swin_base_patch4_window12_384_in22k', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_base_patch4_window7_224_in22k(pretrained=False, **kwargs): + """ Swin-B @ 224x224, trained ImageNet-22k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer('swin_base_patch4_window7_224_in22k', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_large_patch4_window12_384_in22k(pretrained=False, **kwargs): + """ Swin-L @ 384x384, trained ImageNet-22k + """ + model_kwargs = dict( + patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer('swin_large_patch4_window12_384_in22k', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_large_patch4_window7_224_in22k(pretrained=False, **kwargs): + """ Swin-L @ 224x224, trained ImageNet-22k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer('swin_large_patch4_window7_224_in22k', pretrained=pretrained, **model_kwargs) \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/timm/models/tnt.py b/testbed/huggingface__pytorch-image-models/timm/models/tnt.py new file mode 100644 index 0000000000000000000000000000000000000000..8186cc4aea0c53c5a6217e3cdd0b9193bb6d1359 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/tnt.py @@ -0,0 +1,268 @@ +""" Transformer in Transformer (TNT) in PyTorch + +A PyTorch implement of TNT as described in +'Transformer in Transformer' - https://arxiv.org/abs/2103.00112 + +The official mindspore code is released and available at +https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT +""" +import math +import torch +import torch.nn as nn +from functools import partial + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.models.helpers import build_model_with_cfg +from timm.models.layers import Mlp, DropPath, trunc_normal_ +from timm.models.layers.helpers import to_2tuple +from timm.models.registry import register_model +from timm.models.vision_transformer import resize_pos_embed + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'pixel_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'tnt_s_patch16_224': _cfg( + url='https://github.com/contrastive/pytorch-image-models/releases/download/TNT/tnt_s_patch16_224.pth.tar', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + ), + 'tnt_b_patch16_224': _cfg( + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + ), +} + + +class Attention(nn.Module): + """ Multi-Head Attention + """ + def __init__(self, dim, hidden_dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.hidden_dim = hidden_dim + self.num_heads = num_heads + head_dim = hidden_dim // num_heads + self.head_dim = head_dim + self.scale = head_dim ** -0.5 + + self.qk = nn.Linear(dim, hidden_dim * 2, bias=qkv_bias) + self.v = nn.Linear(dim, dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop, inplace=True) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop, inplace=True) + + def forward(self, x): + B, N, C = x.shape + qk = self.qk(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k = qk[0], qk[1] # make torchscript happy (cannot use tensor as tuple) + v = self.v(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + """ TNT Block + """ + def __init__(self, dim, in_dim, num_pixel, num_heads=12, in_num_head=4, mlp_ratio=4., + qkv_bias=False, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + # Inner transformer + self.norm_in = norm_layer(in_dim) + self.attn_in = Attention( + in_dim, in_dim, num_heads=in_num_head, qkv_bias=qkv_bias, + attn_drop=attn_drop, proj_drop=drop) + + self.norm_mlp_in = norm_layer(in_dim) + self.mlp_in = Mlp(in_features=in_dim, hidden_features=int(in_dim * 4), + out_features=in_dim, act_layer=act_layer, drop=drop) + + self.norm1_proj = norm_layer(in_dim) + self.proj = nn.Linear(in_dim * num_pixel, dim, bias=True) + # Outer transformer + self.norm_out = norm_layer(dim) + self.attn_out = Attention( + dim, dim, num_heads=num_heads, qkv_bias=qkv_bias, + attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm_mlp = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), + out_features=dim, act_layer=act_layer, drop=drop) + + def forward(self, pixel_embed, patch_embed): + # inner + pixel_embed = pixel_embed + self.drop_path(self.attn_in(self.norm_in(pixel_embed))) + pixel_embed = pixel_embed + self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed))) + # outer + B, N, C = patch_embed.size() + patch_embed[:, 1:] = patch_embed[:, 1:] + self.proj(self.norm1_proj(pixel_embed).reshape(B, N - 1, -1)) + patch_embed = patch_embed + self.drop_path(self.attn_out(self.norm_out(patch_embed))) + patch_embed = patch_embed + self.drop_path(self.mlp(self.norm_mlp(patch_embed))) + return pixel_embed, patch_embed + + +class PixelEmbed(nn.Module): + """ Image to Pixel Embedding + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, in_dim=48, stride=4): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + # grid_size property necessary for resizing positional embedding + self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + num_patches = (self.grid_size[0]) * (self.grid_size[1]) + self.img_size = img_size + self.num_patches = num_patches + self.in_dim = in_dim + new_patch_size = [math.ceil(ps / stride) for ps in patch_size] + self.new_patch_size = new_patch_size + + self.proj = nn.Conv2d(in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride) + self.unfold = nn.Unfold(kernel_size=new_patch_size, stride=new_patch_size) + + def forward(self, x, pixel_pos): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + x = self.proj(x) + x = self.unfold(x) + x = x.transpose(1, 2).reshape(B * self.num_patches, self.in_dim, self.new_patch_size[0], self.new_patch_size[1]) + x = x + pixel_pos + x = x.reshape(B * self.num_patches, self.in_dim, -1).transpose(1, 2) + return x + + +class TNT(nn.Module): + """ Transformer in Transformer - https://arxiv.org/abs/2103.00112 + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, in_dim=48, depth=12, + num_heads=12, in_num_head=4, mlp_ratio=4., qkv_bias=False, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., norm_layer=nn.LayerNorm, first_stride=4): + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + + self.pixel_embed = PixelEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, in_dim=in_dim, stride=first_stride) + num_patches = self.pixel_embed.num_patches + self.num_patches = num_patches + new_patch_size = self.pixel_embed.new_patch_size + num_pixel = new_patch_size[0] * new_patch_size[1] + + self.norm1_proj = norm_layer(num_pixel * in_dim) + self.proj = nn.Linear(num_pixel * in_dim, embed_dim) + self.norm2_proj = norm_layer(embed_dim) + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.patch_pos = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + self.pixel_pos = nn.Parameter(torch.zeros(1, in_dim, new_patch_size[0], new_patch_size[1])) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + blocks = [] + for i in range(depth): + blocks.append(Block( + dim=embed_dim, in_dim=in_dim, num_pixel=num_pixel, num_heads=num_heads, in_num_head=in_num_head, + mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, + drop_path=dpr[i], norm_layer=norm_layer)) + self.blocks = nn.ModuleList(blocks) + self.norm = norm_layer(embed_dim) + + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.cls_token, std=.02) + trunc_normal_(self.patch_pos, std=.02) + trunc_normal_(self.pixel_pos, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'patch_pos', 'pixel_pos', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + B = x.shape[0] + pixel_embed = self.pixel_embed(x, self.pixel_pos) + + patch_embed = self.norm2_proj(self.proj(self.norm1_proj(pixel_embed.reshape(B, self.num_patches, -1)))) + patch_embed = torch.cat((self.cls_token.expand(B, -1, -1), patch_embed), dim=1) + patch_embed = patch_embed + self.patch_pos + patch_embed = self.pos_drop(patch_embed) + + for blk in self.blocks: + pixel_embed, patch_embed = blk(pixel_embed, patch_embed) + + patch_embed = self.norm(patch_embed) + return patch_embed[:, 0] + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + if state_dict['patch_pos'].shape != model.patch_pos.shape: + state_dict['patch_pos'] = resize_pos_embed(state_dict['patch_pos'], + model.patch_pos, getattr(model, 'num_tokens', 1), model.pixel_embed.grid_size) + return state_dict + + +def _create_tnt(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + TNT, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def tnt_s_patch16_224(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=16, embed_dim=384, in_dim=24, depth=12, num_heads=6, in_num_head=4, + qkv_bias=False, **kwargs) + model = _create_tnt('tnt_s_patch16_224', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def tnt_b_patch16_224(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=16, embed_dim=640, in_dim=40, depth=12, num_heads=10, in_num_head=4, + qkv_bias=False, **kwargs) + model = _create_tnt('tnt_b_patch16_224', pretrained=pretrained, **model_cfg) + return model diff --git a/testbed/huggingface__pytorch-image-models/timm/models/tresnet.py b/testbed/huggingface__pytorch-image-models/timm/models/tresnet.py new file mode 100644 index 0000000000000000000000000000000000000000..372bfb7bc0ce89241121f8b85ea928f376af8bd5 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/tresnet.py @@ -0,0 +1,297 @@ +""" +TResNet: High Performance GPU-Dedicated Architecture +https://arxiv.org/pdf/2003.13630.pdf + +Original model: https://github.com/mrT23/TResNet + +""" +from collections import OrderedDict + +import torch +import torch.nn as nn + +from .helpers import build_model_with_cfg +from .layers import SpaceToDepthModule, BlurPool2d, InplaceAbn, ClassifierHead, SEModule +from .registry import register_model + +__all__ = ['tresnet_m', 'tresnet_l', 'tresnet_xl'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': (0, 0, 0), 'std': (1, 1, 1), + 'first_conv': 'body.conv1.0', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + 'tresnet_m': _cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/tresnet_m_1k_miil_83_1.pth'), + 'tresnet_m_miil_in21k': _cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/tresnet_m_miil_in21k.pth', num_classes=11221), + 'tresnet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_81_5-235b486c.pth'), + 'tresnet_xl': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_xl_82_0-a2d51b00.pth'), + 'tresnet_m_448': _cfg( + input_size=(3, 448, 448), pool_size=(14, 14), + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_448-bc359d10.pth'), + 'tresnet_l_448': _cfg( + input_size=(3, 448, 448), pool_size=(14, 14), + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_448-940d0cd1.pth'), + 'tresnet_xl_448': _cfg( + input_size=(3, 448, 448), pool_size=(14, 14), + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_xl_448-8c1815de.pth') +} + + +def IABN2Float(module: nn.Module) -> nn.Module: + """If `module` is IABN don't use half precision.""" + if isinstance(module, InplaceAbn): + module.float() + for child in module.children(): + IABN2Float(child) + return module + + +def conv2d_iabn(ni, nf, stride, kernel_size=3, groups=1, act_layer="leaky_relu", act_param=1e-2): + return nn.Sequential( + nn.Conv2d( + ni, nf, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False), + InplaceAbn(nf, act_layer=act_layer, act_param=act_param) + ) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True, aa_layer=None): + super(BasicBlock, self).__init__() + if stride == 1: + self.conv1 = conv2d_iabn(inplanes, planes, stride=1, act_param=1e-3) + else: + if aa_layer is None: + self.conv1 = conv2d_iabn(inplanes, planes, stride=2, act_param=1e-3) + else: + self.conv1 = nn.Sequential( + conv2d_iabn(inplanes, planes, stride=1, act_param=1e-3), + aa_layer(channels=planes, filt_size=3, stride=2)) + + self.conv2 = conv2d_iabn(planes, planes, stride=1, act_layer="identity") + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + rd_chs = max(planes * self.expansion // 4, 64) + self.se = SEModule(planes * self.expansion, rd_channels=rd_chs) if use_se else None + + def forward(self, x): + if self.downsample is not None: + shortcut = self.downsample(x) + else: + shortcut = x + + out = self.conv1(x) + out = self.conv2(out) + + if self.se is not None: + out = self.se(out) + + out += shortcut + out = self.relu(out) + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True, + act_layer="leaky_relu", aa_layer=None): + super(Bottleneck, self).__init__() + self.conv1 = conv2d_iabn( + inplanes, planes, kernel_size=1, stride=1, act_layer=act_layer, act_param=1e-3) + if stride == 1: + self.conv2 = conv2d_iabn( + planes, planes, kernel_size=3, stride=1, act_layer=act_layer, act_param=1e-3) + else: + if aa_layer is None: + self.conv2 = conv2d_iabn( + planes, planes, kernel_size=3, stride=2, act_layer=act_layer, act_param=1e-3) + else: + self.conv2 = nn.Sequential( + conv2d_iabn(planes, planes, kernel_size=3, stride=1, act_layer=act_layer, act_param=1e-3), + aa_layer(channels=planes, filt_size=3, stride=2)) + + reduction_chs = max(planes * self.expansion // 8, 64) + self.se = SEModule(planes, rd_channels=reduction_chs) if use_se else None + + self.conv3 = conv2d_iabn( + planes, planes * self.expansion, kernel_size=1, stride=1, act_layer="identity") + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + if self.downsample is not None: + shortcut = self.downsample(x) + else: + shortcut = x + + out = self.conv1(x) + out = self.conv2(out) + if self.se is not None: + out = self.se(out) + + out = self.conv3(out) + out = out + shortcut # no inplace + out = self.relu(out) + + return out + + +class TResNet(nn.Module): + def __init__(self, layers, in_chans=3, num_classes=1000, width_factor=1.0, global_pool='fast', drop_rate=0.): + self.num_classes = num_classes + self.drop_rate = drop_rate + super(TResNet, self).__init__() + + aa_layer = BlurPool2d + + # TResnet stages + self.inplanes = int(64 * width_factor) + self.planes = int(64 * width_factor) + conv1 = conv2d_iabn(in_chans * 16, self.planes, stride=1, kernel_size=3) + layer1 = self._make_layer( + BasicBlock, self.planes, layers[0], stride=1, use_se=True, aa_layer=aa_layer) # 56x56 + layer2 = self._make_layer( + BasicBlock, self.planes * 2, layers[1], stride=2, use_se=True, aa_layer=aa_layer) # 28x28 + layer3 = self._make_layer( + Bottleneck, self.planes * 4, layers[2], stride=2, use_se=True, aa_layer=aa_layer) # 14x14 + layer4 = self._make_layer( + Bottleneck, self.planes * 8, layers[3], stride=2, use_se=False, aa_layer=aa_layer) # 7x7 + + # body + self.body = nn.Sequential(OrderedDict([ + ('SpaceToDepth', SpaceToDepthModule()), + ('conv1', conv1), + ('layer1', layer1), + ('layer2', layer2), + ('layer3', layer3), + ('layer4', layer4)])) + + self.feature_info = [ + dict(num_chs=self.planes, reduction=2, module=''), # Not with S2D? + dict(num_chs=self.planes, reduction=4, module='body.layer1'), + dict(num_chs=self.planes * 2, reduction=8, module='body.layer2'), + dict(num_chs=self.planes * 4 * Bottleneck.expansion, reduction=16, module='body.layer3'), + dict(num_chs=self.planes * 8 * Bottleneck.expansion, reduction=32, module='body.layer4'), + ] + + # head + self.num_features = (self.planes * 8) * Bottleneck.expansion + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + + # model initilization + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu') + elif isinstance(m, nn.BatchNorm2d) or isinstance(m, InplaceAbn): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # residual connections special initialization + for m in self.modules(): + if isinstance(m, BasicBlock): + m.conv2[1].weight = nn.Parameter(torch.zeros_like(m.conv2[1].weight)) # BN to zero + if isinstance(m, Bottleneck): + m.conv3[1].weight = nn.Parameter(torch.zeros_like(m.conv3[1].weight)) # BN to zero + if isinstance(m, nn.Linear): + m.weight.data.normal_(0, 0.01) + + def _make_layer(self, block, planes, blocks, stride=1, use_se=True, aa_layer=None): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + layers = [] + if stride == 2: + # avg pooling before 1x1 conv + layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False)) + layers += [conv2d_iabn( + self.inplanes, planes * block.expansion, kernel_size=1, stride=1, act_layer="identity")] + downsample = nn.Sequential(*layers) + + layers = [] + layers.append(block( + self.inplanes, planes, stride, downsample, use_se=use_se, aa_layer=aa_layer)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block(self.inplanes, planes, use_se=use_se, aa_layer=aa_layer)) + return nn.Sequential(*layers) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='fast'): + self.head = ClassifierHead( + self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + return self.body(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_tresnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + TResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(out_indices=(1, 2, 3, 4), flatten_sequential=True), + **kwargs) + + +@register_model +def tresnet_m(pretrained=False, **kwargs): + model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs) + return _create_tresnet('tresnet_m', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_m_miil_in21k(pretrained=False, **kwargs): + model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs) + return _create_tresnet('tresnet_m_miil_in21k', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_l(pretrained=False, **kwargs): + model_kwargs = dict(layers=[4, 5, 18, 3], width_factor=1.2, **kwargs) + return _create_tresnet('tresnet_l', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_xl(pretrained=False, **kwargs): + model_kwargs = dict(layers=[4, 5, 24, 3], width_factor=1.3, **kwargs) + return _create_tresnet('tresnet_xl', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_m_448(pretrained=False, **kwargs): + model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs) + return _create_tresnet('tresnet_m_448', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_l_448(pretrained=False, **kwargs): + model_kwargs = dict(layers=[4, 5, 18, 3], width_factor=1.2, **kwargs) + return _create_tresnet('tresnet_l_448', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_xl_448(pretrained=False, **kwargs): + model_kwargs = dict(layers=[4, 5, 24, 3], width_factor=1.3, **kwargs) + return _create_tresnet('tresnet_xl_448', pretrained=pretrained, **model_kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/twins.py b/testbed/huggingface__pytorch-image-models/timm/models/twins.py new file mode 100644 index 0000000000000000000000000000000000000000..4aed09d90f4d832e798bc7dd39d3712cb20b966d --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/twins.py @@ -0,0 +1,422 @@ +""" Twins +A PyTorch impl of : `Twins: Revisiting the Design of Spatial Attention in Vision Transformers` + - https://arxiv.org/pdf/2104.13840.pdf + +Code/weights from https://github.com/Meituan-AutoML/Twins, original copyright/license info below + +""" +# -------------------------------------------------------- +# Twins +# Copyright (c) 2021 Meituan +# Licensed under The Apache 2.0 License [see LICENSE for details] +# Written by Xinjie Li, Xiangxiang Chu +# -------------------------------------------------------- +import math +from copy import deepcopy +from typing import Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .layers import Mlp, DropPath, to_2tuple, trunc_normal_ +from .registry import register_model +from .vision_transformer import Attention +from .helpers import build_model_with_cfg, overlay_external_default_cfg + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embeds.0.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'twins_pcpvt_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth', + ), + 'twins_pcpvt_base': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_base-e5ecb09b.pth', + ), + 'twins_pcpvt_large': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_large-d273f802.pth', + ), + 'twins_svt_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_small-42e5f78c.pth', + ), + 'twins_svt_base': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_base-c2265010.pth', + ), + 'twins_svt_large': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_large-90f6aaa9.pth', + ), +} + +Size_ = Tuple[int, int] + + +class LocallyGroupedAttn(nn.Module): + """ LSA: self attention within a group + """ + def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., ws=1): + assert ws != 1 + super(LocallyGroupedAttn, self).__init__() + assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." + + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=True) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.ws = ws + + def forward(self, x, size: Size_): + # There are two implementations for this function, zero padding or mask. We don't observe obvious difference for + # both. You can choose any one, we recommend forward_padding because it's neat. However, + # the masking implementation is more reasonable and accurate. + B, N, C = x.shape + H, W = size + x = x.view(B, H, W, C) + pad_l = pad_t = 0 + pad_r = (self.ws - W % self.ws) % self.ws + pad_b = (self.ws - H % self.ws) % self.ws + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + _, Hp, Wp, _ = x.shape + _h, _w = Hp // self.ws, Wp // self.ws + x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) + qkv = self.qkv(x).reshape( + B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5) + q, k, v = qkv[0], qkv[1], qkv[2] + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C) + x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C) + if pad_r > 0 or pad_b > 0: + x = x[:, :H, :W, :].contiguous() + x = x.reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + # def forward_mask(self, x, size: Size_): + # B, N, C = x.shape + # H, W = size + # x = x.view(B, H, W, C) + # pad_l = pad_t = 0 + # pad_r = (self.ws - W % self.ws) % self.ws + # pad_b = (self.ws - H % self.ws) % self.ws + # x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + # _, Hp, Wp, _ = x.shape + # _h, _w = Hp // self.ws, Wp // self.ws + # mask = torch.zeros((1, Hp, Wp), device=x.device) + # mask[:, -pad_b:, :].fill_(1) + # mask[:, :, -pad_r:].fill_(1) + # + # x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) # B, _h, _w, ws, ws, C + # mask = mask.reshape(1, _h, self.ws, _w, self.ws).transpose(2, 3).reshape(1, _h * _w, self.ws * self.ws) + # attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3) # 1, _h*_w, ws*ws, ws*ws + # attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-1000.0)).masked_fill(attn_mask == 0, float(0.0)) + # qkv = self.qkv(x).reshape( + # B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5) + # # n_h, B, _w*_h, nhead, ws*ws, dim + # q, k, v = qkv[0], qkv[1], qkv[2] # B, _h*_w, n_head, ws*ws, dim_head + # attn = (q @ k.transpose(-2, -1)) * self.scale # B, _h*_w, n_head, ws*ws, ws*ws + # attn = attn + attn_mask.unsqueeze(2) + # attn = attn.softmax(dim=-1) + # attn = self.attn_drop(attn) # attn @v -> B, _h*_w, n_head, ws*ws, dim_head + # attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C) + # x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C) + # if pad_r > 0 or pad_b > 0: + # x = x[:, :H, :W, :].contiguous() + # x = x.reshape(B, N, C) + # x = self.proj(x) + # x = self.proj_drop(x) + # return x + + +class GlobalSubSampleAttn(nn.Module): + """ GSA: using a key to summarize the information for a group to be efficient. + """ + def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., sr_ratio=1): + super().__init__() + assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." + + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.q = nn.Linear(dim, dim, bias=True) + self.kv = nn.Linear(dim, dim * 2, bias=True) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + self.sr_ratio = sr_ratio + if sr_ratio > 1: + self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) + self.norm = nn.LayerNorm(dim) + else: + self.sr = None + self.norm = None + + def forward(self, x, size: Size_): + B, N, C = x.shape + q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + if self.sr is not None: + x = x.permute(0, 2, 1).reshape(B, C, *size) + x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1) + x = self.norm(x) + kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + k, v = kv[0], kv[1] + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1, ws=None): + super().__init__() + self.norm1 = norm_layer(dim) + if ws is None: + self.attn = Attention(dim, num_heads, False, None, attn_drop, drop) + elif ws == 1: + self.attn = GlobalSubSampleAttn(dim, num_heads, attn_drop, drop, sr_ratio) + else: + self.attn = LocallyGroupedAttn(dim, num_heads, attn_drop, drop, ws) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x, size: Size_): + x = x + self.drop_path(self.attn(self.norm1(x), size)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class PosConv(nn.Module): + # PEG from https://arxiv.org/abs/2102.10882 + def __init__(self, in_chans, embed_dim=768, stride=1): + super(PosConv, self).__init__() + self.proj = nn.Sequential(nn.Conv2d(in_chans, embed_dim, 3, stride, 1, bias=True, groups=embed_dim), ) + self.stride = stride + + def forward(self, x, size: Size_): + B, N, C = x.shape + cnn_feat_token = x.transpose(1, 2).view(B, C, *size) + x = self.proj(cnn_feat_token) + if self.stride == 1: + x += cnn_feat_token + x = x.flatten(2).transpose(1, 2) + return x + + def no_weight_decay(self): + return ['proj.%d.weight' % i for i in range(4)] + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + + self.img_size = img_size + self.patch_size = patch_size + assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \ + f"img_size {img_size} should be divided by patch_size {patch_size}." + self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1] + self.num_patches = self.H * self.W + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + self.norm = nn.LayerNorm(embed_dim) + + def forward(self, x) -> Tuple[torch.Tensor, Size_]: + B, C, H, W = x.shape + + x = self.proj(x).flatten(2).transpose(1, 2) + x = self.norm(x) + out_size = (H // self.patch_size[0], W // self.patch_size[1]) + + return x, out_size + + +class Twins(nn.Module): + """ Twins Vision Transfomer (Revisiting Spatial Attention) + + Adapted from PVT (PyramidVisionTransformer) class at https://github.com/whai362/PVT.git + """ + def __init__( + self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, embed_dims=(64, 128, 256, 512), + num_heads=(1, 2, 4, 8), mlp_ratios=(4, 4, 4, 4), drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=(3, 4, 6, 3), sr_ratios=(8, 4, 2, 1), wss=None, + block_cls=Block): + super().__init__() + self.num_classes = num_classes + self.depths = depths + self.embed_dims = embed_dims + self.num_features = embed_dims[-1] + + img_size = to_2tuple(img_size) + prev_chs = in_chans + self.patch_embeds = nn.ModuleList() + self.pos_drops = nn.ModuleList() + for i in range(len(depths)): + self.patch_embeds.append(PatchEmbed(img_size, patch_size, prev_chs, embed_dims[i])) + self.pos_drops.append(nn.Dropout(p=drop_rate)) + prev_chs = embed_dims[i] + img_size = tuple(t // patch_size for t in img_size) + patch_size = 2 + + self.blocks = nn.ModuleList() + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + cur = 0 + for k in range(len(depths)): + _block = nn.ModuleList([block_cls( + dim=embed_dims[k], num_heads=num_heads[k], mlp_ratio=mlp_ratios[k], drop=drop_rate, + attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, sr_ratio=sr_ratios[k], + ws=1 if wss is None or i % 2 == 1 else wss[k]) for i in range(depths[k])]) + self.blocks.append(_block) + cur += depths[k] + + self.pos_block = nn.ModuleList([PosConv(embed_dim, embed_dim) for embed_dim in embed_dims]) + + self.norm = norm_layer(self.num_features) + + # classification head + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + # init weights + self.apply(self._init_weights) + + @torch.jit.ignore + def no_weight_decay(self): + return set(['pos_block.' + n for n, p in self.pos_block.named_parameters()]) + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1.0) + m.bias.data.zero_() + + def forward_features(self, x): + B = x.shape[0] + for i, (embed, drop, blocks, pos_blk) in enumerate( + zip(self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)): + x, size = embed(x) + x = drop(x) + for j, blk in enumerate(blocks): + x = blk(x, size) + if j == 0: + x = pos_blk(x, size) # PEG here + if i < len(self.depths) - 1: + x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous() + x = self.norm(x) + return x.mean(dim=1) # GAP here + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_twins(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + Twins, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + return model + + +@register_model +def twins_pcpvt_small(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_pcpvt_small', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_pcpvt_base(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_pcpvt_base', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_pcpvt_large(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_pcpvt_large', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_svt_small(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 256, 512], num_heads=[2, 4, 8, 16], mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 10, 4], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_svt_small', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_svt_base(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[96, 192, 384, 768], num_heads=[3, 6, 12, 24], mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_svt_base', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_svt_large(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[128, 256, 512, 1024], num_heads=[4, 8, 16, 32], mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_svt_large', pretrained=pretrained, **model_kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/vgg.py b/testbed/huggingface__pytorch-image-models/timm/models/vgg.py new file mode 100644 index 0000000000000000000000000000000000000000..8bea03e7ce31bada1790090561c99db9faa5ca76 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/vgg.py @@ -0,0 +1,261 @@ +"""VGG + +Adapted from https://github.com/pytorch/vision 'vgg.py' (BSD-3-Clause) with a few changes for +timm functionality. + +Copyright 2021 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Union, List, Dict, Any, cast + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import ClassifierHead, ConvBnAct +from .registry import register_model + +__all__ = [ + 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', + 'vgg19_bn', 'vgg19', +] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'features.0', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + 'vgg11': _cfg(url='https://download.pytorch.org/models/vgg11-bbd30ac9.pth'), + 'vgg13': _cfg(url='https://download.pytorch.org/models/vgg13-c768596a.pth'), + 'vgg16': _cfg(url='https://download.pytorch.org/models/vgg16-397923af.pth'), + 'vgg19': _cfg(url='https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'), + 'vgg11_bn': _cfg(url='https://download.pytorch.org/models/vgg11_bn-6002323d.pth'), + 'vgg13_bn': _cfg(url='https://download.pytorch.org/models/vgg13_bn-abd245e5.pth'), + 'vgg16_bn': _cfg(url='https://download.pytorch.org/models/vgg16_bn-6c64b313.pth'), + 'vgg19_bn': _cfg(url='https://download.pytorch.org/models/vgg19_bn-c79401a0.pth'), +} + + +cfgs: Dict[str, List[Union[str, int]]] = { + 'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], + 'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], +} + + +class ConvMlp(nn.Module): + + def __init__(self, in_features=512, out_features=4096, kernel_size=7, mlp_ratio=1.0, + drop_rate: float = 0.2, act_layer: nn.Module = None, conv_layer: nn.Module = None): + super(ConvMlp, self).__init__() + self.input_kernel_size = kernel_size + mid_features = int(out_features * mlp_ratio) + self.fc1 = conv_layer(in_features, mid_features, kernel_size, bias=True) + self.act1 = act_layer(True) + self.drop = nn.Dropout(drop_rate) + self.fc2 = conv_layer(mid_features, out_features, 1, bias=True) + self.act2 = act_layer(True) + + def forward(self, x): + if x.shape[-2] < self.input_kernel_size or x.shape[-1] < self.input_kernel_size: + # keep the input size >= 7x7 + output_size = (max(self.input_kernel_size, x.shape[-2]), max(self.input_kernel_size, x.shape[-1])) + x = F.adaptive_avg_pool2d(x, output_size) + x = self.fc1(x) + x = self.act1(x) + x = self.drop(x) + x = self.fc2(x) + x = self.act2(x) + return x + + +class VGG(nn.Module): + + def __init__( + self, + cfg: List[Any], + num_classes: int = 1000, + in_chans: int = 3, + output_stride: int = 32, + mlp_ratio: float = 1.0, + act_layer: nn.Module = nn.ReLU, + conv_layer: nn.Module = nn.Conv2d, + norm_layer: nn.Module = None, + global_pool: str = 'avg', + drop_rate: float = 0., + ) -> None: + super(VGG, self).__init__() + assert output_stride == 32 + self.num_classes = num_classes + self.num_features = 4096 + self.drop_rate = drop_rate + self.feature_info = [] + prev_chs = in_chans + net_stride = 1 + pool_layer = nn.MaxPool2d + layers: List[nn.Module] = [] + for v in cfg: + last_idx = len(layers) - 1 + if v == 'M': + self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{last_idx}')) + layers += [pool_layer(kernel_size=2, stride=2)] + net_stride *= 2 + else: + v = cast(int, v) + conv2d = conv_layer(prev_chs, v, kernel_size=3, padding=1) + if norm_layer is not None: + layers += [conv2d, norm_layer(v), act_layer(inplace=True)] + else: + layers += [conv2d, act_layer(inplace=True)] + prev_chs = v + self.features = nn.Sequential(*layers) + self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{len(layers) - 1}')) + self.pre_logits = ConvMlp( + prev_chs, self.num_features, 7, mlp_ratio=mlp_ratio, + drop_rate=drop_rate, act_layer=act_layer, conv_layer=conv_layer) + self.head = ClassifierHead( + self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + + self._initialize_weights() + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.head = ClassifierHead( + self.num_features, self.num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x: torch.Tensor) -> torch.Tensor: + x = self.features(x) + x = self.pre_logits(x) + return x + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.forward_features(x) + x = self.head(x) + return x + + def _initialize_weights(self) -> None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.constant_(m.bias, 0) + + +def _filter_fn(state_dict): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + out_dict = {} + for k, v in state_dict.items(): + k_r = k + k_r = k_r.replace('classifier.0', 'pre_logits.fc1') + k_r = k_r.replace('classifier.3', 'pre_logits.fc2') + k_r = k_r.replace('classifier.6', 'head.fc') + if 'classifier.0.weight' in k: + v = v.reshape(-1, 512, 7, 7) + if 'classifier.3.weight' in k: + v = v.reshape(-1, 4096, 1, 1) + out_dict[k_r] = v + return out_dict + + +def _create_vgg(variant: str, pretrained: bool, **kwargs: Any) -> VGG: + cfg = variant.split('_')[0] + # NOTE: VGG is one of the only models with stride==1 features, so indices are offset from other models + out_indices = kwargs.get('out_indices', (0, 1, 2, 3, 4, 5)) + model = build_model_with_cfg( + VGG, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=cfgs[cfg], + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + pretrained_filter_fn=_filter_fn, + **kwargs) + return model + + +@register_model +def vgg11(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 11-layer model (configuration "A") from + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg11', pretrained=pretrained, **model_args) + + +@register_model +def vgg11_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 11-layer model (configuration "A") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg11_bn', pretrained=pretrained, **model_args) + + +@register_model +def vgg13(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 13-layer model (configuration "B") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg13', pretrained=pretrained, **model_args) + + +@register_model +def vgg13_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 13-layer model (configuration "B") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg13_bn', pretrained=pretrained, **model_args) + + +@register_model +def vgg16(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 16-layer model (configuration "D") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg16', pretrained=pretrained, **model_args) + + +@register_model +def vgg16_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 16-layer model (configuration "D") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg16_bn', pretrained=pretrained, **model_args) + + +@register_model +def vgg19(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 19-layer model (configuration "E") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg19', pretrained=pretrained, **model_args) + + +@register_model +def vgg19_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 19-layer model (configuration 'E') with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg19_bn', pretrained=pretrained, **model_args) \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/timm/models/visformer.py b/testbed/huggingface__pytorch-image-models/timm/models/visformer.py new file mode 100644 index 0000000000000000000000000000000000000000..7740f38132aef6fb254aca6260881754a0212191 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/visformer.py @@ -0,0 +1,409 @@ +""" Visformer + +Paper: Visformer: The Vision-friendly Transformer - https://arxiv.org/abs/2104.12533 + +From original at https://github.com/danczs/Visformer + +""" +from copy import deepcopy + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import to_2tuple, trunc_normal_, DropPath, PatchEmbed, LayerNorm2d, create_classifier +from .registry import register_model + + +__all__ = ['Visformer'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0', 'classifier': 'head', + **kwargs + } + + +default_cfgs = dict( + visformer_tiny=_cfg(), + visformer_small=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/visformer_small-839e1f5b.pth' + ), +) + + +class SpatialMlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, + act_layer=nn.GELU, drop=0., group=8, spatial_conv=False): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.in_features = in_features + self.out_features = out_features + self.spatial_conv = spatial_conv + if self.spatial_conv: + if group < 2: # net setting + hidden_features = in_features * 5 // 6 + else: + hidden_features = in_features * 2 + self.hidden_features = hidden_features + self.group = group + self.drop = nn.Dropout(drop) + self.conv1 = nn.Conv2d(in_features, hidden_features, 1, stride=1, padding=0, bias=False) + self.act1 = act_layer() + if self.spatial_conv: + self.conv2 = nn.Conv2d( + hidden_features, hidden_features, 3, stride=1, padding=1, groups=self.group, bias=False) + self.act2 = act_layer() + else: + self.conv2 = None + self.act2 = None + self.conv3 = nn.Conv2d(hidden_features, out_features, 1, stride=1, padding=0, bias=False) + + def forward(self, x): + x = self.conv1(x) + x = self.act1(x) + x = self.drop(x) + if self.conv2 is not None: + x = self.conv2(x) + x = self.act2(x) + x = self.conv3(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, head_dim_ratio=1., attn_drop=0., proj_drop=0.): + super().__init__() + self.dim = dim + self.num_heads = num_heads + head_dim = round(dim // num_heads * head_dim_ratio) + self.head_dim = head_dim + self.scale = head_dim ** -0.5 + self.qkv = nn.Conv2d(dim, head_dim * num_heads * 3, 1, stride=1, padding=0, bias=False) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Conv2d(self.head_dim * self.num_heads, dim, 1, stride=1, padding=0, bias=False) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, C, H, W = x.shape + x = self.qkv(x).reshape(B, 3, self.num_heads, self.head_dim, -1).permute(1, 0, 2, 4, 3) + q, k, v = x[0], x[1], x[2] + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.permute(0, 1, 3, 2).reshape(B, -1, H, W) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + def __init__(self, dim, num_heads, head_dim_ratio=1., mlp_ratio=4., + drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=LayerNorm2d, + group=8, attn_disabled=False, spatial_conv=False): + super().__init__() + self.spatial_conv = spatial_conv + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + if attn_disabled: + self.norm1 = None + self.attn = None + else: + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, head_dim_ratio=head_dim_ratio, attn_drop=attn_drop, proj_drop=drop) + + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = SpatialMlp( + in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop, + group=group, spatial_conv=spatial_conv) # new setting + + def forward(self, x): + if self.attn is not None: + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class Visformer(nn.Module): + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, init_channels=32, embed_dim=384, + depth=12, num_heads=6, mlp_ratio=4., drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + norm_layer=LayerNorm2d, attn_stage='111', pos_embed=True, spatial_conv='111', + vit_stem=False, group=8, global_pool='avg', conv_init=False, embed_norm=None): + super().__init__() + img_size = to_2tuple(img_size) + self.num_classes = num_classes + self.embed_dim = embed_dim + self.init_channels = init_channels + self.img_size = img_size + self.vit_stem = vit_stem + self.conv_init = conv_init + if isinstance(depth, (list, tuple)): + self.stage_num1, self.stage_num2, self.stage_num3 = depth + depth = sum(depth) + else: + self.stage_num1 = self.stage_num3 = depth // 3 + self.stage_num2 = depth - self.stage_num1 - self.stage_num3 + self.pos_embed = pos_embed + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] + + # stage 1 + if self.vit_stem: + self.stem = None + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, + embed_dim=embed_dim, norm_layer=embed_norm, flatten=False) + img_size = [x // 16 for x in img_size] + else: + if self.init_channels is None: + self.stem = None + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 2, in_chans=in_chans, + embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False) + img_size = [x // 8 for x in img_size] + else: + self.stem = nn.Sequential( + nn.Conv2d(in_chans, self.init_channels, 7, stride=2, padding=3, bias=False), + nn.BatchNorm2d(self.init_channels), + nn.ReLU(inplace=True) + ) + img_size = [x // 2 for x in img_size] + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 4, in_chans=self.init_channels, + embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False) + img_size = [x // 4 for x in img_size] + + if self.pos_embed: + if self.vit_stem: + self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) + else: + self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim//2, *img_size)) + self.pos_drop = nn.Dropout(p=drop_rate) + self.stage1 = nn.ModuleList([ + Block( + dim=embed_dim//2, num_heads=num_heads, head_dim_ratio=0.5, mlp_ratio=mlp_ratio, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + group=group, attn_disabled=(attn_stage[0] == '0'), spatial_conv=(spatial_conv[0] == '1') + ) + for i in range(self.stage_num1) + ]) + + # stage2 + if not self.vit_stem: + self.patch_embed2 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim // 2, + embed_dim=embed_dim, norm_layer=embed_norm, flatten=False) + img_size = [x // 2 for x in img_size] + if self.pos_embed: + self.pos_embed2 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) + self.stage2 = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, head_dim_ratio=1.0, mlp_ratio=mlp_ratio, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + group=group, attn_disabled=(attn_stage[1] == '0'), spatial_conv=(spatial_conv[1] == '1') + ) + for i in range(self.stage_num1, self.stage_num1+self.stage_num2) + ]) + + # stage 3 + if not self.vit_stem: + self.patch_embed3 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim, + embed_dim=embed_dim * 2, norm_layer=embed_norm, flatten=False) + img_size = [x // 2 for x in img_size] + if self.pos_embed: + self.pos_embed3 = nn.Parameter(torch.zeros(1, embed_dim*2, *img_size)) + self.stage3 = nn.ModuleList([ + Block( + dim=embed_dim*2, num_heads=num_heads, head_dim_ratio=1.0, mlp_ratio=mlp_ratio, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + group=group, attn_disabled=(attn_stage[2] == '0'), spatial_conv=(spatial_conv[2] == '1') + ) + for i in range(self.stage_num1+self.stage_num2, depth) + ]) + + # head + self.num_features = embed_dim if self.vit_stem else embed_dim * 2 + self.norm = norm_layer(self.num_features) + self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + # weights init + if self.pos_embed: + trunc_normal_(self.pos_embed1, std=0.02) + if not self.vit_stem: + trunc_normal_(self.pos_embed2, std=0.02) + trunc_normal_(self.pos_embed3, std=0.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + if self.conv_init: + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + else: + trunc_normal_(m.weight, std=0.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0.) + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + if self.stem is not None: + x = self.stem(x) + + # stage 1 + x = self.patch_embed1(x) + if self.pos_embed: + x = x + self.pos_embed1 + x = self.pos_drop(x) + for b in self.stage1: + x = b(x) + + # stage 2 + if not self.vit_stem: + x = self.patch_embed2(x) + if self.pos_embed: + x = x + self.pos_embed2 + x = self.pos_drop(x) + for b in self.stage2: + x = b(x) + + # stage3 + if not self.vit_stem: + x = self.patch_embed3(x) + if self.pos_embed: + x = x + self.pos_embed3 + x = self.pos_drop(x) + for b in self.stage3: + x = b(x) + + x = self.norm(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + x = self.head(x) + return x + + +def _create_visformer(variant, pretrained=False, default_cfg=None, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + model = build_model_with_cfg( + Visformer, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + return model + + +@register_model +def visformer_tiny(pretrained=False, **kwargs): + model_cfg = dict( + init_channels=16, embed_dim=192, depth=(7, 4, 4), num_heads=3, mlp_ratio=4., group=8, + attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True, + embed_norm=nn.BatchNorm2d, **kwargs) + model = _create_visformer('visformer_tiny', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def visformer_small(pretrained=False, **kwargs): + model_cfg = dict( + init_channels=32, embed_dim=384, depth=(7, 4, 4), num_heads=6, mlp_ratio=4., group=8, + attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True, + embed_norm=nn.BatchNorm2d, **kwargs) + model = _create_visformer('visformer_small', pretrained=pretrained, **model_cfg) + return model + + +# @register_model +# def visformer_net1(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=None, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=True, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net2(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net3(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net4(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net5(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111', +# spatial_conv='111', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net6(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111', +# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net7(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=(6, 7, 7), num_heads=6, group=1, attn_stage='000', +# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model + + + + diff --git a/testbed/huggingface__pytorch-image-models/timm/models/vision_transformer.py b/testbed/huggingface__pytorch-image-models/timm/models/vision_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..ca8f52defd27160a5dffd43f0d773c756ff716e3 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/vision_transformer.py @@ -0,0 +1,896 @@ +""" Vision Transformer (ViT) in PyTorch + +A PyTorch implement of Vision Transformers as described in: + +'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' + - https://arxiv.org/abs/2010.11929 + +`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` + - https://arxiv.org/abs/2106.10270 + +The official jax code is released and available at https://github.com/google-research/vision_transformer + +DeiT model defs and weights from https://github.com/facebookresearch/deit, +paper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877 + +Acknowledgments: +* The paper authors for releasing code and weights, thanks! +* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out +for some einops/einsum fun +* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT +* Bert reference code checks against Huggingface Transformers and Tensorflow Bert + +Hacked together by / Copyright 2021 Ross Wightman +""" +import math +import logging +from functools import partial +from collections import OrderedDict +from copy import deepcopy + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg, named_apply, adapt_input_conv +from .layers import PatchEmbed, Mlp, DropPath, trunc_normal_, lecun_normal_ +from .registry import register_model + +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # patch models (weights from official Google JAX impl) + 'vit_tiny_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_tiny_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_small_patch32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_small_patch32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_small_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_small_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_base_patch32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz'), + 'vit_base_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_large_patch32_224': _cfg( + url='', # no official model weights for this combo, only for in21k + ), + 'vit_large_patch32_384': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_large_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz'), + 'vit_large_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + + # patch models, imagenet21k (weights from official Google JAX impl) + 'vit_tiny_patch16_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_small_patch32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_small_patch16_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_base_patch32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_base_patch16_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_large_patch32_224_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth', + num_classes=21843), + 'vit_large_patch16_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1.npz', + num_classes=21843), + 'vit_huge_patch14_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/imagenet21k/ViT-H_14.npz', + hf_hub='timm/vit_huge_patch14_224_in21k', + num_classes=21843), + + # SAM trained models (https://arxiv.org/abs/2106.01548) + 'vit_base_patch32_sam_224': _cfg( + url='https://storage.googleapis.com/vit_models/sam/ViT-B_32.npz'), + 'vit_base_patch16_sam_224': _cfg( + url='https://storage.googleapis.com/vit_models/sam/ViT-B_16.npz'), + + # deit models (FB weights) + 'deit_tiny_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'deit_small_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'deit_base_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'deit_base_patch16_384': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 384, 384), crop_pct=1.0), + 'deit_tiny_distilled_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, classifier=('head', 'head_dist')), + 'deit_small_distilled_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, classifier=('head', 'head_dist')), + 'deit_base_distilled_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, classifier=('head', 'head_dist')), + 'deit_base_distilled_patch16_384': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 384, 384), crop_pct=1.0, + classifier=('head', 'head_dist')), + + # ViT ImageNet-21K-P pretraining by MILL + 'vit_base_patch16_224_miil_in21k': _cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/vit_base_patch16_224_in21k_miil.pth', + mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', num_classes=11221, + ), + 'vit_base_patch16_224_miil': _cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm' + '/vit_base_patch16_224_1k_miil_84_4.pth', + mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', + ), +} + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class VisionTransformer(nn.Module): + """ Vision Transformer + + A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` + - https://arxiv.org/abs/2010.11929 + + Includes distillation token & head support for `DeiT: Data-efficient Image Transformers` + - https://arxiv.org/abs/2012.12877 + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None, + act_layer=None, weight_init=''): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set + distilled (bool): model includes a distillation token and head as in DeiT models + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + embed_layer (nn.Module): patch embedding layer + norm_layer: (nn.Module): normalization layer + weight_init: (str): weight init scheme + """ + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.num_tokens = 2 if distilled else 1 + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or nn.GELU + + self.patch_embed = embed_layer( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.Sequential(*[ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, + attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + # Representation layer + if representation_size and not distilled: + self.num_features = representation_size + self.pre_logits = nn.Sequential(OrderedDict([ + ('fc', nn.Linear(embed_dim, representation_size)), + ('act', nn.Tanh()) + ])) + else: + self.pre_logits = nn.Identity() + + # Classifier head(s) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + self.head_dist = None + if distilled: + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + self.init_weights(weight_init) + + def init_weights(self, mode=''): + assert mode in ('jax', 'jax_nlhb', 'nlhb', '') + head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. + trunc_normal_(self.pos_embed, std=.02) + if self.dist_token is not None: + trunc_normal_(self.dist_token, std=.02) + if mode.startswith('jax'): + # leave cls token as zeros to match jax impl + named_apply(partial(_init_vit_weights, head_bias=head_bias, jax_impl=True), self) + else: + trunc_normal_(self.cls_token, std=.02) + self.apply(_init_vit_weights) + + def _init_weights(self, m): + # this fn left here for compat with downstream users + _init_vit_weights(m) + + @torch.jit.ignore() + def load_pretrained(self, checkpoint_path, prefix=''): + _load_weights(self, checkpoint_path, prefix) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token', 'dist_token'} + + def get_classifier(self): + if self.dist_token is None: + return self.head + else: + return self.head, self.head_dist + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + if self.num_tokens == 2: + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks + if self.dist_token is None: + x = torch.cat((cls_token, x), dim=1) + else: + x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1) + x = self.pos_drop(x + self.pos_embed) + x = self.blocks(x) + x = self.norm(x) + if self.dist_token is None: + return self.pre_logits(x[:, 0]) + else: + return x[:, 0], x[:, 1] + + def forward(self, x): + x = self.forward_features(x) + if self.head_dist is not None: + x, x_dist = self.head(x[0]), self.head_dist(x[1]) # x must be a tuple + if self.training and not torch.jit.is_scripting(): + # during inference, return the average of both classifier predictions + return x, x_dist + else: + return (x + x_dist) / 2 + else: + x = self.head(x) + return x + + +def _init_vit_weights(module: nn.Module, name: str = '', head_bias: float = 0., jax_impl: bool = False): + """ ViT weight initialization + * When called without n, head_bias, jax_impl args it will behave exactly the same + as my original init for compatibility with prev hparam / downstream use cases (ie DeiT). + * When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl + """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + nn.init.zeros_(module.weight) + nn.init.constant_(module.bias, head_bias) + elif name.startswith('pre_logits'): + lecun_normal_(module.weight) + nn.init.zeros_(module.bias) + else: + if jax_impl: + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + if 'mlp' in name: + nn.init.normal_(module.bias, std=1e-6) + else: + nn.init.zeros_(module.bias) + else: + trunc_normal_(module.weight, std=.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif jax_impl and isinstance(module, nn.Conv2d): + # NOTE conv was left to pytorch default in my original init + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)): + nn.init.zeros_(module.bias) + nn.init.ones_(module.weight) + + +@torch.no_grad() +def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''): + """ Load weights from .npz checkpoints for official Google Brain Flax implementation + """ + import numpy as np + + def _n2p(w, t=True): + if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: + w = w.flatten() + if t: + if w.ndim == 4: + w = w.transpose([3, 2, 0, 1]) + elif w.ndim == 3: + w = w.transpose([2, 0, 1]) + elif w.ndim == 2: + w = w.transpose([1, 0]) + return torch.from_numpy(w) + + w = np.load(checkpoint_path) + if not prefix and 'opt/target/embedding/kernel' in w: + prefix = 'opt/target/' + + if hasattr(model.patch_embed, 'backbone'): + # hybrid + backbone = model.patch_embed.backbone + stem_only = not hasattr(backbone, 'stem') + stem = backbone if stem_only else backbone.stem + stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) + stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) + stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) + if not stem_only: + for i, stage in enumerate(backbone.stages): + for j, block in enumerate(stage.blocks): + bp = f'{prefix}block{i + 1}/unit{j + 1}/' + for r in range(3): + getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) + getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) + getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) + if block.downsample is not None: + block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) + block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) + block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) + embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) + else: + embed_conv_w = adapt_input_conv( + model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) + model.patch_embed.proj.weight.copy_(embed_conv_w) + model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) + model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) + pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) + if pos_embed_w.shape != model.pos_embed.shape: + pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights + pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) + model.pos_embed.copy_(pos_embed_w) + model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) + model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) + if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: + model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) + model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) + if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: + model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) + model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) + for i, block in enumerate(model.blocks.children()): + block_prefix = f'{prefix}Transformer/encoderblock_{i}/' + mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/' + block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) + block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) + block.attn.qkv.weight.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')])) + block.attn.qkv.bias.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')])) + block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) + block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) + for r in range(2): + getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel'])) + getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias'])) + block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale'])) + block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias'])) + + +def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()): + # Rescale the grid of position embeddings when loading from state_dict. Adapted from + # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224 + _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) + ntok_new = posemb_new.shape[1] + if num_tokens: + posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:] + ntok_new -= num_tokens + else: + posemb_tok, posemb_grid = posemb[:, :0], posemb[0] + gs_old = int(math.sqrt(len(posemb_grid))) + if not len(gs_new): # backwards compatibility + gs_new = [int(math.sqrt(ntok_new))] * 2 + assert len(gs_new) >= 2 + _logger.info('Position embedding grid-size from %s to %s', [gs_old, gs_old], gs_new) + posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) + posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bicubic', align_corners=False) + posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1) + posemb = torch.cat([posemb_tok, posemb_grid], dim=1) + return posemb + + +def checkpoint_filter_fn(state_dict, model): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + out_dict = {} + if 'model' in state_dict: + # For deit models + state_dict = state_dict['model'] + for k, v in state_dict.items(): + if 'patch_embed.proj.weight' in k and len(v.shape) < 4: + # For old models that I trained prior to conv based patchification + O, I, H, W = model.patch_embed.proj.weight.shape + v = v.reshape(O, -1, H, W) + elif k == 'pos_embed' and v.shape != model.pos_embed.shape: + # To resize pos embedding when using model at different size from pretrained weights + v = resize_pos_embed( + v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) + out_dict[k] = v + return out_dict + + +def _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs): + default_cfg = default_cfg or default_cfgs[variant] + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + # NOTE this extra code to support handling of repr size for in21k pretrained models + default_num_classes = default_cfg['num_classes'] + num_classes = kwargs.get('num_classes', default_num_classes) + repr_size = kwargs.pop('representation_size', None) + if repr_size is not None and num_classes != default_num_classes: + # Remove representation layer if fine-tuning. This may not always be the desired action, + # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? + _logger.warning("Removing representation layer for fine-tuning.") + repr_size = None + + model = build_model_with_cfg( + VisionTransformer, variant, pretrained, + default_cfg=default_cfg, + representation_size=repr_size, + pretrained_filter_fn=checkpoint_filter_fn, + pretrained_custom_load='npz' in default_cfg['url'], + **kwargs) + return model + + +@register_model +def vit_tiny_patch16_224(pretrained=False, **kwargs): + """ ViT-Tiny (Vit-Ti/16) + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('vit_tiny_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_patch16_384(pretrained=False, **kwargs): + """ ViT-Tiny (Vit-Ti/16) @ 384x384. + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('vit_tiny_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch32_224(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/32) + """ + model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch32_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch32_384(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/32) at 384x384. + """ + model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch32_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_224(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + NOTE I've replaced my previous 'small' model definition and weights with the small variant from the DeiT paper + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_384(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + NOTE I've replaced my previous 'small' model definition and weights with the small variant from the DeiT paper + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch32_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_384(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch32_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_384(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch32_224(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights. + """ + model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch32_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch32_384(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch32_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch16_224(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch16_384(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_sam_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) w/ SAM pretrained weights. Paper: https://arxiv.org/abs/2106.01548 + """ + # NOTE original SAM weights release worked with representation_size=768 + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, representation_size=0, **kwargs) + model = _create_vision_transformer('vit_base_patch16_sam_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_sam_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/32) w/ SAM pretrained weights. Paper: https://arxiv.org/abs/2106.01548 + """ + # NOTE original SAM weights release worked with representation_size=768 + model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, representation_size=0, **kwargs) + model = _create_vision_transformer('vit_base_patch32_sam_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Tiny (Vit-Ti/16). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('vit_tiny_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch32_224_in21k(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch32_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_224_in21k(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict( + patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch32_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch32_224_in21k(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has a representation layer but the 21k classifier head is zero'd out in original weights + """ + model_kwargs = dict( + patch_size=32, embed_dim=1024, depth=24, num_heads=16, representation_size=1024, **kwargs) + model = _create_vision_transformer('vit_large_patch32_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_huge_patch14_224_in21k(pretrained=False, **kwargs): + """ ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has a representation layer but the 21k classifier head is zero'd out in original weights + """ + model_kwargs = dict( + patch_size=14, embed_dim=1280, depth=32, num_heads=16, representation_size=1280, **kwargs) + model = _create_vision_transformer('vit_huge_patch14_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_tiny_patch16_224(pretrained=False, **kwargs): + """ DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('deit_tiny_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_small_patch16_224(pretrained=False, **kwargs): + """ DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('deit_small_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_base_patch16_224(pretrained=False, **kwargs): + """ DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('deit_base_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_base_patch16_384(pretrained=False, **kwargs): + """ DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('deit_base_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs): + """ DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer( + 'deit_tiny_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_kwargs) + return model + + +@register_model +def deit_small_distilled_patch16_224(pretrained=False, **kwargs): + """ DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer( + 'deit_small_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_kwargs) + return model + + +@register_model +def deit_base_distilled_patch16_224(pretrained=False, **kwargs): + """ DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer( + 'deit_base_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_kwargs) + return model + + +@register_model +def deit_base_distilled_patch16_384(pretrained=False, **kwargs): + """ DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer( + 'deit_base_distilled_patch16_384', pretrained=pretrained, distilled=True, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_miil_in21k(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_miil_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_miil(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_miil', pretrained=pretrained, **model_kwargs) + return model \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/timm/models/vision_transformer_hybrid.py b/testbed/huggingface__pytorch-image-models/timm/models/vision_transformer_hybrid.py new file mode 100644 index 0000000000000000000000000000000000000000..d5f0a5377ec9492c5ed55ceb3ce5a4378cbb8e3c --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/vision_transformer_hybrid.py @@ -0,0 +1,363 @@ +""" Hybrid Vision Transformer (ViT) in PyTorch + +A PyTorch implement of the Hybrid Vision Transformers as described in: + +'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' + - https://arxiv.org/abs/2010.11929 + +`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` + - https://arxiv.org/abs/2106.TODO + +NOTE These hybrid model definitions depend on code in vision_transformer.py. +They were moved here to keep file sizes sane. + +Hacked together by / Copyright 2021 Ross Wightman +""" +from copy import deepcopy +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .layers import StdConv2dSame, StdConv2d, to_2tuple +from .resnet import resnet26d, resnet50d +from .resnetv2 import ResNetV2, create_resnetv2_stem +from .registry import register_model +from timm.models.vision_transformer import _create_vision_transformer + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + 'first_conv': 'patch_embed.backbone.stem.conv', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # hybrid in-1k models (weights from official JAX impl where they exist) + 'vit_tiny_r_s16_p8_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', + first_conv='patch_embed.backbone.conv'), + 'vit_tiny_r_s16_p8_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + first_conv='patch_embed.backbone.conv', input_size=(3, 384, 384), crop_pct=1.0), + 'vit_small_r26_s32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R26_S_32-i21k-300ep-lr_0.001-aug_light0-wd_0.03-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.03-res_224.npz', + ), + 'vit_small_r26_s32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_r26_s32_224': _cfg(), + 'vit_base_r50_s16_224': _cfg(), + 'vit_base_r50_s16_384': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_large_r50_s32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R50_L_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz' + ), + 'vit_large_r50_s32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0 + ), + + # hybrid in-21k models (weights from official Google JAX impl where they exist) + 'vit_tiny_r_s16_p8_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843, crop_pct=0.9, first_conv='patch_embed.backbone.conv'), + 'vit_small_r26_s32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843, crop_pct=0.9), + 'vit_base_r50_s16_224_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_224_in21k-6f7c7740.pth', + num_classes=21843, crop_pct=0.9), + 'vit_large_r50_s32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0.npz', + num_classes=21843, crop_pct=0.9), + + # hybrid models (using timm resnet backbones) + 'vit_small_resnet26d_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + 'vit_small_resnet50d_s16_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + 'vit_base_resnet26d_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + 'vit_base_resnet50d_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), +} + + +class HybridEmbed(nn.Module): + """ CNN Feature Map Embedding + Extract feature map from CNN, flatten, project to embedding dim. + """ + def __init__(self, backbone, img_size=224, patch_size=1, feature_size=None, in_chans=3, embed_dim=768): + super().__init__() + assert isinstance(backbone, nn.Module) + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.backbone = backbone + if feature_size is None: + with torch.no_grad(): + # NOTE Most reliable way of determining output dims is to run forward pass + training = backbone.training + if training: + backbone.eval() + o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1])) + if isinstance(o, (list, tuple)): + o = o[-1] # last feature if backbone outputs list/tuple of features + feature_size = o.shape[-2:] + feature_dim = o.shape[1] + backbone.train(training) + else: + feature_size = to_2tuple(feature_size) + if hasattr(self.backbone, 'feature_info'): + feature_dim = self.backbone.feature_info.channels()[-1] + else: + feature_dim = self.backbone.num_features + assert feature_size[0] % patch_size[0] == 0 and feature_size[1] % patch_size[1] == 0 + self.grid_size = (feature_size[0] // patch_size[0], feature_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.proj = nn.Conv2d(feature_dim, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + x = self.backbone(x) + if isinstance(x, (list, tuple)): + x = x[-1] # last feature if backbone outputs list/tuple of features + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +def _create_vision_transformer_hybrid(variant, backbone, pretrained=False, **kwargs): + embed_layer = partial(HybridEmbed, backbone=backbone) + kwargs.setdefault('patch_size', 1) # default patch size for hybrid models if not set + return _create_vision_transformer( + variant, pretrained=pretrained, embed_layer=embed_layer, default_cfg=default_cfgs[variant], **kwargs) + + +def _resnetv2(layers=(3, 4, 9), **kwargs): + """ ResNet-V2 backbone helper""" + padding_same = kwargs.get('padding_same', True) + stem_type = 'same' if padding_same else '' + conv_layer = partial(StdConv2dSame, eps=1e-8) if padding_same else partial(StdConv2d, eps=1e-8) + if len(layers): + backbone = ResNetV2( + layers=layers, num_classes=0, global_pool='', in_chans=kwargs.get('in_chans', 3), + preact=False, stem_type=stem_type, conv_layer=conv_layer) + else: + backbone = create_resnetv2_stem( + kwargs.get('in_chans', 3), stem_type=stem_type, preact=False, conv_layer=conv_layer) + return backbone + + +@register_model +def vit_tiny_r_s16_p8_224(pretrained=False, **kwargs): + """ R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 224 x 224. + """ + backbone = _resnetv2(layers=(), **kwargs) + model_kwargs = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_tiny_r_s16_p8_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_r_s16_p8_384(pretrained=False, **kwargs): + """ R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 384 x 384. + """ + backbone = _resnetv2(layers=(), **kwargs) + model_kwargs = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_tiny_r_s16_p8_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_r26_s32_224(pretrained=False, **kwargs): + """ R26+ViT-S/S32 hybrid. + """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_r26_s32_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_r26_s32_384(pretrained=False, **kwargs): + """ R26+ViT-S/S32 hybrid. + """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_r26_s32_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r26_s32_224(pretrained=False, **kwargs): + """ R26+ViT-B/S32 hybrid. + """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r26_s32_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r50_s16_224(pretrained=False, **kwargs): + """ R50+ViT-B/S16 hybrid from original paper (https://arxiv.org/abs/2010.11929). + """ + backbone = _resnetv2((3, 4, 9), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r50_s16_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r50_s16_384(pretrained=False, **kwargs): + """ R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + backbone = _resnetv2((3, 4, 9), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r50_s16_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet50_384(pretrained=False, **kwargs): + # DEPRECATED this is forwarding to model def above for backwards compatibility + return vit_base_r50_s16_384(pretrained=pretrained, **kwargs) + + +@register_model +def vit_large_r50_s32_224(pretrained=False, **kwargs): + """ R50+ViT-L/S32 hybrid. + """ + backbone = _resnetv2((3, 4, 6, 3), **kwargs) + model_kwargs = dict(embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_large_r50_s32_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_r50_s32_384(pretrained=False, **kwargs): + """ R50+ViT-L/S32 hybrid. + """ + backbone = _resnetv2((3, 4, 6, 3), **kwargs) + model_kwargs = dict(embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_large_r50_s32_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_r_s16_p8_224_in21k(pretrained=False, **kwargs): + """ R+ViT-Ti/S16 w/ 8x8 patch hybrid. ImageNet-21k. + """ + backbone = _resnetv2(layers=(), **kwargs) + model_kwargs = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_tiny_r_s16_p8_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_r26_s32_224_in21k(pretrained=False, **kwargs): + """ R26+ViT-S/S32 hybrid. ImageNet-21k. + """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_r26_s32_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r50_s16_224_in21k(pretrained=False, **kwargs): + """ R50+ViT-B/16 hybrid model from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + """ + backbone = _resnetv2(layers=(3, 4, 9), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, representation_size=768, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r50_s16_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet50_224_in21k(pretrained=False, **kwargs): + # DEPRECATED this is forwarding to model def above for backwards compatibility + return vit_base_r50_s16_224_in21k(pretrained=pretrained, **kwargs) + + +@register_model +def vit_large_r50_s32_224_in21k(pretrained=False, **kwargs): + """ R50+ViT-L/S32 hybrid. ImageNet-21k. + """ + backbone = _resnetv2((3, 4, 6, 3), **kwargs) + model_kwargs = dict(embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_large_r50_s32_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_resnet26d_224(pretrained=False, **kwargs): + """ Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights. + """ + backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) + model_kwargs = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_resnet26d_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_resnet50d_s16_224(pretrained=False, **kwargs): + """ Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights. + """ + backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[3]) + model_kwargs = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_resnet50d_s16_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet26d_224(pretrained=False, **kwargs): + """ Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights. + """ + backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_resnet26d_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet50d_224(pretrained=False, **kwargs): + """ Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights. + """ + backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_resnet50d_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/timm/models/vovnet.py b/testbed/huggingface__pytorch-image-models/timm/models/vovnet.py new file mode 100644 index 0000000000000000000000000000000000000000..ec5b3e81608b05c54b4e3725b1838d8395aa33ca --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/vovnet.py @@ -0,0 +1,406 @@ +""" VoVNet (V1 & V2) + +Papers: +* `An Energy and GPU-Computation Efficient Backbone Network` - https://arxiv.org/abs/1904.09730 +* `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + +Looked at https://github.com/youngwanLEE/vovnet-detectron2 & +https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py +for some reference, rewrote most of the code. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .registry import register_model +from .helpers import build_model_with_cfg +from .layers import ConvBnAct, SeparableConvBnAct, BatchNormAct2d, ClassifierHead, DropPath,\ + create_attn, create_norm_act, get_norm_act_layer + + +# model cfgs adapted from https://github.com/youngwanLEE/vovnet-detectron2 & +# https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py +model_cfgs = dict( + vovnet39a=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 2, 2], + residual=False, + depthwise=False, + attn='', + ), + vovnet57a=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 4, 3], + residual=False, + depthwise=False, + attn='', + + ), + ese_vovnet19b_slim_dw=dict( + stem_chs=[64, 64, 64], + stage_conv_chs=[64, 80, 96, 112], + stage_out_chs=[112, 256, 384, 512], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=True, + attn='ese', + + ), + ese_vovnet19b_dw=dict( + stem_chs=[64, 64, 64], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=True, + attn='ese', + ), + ese_vovnet19b_slim=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[64, 80, 96, 112], + stage_out_chs=[112, 256, 384, 512], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=False, + attn='ese', + ), + ese_vovnet19b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=False, + attn='ese', + + ), + ese_vovnet39b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 2, 2], + residual=True, + depthwise=False, + attn='ese', + ), + ese_vovnet57b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 4, 3], + residual=True, + depthwise=False, + attn='ese', + + ), + ese_vovnet99b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 3, 9, 3], + residual=True, + depthwise=False, + attn='ese', + ), + eca_vovnet39b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 2, 2], + residual=True, + depthwise=False, + attn='eca', + ), +) +model_cfgs['ese_vovnet39b_evos'] = model_cfgs['ese_vovnet39b'] +model_cfgs['ese_vovnet99b_iabn'] = model_cfgs['ese_vovnet99b'] + + +def _cfg(url=''): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', + } + + +default_cfgs = dict( + vovnet39a=_cfg(url=''), + vovnet57a=_cfg(url=''), + ese_vovnet19b_slim_dw=_cfg(url=''), + ese_vovnet19b_dw=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ese_vovnet19b_dw-a8741004.pth'), + ese_vovnet19b_slim=_cfg(url=''), + ese_vovnet39b=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ese_vovnet39b-f912fe73.pth'), + ese_vovnet57b=_cfg(url=''), + ese_vovnet99b=_cfg(url=''), + eca_vovnet39b=_cfg(url=''), + ese_vovnet39b_evos=_cfg(url=''), + ese_vovnet99b_iabn=_cfg(url=''), +) + + +class SequentialAppendList(nn.Sequential): + def __init__(self, *args): + super(SequentialAppendList, self).__init__(*args) + + def forward(self, x: torch.Tensor, concat_list: List[torch.Tensor]) -> torch.Tensor: + for i, module in enumerate(self): + if i == 0: + concat_list.append(module(x)) + else: + concat_list.append(module(concat_list[-1])) + x = torch.cat(concat_list, dim=1) + return x + + +class OsaBlock(nn.Module): + + def __init__(self, in_chs, mid_chs, out_chs, layer_per_block, residual=False, + depthwise=False, attn='', norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_path=None): + super(OsaBlock, self).__init__() + + self.residual = residual + self.depthwise = depthwise + conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) + + next_in_chs = in_chs + if self.depthwise and next_in_chs != mid_chs: + assert not residual + self.conv_reduction = ConvBnAct(next_in_chs, mid_chs, 1, **conv_kwargs) + else: + self.conv_reduction = None + + mid_convs = [] + for i in range(layer_per_block): + if self.depthwise: + conv = SeparableConvBnAct(mid_chs, mid_chs, **conv_kwargs) + else: + conv = ConvBnAct(next_in_chs, mid_chs, 3, **conv_kwargs) + next_in_chs = mid_chs + mid_convs.append(conv) + self.conv_mid = SequentialAppendList(*mid_convs) + + # feature aggregation + next_in_chs = in_chs + layer_per_block * mid_chs + self.conv_concat = ConvBnAct(next_in_chs, out_chs, **conv_kwargs) + + if attn: + self.attn = create_attn(attn, out_chs) + else: + self.attn = None + + self.drop_path = drop_path + + def forward(self, x): + output = [x] + if self.conv_reduction is not None: + x = self.conv_reduction(x) + x = self.conv_mid(x, output) + x = self.conv_concat(x) + if self.attn is not None: + x = self.attn(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.residual: + x = x + output[0] + return x + + +class OsaStage(nn.Module): + + def __init__(self, in_chs, mid_chs, out_chs, block_per_stage, layer_per_block, downsample=True, + residual=True, depthwise=False, attn='ese', norm_layer=BatchNormAct2d, act_layer=nn.ReLU, + drop_path_rates=None): + super(OsaStage, self).__init__() + + if downsample: + self.pool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) + else: + self.pool = None + + blocks = [] + for i in range(block_per_stage): + last_block = i == block_per_stage - 1 + if drop_path_rates is not None and drop_path_rates[i] > 0.: + drop_path = DropPath(drop_path_rates[i]) + else: + drop_path = None + blocks += [OsaBlock( + in_chs, mid_chs, out_chs, layer_per_block, residual=residual and i > 0, depthwise=depthwise, + attn=attn if last_block else '', norm_layer=norm_layer, act_layer=act_layer, drop_path=drop_path) + ] + in_chs = out_chs + self.blocks = nn.Sequential(*blocks) + + def forward(self, x): + if self.pool is not None: + x = self.pool(x) + x = self.blocks(x) + return x + + +class VovNet(nn.Module): + + def __init__(self, cfg, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0., stem_stride=4, + output_stride=32, norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_path_rate=0.): + """ VovNet (v2) + """ + super(VovNet, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert stem_stride in (4, 2) + assert output_stride == 32 # FIXME support dilation + + stem_chs = cfg["stem_chs"] + stage_conv_chs = cfg["stage_conv_chs"] + stage_out_chs = cfg["stage_out_chs"] + block_per_stage = cfg["block_per_stage"] + layer_per_block = cfg["layer_per_block"] + conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) + + # Stem module + last_stem_stride = stem_stride // 2 + conv_type = SeparableConvBnAct if cfg["depthwise"] else ConvBnAct + self.stem = nn.Sequential(*[ + ConvBnAct(in_chans, stem_chs[0], 3, stride=2, **conv_kwargs), + conv_type(stem_chs[0], stem_chs[1], 3, stride=1, **conv_kwargs), + conv_type(stem_chs[1], stem_chs[2], 3, stride=last_stem_stride, **conv_kwargs), + ]) + self.feature_info = [dict( + num_chs=stem_chs[1], reduction=2, module=f'stem.{1 if stem_stride == 4 else 2}')] + current_stride = stem_stride + + # OSA stages + stage_dpr = torch.split(torch.linspace(0, drop_path_rate, sum(block_per_stage)), block_per_stage) + in_ch_list = stem_chs[-1:] + stage_out_chs[:-1] + stage_args = dict(residual=cfg["residual"], depthwise=cfg["depthwise"], attn=cfg["attn"], **conv_kwargs) + stages = [] + for i in range(4): # num_stages + downsample = stem_stride == 2 or i > 0 # first stage has no stride/downsample if stem_stride is 4 + stages += [OsaStage( + in_ch_list[i], stage_conv_chs[i], stage_out_chs[i], block_per_stage[i], layer_per_block, + downsample=downsample, drop_path_rates=stage_dpr[i], **stage_args) + ] + self.num_features = stage_out_chs[i] + current_stride *= 2 if downsample else 1 + self.feature_info += [dict(num_chs=self.num_features, reduction=current_stride, module=f'stages.{i}')] + + self.stages = nn.Sequential(*stages) + + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) + elif isinstance(m, nn.Linear): + nn.init.zeros_(m.bias) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + return self.stages(x) + + def forward(self, x): + x = self.forward_features(x) + return self.head(x) + + +def _create_vovnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + VovNet, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=model_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) + + +@register_model +def vovnet39a(pretrained=False, **kwargs): + return _create_vovnet('vovnet39a', pretrained=pretrained, **kwargs) + + +@register_model +def vovnet57a(pretrained=False, **kwargs): + return _create_vovnet('vovnet57a', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet19b_slim_dw(pretrained=False, **kwargs): + return _create_vovnet('ese_vovnet19b_slim_dw', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet19b_dw(pretrained=False, **kwargs): + return _create_vovnet('ese_vovnet19b_dw', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet19b_slim(pretrained=False, **kwargs): + return _create_vovnet('ese_vovnet19b_slim', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet39b(pretrained=False, **kwargs): + return _create_vovnet('ese_vovnet39b', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet57b(pretrained=False, **kwargs): + return _create_vovnet('ese_vovnet57b', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet99b(pretrained=False, **kwargs): + return _create_vovnet('ese_vovnet99b', pretrained=pretrained, **kwargs) + + +@register_model +def eca_vovnet39b(pretrained=False, **kwargs): + return _create_vovnet('eca_vovnet39b', pretrained=pretrained, **kwargs) + + +# Experimental Models + +@register_model +def ese_vovnet39b_evos(pretrained=False, **kwargs): + def norm_act_fn(num_features, **nkwargs): + return create_norm_act('EvoNormSample', num_features, jit=False, **nkwargs) + return _create_vovnet('ese_vovnet39b_evos', pretrained=pretrained, norm_layer=norm_act_fn, **kwargs) + + +@register_model +def ese_vovnet99b_iabn(pretrained=False, **kwargs): + norm_layer = get_norm_act_layer('iabn') + return _create_vovnet( + 'ese_vovnet99b_iabn', pretrained=pretrained, norm_layer=norm_layer, act_layer=nn.LeakyReLU, **kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/xception.py b/testbed/huggingface__pytorch-image-models/timm/models/xception.py new file mode 100644 index 0000000000000000000000000000000000000000..86f558cb5b2b890ef74d11c99eea50c33eff653e --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/xception.py @@ -0,0 +1,232 @@ +""" +Ported to pytorch thanks to [tstandley](https://github.com/tstandley/Xception-PyTorch) + +@author: tstandley +Adapted by cadene + +Creates an Xception Model as defined in: + +Francois Chollet +Xception: Deep Learning with Depthwise Separable Convolutions +https://arxiv.org/pdf/1610.02357.pdf + +This weights ported from the Keras implementation. Achieves the following performance on the validation set: + +Loss:0.9173 Prec@1:78.892 Prec@5:94.292 + +REMEMBER to set your image size to 3x299x299 for both test and validation + +normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], + std=[0.5, 0.5, 0.5]) + +The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 +""" + +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['Xception'] + +default_cfgs = { + 'xception': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/xception-43020ad28.pth', + 'input_size': (3, 299, 299), + 'pool_size': (10, 10), + 'crop_pct': 0.8975, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + 'num_classes': 1000, + 'first_conv': 'conv1', + 'classifier': 'fc' + # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 + } +} + + +class SeparableConv2d(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1): + super(SeparableConv2d, self).__init__() + + self.conv1 = nn.Conv2d( + in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=False) + self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=False) + + def forward(self, x): + x = self.conv1(x) + x = self.pointwise(x) + return x + + +class Block(nn.Module): + def __init__(self, in_channels, out_channels, reps, strides=1, start_with_relu=True, grow_first=True): + super(Block, self).__init__() + + if out_channels != in_channels or strides != 1: + self.skip = nn.Conv2d(in_channels, out_channels, 1, stride=strides, bias=False) + self.skipbn = nn.BatchNorm2d(out_channels) + else: + self.skip = None + + rep = [] + for i in range(reps): + if grow_first: + inc = in_channels if i == 0 else out_channels + outc = out_channels + else: + inc = in_channels + outc = in_channels if i < (reps - 1) else out_channels + rep.append(nn.ReLU(inplace=True)) + rep.append(SeparableConv2d(inc, outc, 3, stride=1, padding=1)) + rep.append(nn.BatchNorm2d(outc)) + + if not start_with_relu: + rep = rep[1:] + else: + rep[0] = nn.ReLU(inplace=False) + + if strides != 1: + rep.append(nn.MaxPool2d(3, strides, 1)) + self.rep = nn.Sequential(*rep) + + def forward(self, inp): + x = self.rep(inp) + + if self.skip is not None: + skip = self.skip(inp) + skip = self.skipbn(skip) + else: + skip = inp + + x += skip + return x + + +class Xception(nn.Module): + """ + Xception optimized for the ImageNet dataset, as specified in + https://arxiv.org/pdf/1610.02357.pdf + """ + + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg'): + """ Constructor + Args: + num_classes: number of classes + """ + super(Xception, self).__init__() + self.drop_rate = drop_rate + self.global_pool = global_pool + self.num_classes = num_classes + self.num_features = 2048 + + self.conv1 = nn.Conv2d(in_chans, 32, 3, 2, 0, bias=False) + self.bn1 = nn.BatchNorm2d(32) + self.act1 = nn.ReLU(inplace=True) + + self.conv2 = nn.Conv2d(32, 64, 3, bias=False) + self.bn2 = nn.BatchNorm2d(64) + self.act2 = nn.ReLU(inplace=True) + + self.block1 = Block(64, 128, 2, 2, start_with_relu=False) + self.block2 = Block(128, 256, 2, 2) + self.block3 = Block(256, 728, 2, 2) + + self.block4 = Block(728, 728, 3, 1) + self.block5 = Block(728, 728, 3, 1) + self.block6 = Block(728, 728, 3, 1) + self.block7 = Block(728, 728, 3, 1) + + self.block8 = Block(728, 728, 3, 1) + self.block9 = Block(728, 728, 3, 1) + self.block10 = Block(728, 728, 3, 1) + self.block11 = Block(728, 728, 3, 1) + + self.block12 = Block(728, 1024, 2, 2, grow_first=False) + + self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1) + self.bn3 = nn.BatchNorm2d(1536) + self.act3 = nn.ReLU(inplace=True) + + self.conv4 = SeparableConv2d(1536, self.num_features, 3, 1, 1) + self.bn4 = nn.BatchNorm2d(self.num_features) + self.act4 = nn.ReLU(inplace=True) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='act2'), + dict(num_chs=128, reduction=4, module='block2.rep.0'), + dict(num_chs=256, reduction=8, module='block3.rep.0'), + dict(num_chs=728, reduction=16, module='block12.rep.0'), + dict(num_chs=2048, reduction=32, module='act4'), + ] + + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + # #------- init weights -------- + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + + x = self.block1(x) + x = self.block2(x) + x = self.block3(x) + x = self.block4(x) + x = self.block5(x) + x = self.block6(x) + x = self.block7(x) + x = self.block8(x) + x = self.block9(x) + x = self.block10(x) + x = self.block11(x) + x = self.block12(x) + + x = self.conv3(x) + x = self.bn3(x) + x = self.act3(x) + + x = self.conv4(x) + x = self.bn4(x) + x = self.act4(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate: + F.dropout(x, self.drop_rate, training=self.training) + x = self.fc(x) + return x + + +def _xception(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + Xception, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(feature_cls='hook'), + **kwargs) + + +@register_model +def xception(pretrained=False, **kwargs): + return _xception('xception', pretrained=pretrained, **kwargs) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/xception_aligned.py b/testbed/huggingface__pytorch-image-models/timm/models/xception_aligned.py new file mode 100644 index 0000000000000000000000000000000000000000..ea7f5c05e06e0e1962074b0dffae2bdb731d2bb6 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/xception_aligned.py @@ -0,0 +1,238 @@ +"""Pytorch impl of Aligned Xception 41, 65, 71 + +This is a correct, from scratch impl of Aligned Xception (Deeplab) models compatible with TF weights at +https://github.com/tensorflow/models/blob/master/research/deeplab/g3doc/model_zoo.md + +Hacked together by / Copyright 2020 Ross Wightman +""" +from functools import partial + +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg +from .layers import ClassifierHead, ConvBnAct, create_conv2d +from .layers.helpers import to_3tuple +from .registry import register_model + +__all__ = ['XceptionAligned'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (10, 10), + 'crop_pct': 0.903, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = dict( + xception41=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_41-e6439c97.pth'), + xception65=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_65-c9ae96e8.pth'), + xception71=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_71-8eec7df1.pth'), +) + + +class SeparableConv2d(nn.Module): + def __init__( + self, inplanes, planes, kernel_size=3, stride=1, dilation=1, padding='', + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): + super(SeparableConv2d, self).__init__() + self.kernel_size = kernel_size + self.dilation = dilation + + # depthwise convolution + self.conv_dw = create_conv2d( + inplanes, inplanes, kernel_size, stride=stride, + padding=padding, dilation=dilation, depthwise=True) + self.bn_dw = norm_layer(inplanes) + if act_layer is not None: + self.act_dw = act_layer(inplace=True) + else: + self.act_dw = None + + # pointwise convolution + self.conv_pw = create_conv2d(inplanes, planes, kernel_size=1) + self.bn_pw = norm_layer(planes) + if act_layer is not None: + self.act_pw = act_layer(inplace=True) + else: + self.act_pw = None + + def forward(self, x): + x = self.conv_dw(x) + x = self.bn_dw(x) + if self.act_dw is not None: + x = self.act_dw(x) + x = self.conv_pw(x) + x = self.bn_pw(x) + if self.act_pw is not None: + x = self.act_pw(x) + return x + + +class XceptionModule(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, pad_type='', + start_with_relu=True, no_skip=False, act_layer=nn.ReLU, norm_layer=None): + super(XceptionModule, self).__init__() + out_chs = to_3tuple(out_chs) + self.in_channels = in_chs + self.out_channels = out_chs[-1] + self.no_skip = no_skip + if not no_skip and (self.out_channels != self.in_channels or stride != 1): + self.shortcut = ConvBnAct( + in_chs, self.out_channels, 1, stride=stride, norm_layer=norm_layer, act_layer=None) + else: + self.shortcut = None + + separable_act_layer = None if start_with_relu else act_layer + self.stack = nn.Sequential() + for i in range(3): + if start_with_relu: + self.stack.add_module(f'act{i + 1}', nn.ReLU(inplace=i > 0)) + self.stack.add_module(f'conv{i + 1}', SeparableConv2d( + in_chs, out_chs[i], 3, stride=stride if i == 2 else 1, dilation=dilation, padding=pad_type, + act_layer=separable_act_layer, norm_layer=norm_layer)) + in_chs = out_chs[i] + + def forward(self, x): + skip = x + x = self.stack(x) + if self.shortcut is not None: + skip = self.shortcut(skip) + if not self.no_skip: + x = x + skip + return x + + +class XceptionAligned(nn.Module): + """Modified Aligned Xception + """ + + def __init__(self, block_cfg, num_classes=1000, in_chans=3, output_stride=32, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_rate=0., global_pool='avg'): + super(XceptionAligned, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert output_stride in (8, 16, 32) + + layer_args = dict(act_layer=act_layer, norm_layer=norm_layer) + self.stem = nn.Sequential(*[ + ConvBnAct(in_chans, 32, kernel_size=3, stride=2, **layer_args), + ConvBnAct(32, 64, kernel_size=3, stride=1, **layer_args) + ]) + + curr_dilation = 1 + curr_stride = 2 + self.feature_info = [] + self.blocks = nn.Sequential() + for i, b in enumerate(block_cfg): + b['dilation'] = curr_dilation + if b['stride'] > 1: + self.feature_info += [dict( + num_chs=to_3tuple(b['out_chs'])[-2], reduction=curr_stride, module=f'blocks.{i}.stack.act3')] + next_stride = curr_stride * b['stride'] + if next_stride > output_stride: + curr_dilation *= b['stride'] + b['stride'] = 1 + else: + curr_stride = next_stride + self.blocks.add_module(str(i), XceptionModule(**b, **layer_args)) + self.num_features = self.blocks[-1].out_channels + + self.feature_info += [dict( + num_chs=self.num_features, reduction=curr_stride, module='blocks.' + str(len(self.blocks) - 1))] + + self.head = ClassifierHead( + in_chs=self.num_features, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.blocks(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _xception(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + XceptionAligned, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(flatten_sequential=True, feature_cls='hook'), + **kwargs) + + +@register_model +def xception41(pretrained=False, **kwargs): + """ Modified Aligned Xception-41 + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 8), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), + ] + model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) + return _xception('xception41', pretrained=pretrained, **model_args) + + +@register_model +def xception65(pretrained=False, **kwargs): + """ Modified Aligned Xception-65 + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 16), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), + ] + model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) + return _xception('xception65', pretrained=pretrained, **model_args) + + +@register_model +def xception71(pretrained=False, **kwargs): + """ Modified Aligned Xception-71 + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=1), + dict(in_chs=256, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=1), + dict(in_chs=728, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 16), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), + ] + model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) + return _xception('xception71', pretrained=pretrained, **model_args) diff --git a/testbed/huggingface__pytorch-image-models/timm/models/xcit.py b/testbed/huggingface__pytorch-image-models/timm/models/xcit.py new file mode 100644 index 0000000000000000000000000000000000000000..b7af3b262b4b429eb1d0be07b5306e2bf3273eb9 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/models/xcit.py @@ -0,0 +1,810 @@ +""" Cross-Covariance Image Transformer (XCiT) in PyTorch + +Same as the official implementation, with some minor adaptations. + - https://github.com/facebookresearch/xcit/blob/master/xcit.py + +Paper: + - https://arxiv.org/abs/2106.09681 +""" +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. + +import math +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .vision_transformer import _cfg, Mlp +from .registry import register_model +from .layers import DropPath, trunc_normal_, to_2tuple +from .cait import ClassAttn + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj.0.0', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # Patch size 16 + 'xcit_nano_12_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224.pth'), + 'xcit_nano_12_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224_dist.pth'), + 'xcit_nano_12_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_12_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224.pth'), + 'xcit_tiny_12_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224_dist.pth'), + 'xcit_tiny_12_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224.pth'), + 'xcit_tiny_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224_dist.pth'), + 'xcit_tiny_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_12_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224.pth'), + 'xcit_small_12_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224_dist.pth'), + 'xcit_small_12_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224.pth'), + 'xcit_small_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224_dist.pth'), + 'xcit_small_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_medium_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224.pth'), + 'xcit_medium_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224_dist.pth'), + 'xcit_medium_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_large_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224.pth'), + 'xcit_large_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224_dist.pth'), + 'xcit_large_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_384_dist.pth', input_size=(3, 384, 384)), + + # Patch size 8 + 'xcit_nano_12_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224.pth'), + 'xcit_nano_12_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224_dist.pth'), + 'xcit_nano_12_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_12_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224.pth'), + 'xcit_tiny_12_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224_dist.pth'), + 'xcit_tiny_12_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224.pth'), + 'xcit_tiny_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224_dist.pth'), + 'xcit_tiny_24_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_12_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224.pth'), + 'xcit_small_12_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224_dist.pth'), + 'xcit_small_12_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224.pth'), + 'xcit_small_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224_dist.pth'), + 'xcit_small_24_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_medium_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224.pth'), + 'xcit_medium_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224_dist.pth'), + 'xcit_medium_24_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_large_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224.pth'), + 'xcit_large_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224_dist.pth'), + 'xcit_large_24_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_384_dist.pth', input_size=(3, 384, 384)), +} + + +class PositionalEncodingFourier(nn.Module): + """ + Positional encoding relying on a fourier kernel matching the one used in the "Attention is all of Need" paper. + Based on the official XCiT code + - https://github.com/facebookresearch/xcit/blob/master/xcit.py + """ + + def __init__(self, hidden_dim=32, dim=768, temperature=10000): + super().__init__() + self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1) + self.scale = 2 * math.pi + self.temperature = temperature + self.hidden_dim = hidden_dim + self.dim = dim + self.eps = 1e-6 + + def forward(self, B: int, H: int, W: int): + device = self.token_projection.weight.device + y_embed = torch.arange(1, H+1, dtype=torch.float32, device=device).unsqueeze(1).repeat(1, 1, W) + x_embed = torch.arange(1, W+1, dtype=torch.float32, device=device).repeat(1, H, 1) + y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale + dim_t = torch.arange(self.hidden_dim, dtype=torch.float32, device=device) + dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim) + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack([pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()], dim=4).flatten(3) + pos_y = torch.stack([pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()], dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + pos = self.token_projection(pos) + return pos.repeat(B, 1, 1, 1) # (B, C, H, W) + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution + batch norm""" + return torch.nn.Sequential( + nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False), + nn.BatchNorm2d(out_planes) + ) + + +class ConvPatchEmbed(nn.Module): + """Image to Patch Embedding using multiple convolutional layers""" + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, act_layer=nn.GELU): + super().__init__() + img_size = to_2tuple(img_size) + num_patches = (img_size[1] // patch_size) * (img_size[0] // patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + + if patch_size == 16: + self.proj = torch.nn.Sequential( + conv3x3(in_chans, embed_dim // 8, 2), + act_layer(), + conv3x3(embed_dim // 8, embed_dim // 4, 2), + act_layer(), + conv3x3(embed_dim // 4, embed_dim // 2, 2), + act_layer(), + conv3x3(embed_dim // 2, embed_dim, 2), + ) + elif patch_size == 8: + self.proj = torch.nn.Sequential( + conv3x3(in_chans, embed_dim // 4, 2), + act_layer(), + conv3x3(embed_dim // 4, embed_dim // 2, 2), + act_layer(), + conv3x3(embed_dim // 2, embed_dim, 2), + ) + else: + raise('For convolutional projection, patch size has to be in [8, 16]') + + def forward(self, x): + x = self.proj(x) + Hp, Wp = x.shape[2], x.shape[3] + x = x.flatten(2).transpose(1, 2) # (B, N, C) + return x, (Hp, Wp) + + +class LPI(nn.Module): + """ + Local Patch Interaction module that allows explicit communication between tokens in 3x3 windows to augment the + implicit communication performed by the block diagonal scatter attention. Implemented using 2 layers of separable + 3x3 convolutions with GeLU and BatchNorm2d + """ + + def __init__(self, in_features, out_features=None, act_layer=nn.GELU, kernel_size=3): + super().__init__() + out_features = out_features or in_features + + padding = kernel_size // 2 + + self.conv1 = torch.nn.Conv2d( + in_features, in_features, kernel_size=kernel_size, padding=padding, groups=in_features) + self.act = act_layer() + self.bn = nn.BatchNorm2d(in_features) + self.conv2 = torch.nn.Conv2d( + in_features, out_features, kernel_size=kernel_size, padding=padding, groups=out_features) + + def forward(self, x, H: int, W: int): + B, N, C = x.shape + x = x.permute(0, 2, 1).reshape(B, C, H, W) + x = self.conv1(x) + x = self.act(x) + x = self.bn(x) + x = self.conv2(x) + x = x.reshape(B, C, N).permute(0, 2, 1) + return x + + +class ClassAttentionBlock(nn.Module): + """Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239""" + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., tokens_norm=False): + super().__init__() + self.norm1 = norm_layer(dim) + + self.attn = ClassAttn( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + + if eta is not None: # LayerScale Initialization (no layerscale when None) + self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + else: + self.gamma1, self.gamma2 = 1.0, 1.0 + + # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 + self.tokens_norm = tokens_norm + + def forward(self, x): + x_norm1 = self.norm1(x) + x_attn = torch.cat([self.attn(x_norm1), x_norm1[:, 1:]], dim=1) + x = x + self.drop_path(self.gamma1 * x_attn) + if self.tokens_norm: + x = self.norm2(x) + else: + x = torch.cat([self.norm2(x[:, 0:1]), x[:, 1:]], dim=1) + x_res = x + cls_token = x[:, 0:1] + cls_token = self.gamma2 * self.mlp(cls_token) + x = torch.cat([cls_token, x[:, 1:]], dim=1) + x = x_res + self.drop_path(x) + return x + + +class XCA(nn.Module): + """ Cross-Covariance Attention (XCA) + Operation where the channels are updated using a weighted sum. The weights are obtained from the (softmax + normalized) Cross-covariance matrix (Q^T \\cdot K \\in d_h \\times d_h) + """ + + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1)) + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + # Result of next line is (qkv, B, num (H)eads, (C')hannels per head, N) + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 4, 1) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + # Paper section 3.2 l2-Normalization and temperature scaling + q = torch.nn.functional.normalize(q, dim=-1) + k = torch.nn.functional.normalize(k, dim=-1) + attn = (q @ k.transpose(-2, -1)) * self.temperature + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + # (B, H, C', N), permute -> (B, N, H, C') + x = (attn @ v).permute(0, 3, 1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @torch.jit.ignore + def no_weight_decay(self): + return {'temperature'} + + +class XCABlock(nn.Module): + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1.): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = XCA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm3 = norm_layer(dim) + self.local_mp = LPI(in_features=dim, act_layer=act_layer) + + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + + self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + self.gamma3 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + + def forward(self, x, H: int, W: int): + x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x))) + # NOTE official code has 3 then 2, so keeping it the same to be consistent with loaded weights + # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 + x = x + self.drop_path(self.gamma3 * self.local_mp(self.norm3(x), H, W)) + x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x))) + return x + + +class XCiT(nn.Module): + """ + Based on timm and DeiT code bases + https://github.com/rwightman/pytorch-image-models/tree/master/timm + https://github.com/facebookresearch/deit/ + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=False): + """ + Args: + img_size (int, tuple): input image size + patch_size (int): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + drop_rate (float): dropout rate after positional embedding, and in XCA/CA projection + MLP + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate (constant across all layers) + norm_layer: (nn.Module): normalization layer + cls_attn_layers: (int) Depth of Class attention layers + use_pos_embed: (bool) whether to use positional encoding + eta: (float) layerscale initialization value + tokens_norm: (bool) Whether to normalize all tokens or just the cls_token in the CA + + Notes: + - Although `layer_norm` is user specifiable, there are hard-coded `BatchNorm2d`s in the local patch + interaction (class LPI) and the patch embedding (class ConvPatchEmbed) + """ + super().__init__() + img_size = to_2tuple(img_size) + assert (img_size[0] % patch_size == 0) and (img_size[0] % patch_size == 0), \ + '`patch_size` should divide image dimensions evenly' + + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or nn.GELU + + self.patch_embed = ConvPatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, act_layer=act_layer) + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.use_pos_embed = use_pos_embed + if use_pos_embed: + self.pos_embed = PositionalEncodingFourier(dim=embed_dim) + self.pos_drop = nn.Dropout(p=drop_rate) + + self.blocks = nn.ModuleList([ + XCABlock( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, + attn_drop=attn_drop_rate, drop_path=drop_path_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta) + for _ in range(depth)]) + + self.cls_attn_blocks = nn.ModuleList([ + ClassAttentionBlock( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, + attn_drop=attn_drop_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta, tokens_norm=tokens_norm) + for _ in range(cls_attn_layers)]) + + # Classifier head + self.norm = norm_layer(embed_dim) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + # Init weights + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + B = x.shape[0] + # x is (B, N, C). (Hp, Hw) is (height in units of patches, width in units of patches) + x, (Hp, Wp) = self.patch_embed(x) + + if self.use_pos_embed: + # `pos_embed` (B, C, Hp, Wp), reshape -> (B, C, N), permute -> (B, N, C) + pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1) + x = x + pos_encoding + + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x, Hp, Wp) + + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + for blk in self.cls_attn_blocks: + x = blk(x) + + x = self.norm(x)[:, 0] + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + if 'model' in state_dict: + state_dict = state_dict['model'] + # For consistency with timm's transformer models while being compatible with official weights source we rename + # pos_embeder to pos_embed. Also account for use_pos_embed == False + use_pos_embed = getattr(model, 'pos_embed', None) is not None + pos_embed_keys = [k for k in state_dict if k.startswith('pos_embed')] + for k in pos_embed_keys: + if use_pos_embed: + state_dict[k.replace('pos_embeder.', 'pos_embed.')] = state_dict.pop(k) + else: + del state_dict[k] + # timm's implementation of class attention in CaiT is slightly more efficient as it does not compute query vectors + # for all tokens, just the class token. To use official weights source we must split qkv into q, k, v + if 'cls_attn_blocks.0.attn.qkv.weight' in state_dict and 'cls_attn_blocks.0.attn.q.weight' in model.state_dict(): + num_ca_blocks = len(model.cls_attn_blocks) + for i in range(num_ca_blocks): + qkv_weight = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.weight') + qkv_weight = qkv_weight.reshape(3, -1, qkv_weight.shape[-1]) + for j, subscript in enumerate('qkv'): + state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.weight'] = qkv_weight[j] + qkv_bias = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.bias', None) + if qkv_bias is not None: + qkv_bias = qkv_bias.reshape(3, -1) + for j, subscript in enumerate('qkv'): + state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.bias'] = qkv_bias[j] + return state_dict + + +def _create_xcit(variant, pretrained=False, default_cfg=None, **kwargs): + default_cfg = default_cfg or default_cfgs[variant] + model = build_model_with_cfg( + XCiT, variant, pretrained, default_cfg=default_cfg, pretrained_filter_fn=checkpoint_filter_fn, **kwargs) + return model + + +@register_model +def xcit_nano_12_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, img_size=384, **kwargs) + model = _create_xcit('xcit_nano_12_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +# Patch size 8x8 models +@register_model +def xcit_nano_12_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model diff --git a/testbed/huggingface__pytorch-image-models/timm/optim/__init__.py b/testbed/huggingface__pytorch-image-models/timm/optim/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7ee4958eb562bcfe06a5da72be4b76ee610a0ccc --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/optim/__init__.py @@ -0,0 +1,15 @@ +from .adabelief import AdaBelief +from .adafactor import Adafactor +from .adahessian import Adahessian +from .adamp import AdamP +from .adamw import AdamW +from .lamb import Lamb +from .lars import Lars +from .lookahead import Lookahead +from .madgrad import MADGRAD +from .nadam import Nadam +from .nvnovograd import NvNovoGrad +from .radam import RAdam +from .rmsprop_tf import RMSpropTF +from .sgdp import SGDP +from .optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs diff --git a/testbed/huggingface__pytorch-image-models/timm/optim/adabelief.py b/testbed/huggingface__pytorch-image-models/timm/optim/adabelief.py new file mode 100644 index 0000000000000000000000000000000000000000..951d715cc0b605df2f7313c95840b7784c4d0a70 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/optim/adabelief.py @@ -0,0 +1,201 @@ +import math +import torch +from torch.optim.optimizer import Optimizer + + +class AdaBelief(Optimizer): + r"""Implements AdaBelief algorithm. Modified from Adam in PyTorch + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-16) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + decoupled_decay (boolean, optional): (default: True) If set as True, then + the optimizer uses decoupled weight decay as in AdamW + fixed_decay (boolean, optional): (default: False) This is used when weight_decouple + is set as True. + When fixed_decay == True, the weight decay is performed as + $W_{new} = W_{old} - W_{old} \times decay$. + When fixed_decay == False, the weight decay is performed as + $W_{new} = W_{old} - W_{old} \times decay \times lr$. Note that in this case, the + weight decay ratio decreases with learning rate (lr). + rectify (boolean, optional): (default: True) If set as True, then perform the rectified + update similar to RAdam + degenerated_to_sgd (boolean, optional) (default:True) If set as True, then perform SGD update + when variance of gradient is high + reference: AdaBelief Optimizer, adapting stepsizes by the belief in observed gradients, NeurIPS 2020 + + For a complete table of recommended hyperparameters, see https://github.com/juntang-zhuang/Adabelief-Optimizer' + For example train/args for EfficientNet see these gists + - link to train_scipt: https://gist.github.com/juntang-zhuang/0a501dd51c02278d952cf159bc233037 + - link to args.yaml: https://gist.github.com/juntang-zhuang/517ce3c27022b908bb93f78e4f786dc3 + """ + + def __init__( + self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16, weight_decay=0, amsgrad=False, + decoupled_decay=True, fixed_decay=False, rectify=True, degenerated_to_sgd=True): + + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + + if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict): + for param in params: + if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]): + param['buffer'] = [[None, None, None] for _ in range(10)] + + defaults = dict( + lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad, + degenerated_to_sgd=degenerated_to_sgd, decoupled_decay=decoupled_decay, rectify=rectify, + fixed_decay=fixed_decay, buffer=[[None, None, None] for _ in range(10)]) + super(AdaBelief, self).__init__(params, defaults) + + def __setstate__(self, state): + super(AdaBelief, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def reset(self): + for group in self.param_groups: + for p in group['params']: + state = self.state[p] + amsgrad = group['amsgrad'] + + # State initialization + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + + # Exponential moving average of squared gradient values + state['exp_avg_var'] = torch.zeros_like(p) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_var'] = torch.zeros_like(p) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.dtype in {torch.float16, torch.bfloat16}: + grad = grad.float() + if grad.is_sparse: + raise RuntimeError( + 'AdaBelief does not support sparse gradients, please consider SparseAdam instead') + + p_fp32 = p + if p.dtype in {torch.float16, torch.bfloat16}: + p_fp32 = p_fp32.float() + + amsgrad = group['amsgrad'] + beta1, beta2 = group['betas'] + state = self.state[p] + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p_fp32) + # Exponential moving average of squared gradient values + state['exp_avg_var'] = torch.zeros_like(p_fp32) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_var'] = torch.zeros_like(p_fp32) + + # perform weight decay, check if decoupled weight decay + if group['decoupled_decay']: + if not group['fixed_decay']: + p_fp32.mul_(1.0 - group['lr'] * group['weight_decay']) + else: + p_fp32.mul_(1.0 - group['weight_decay']) + else: + if group['weight_decay'] != 0: + grad.add_(p_fp32, alpha=group['weight_decay']) + + # get current state variable + exp_avg, exp_avg_var = state['exp_avg'], state['exp_avg_var'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + # Update first and second moment running average + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + grad_residual = grad - exp_avg + exp_avg_var.mul_(beta2).addcmul_(grad_residual, grad_residual, value=1 - beta2) + + if amsgrad: + max_exp_avg_var = state['max_exp_avg_var'] + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_var, exp_avg_var.add_(group['eps']), out=max_exp_avg_var) + + # Use the max. for normalizing running avg. of gradient + denom = (max_exp_avg_var.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + else: + denom = (exp_avg_var.add_(group['eps']).sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + + # update + if not group['rectify']: + # Default update + step_size = group['lr'] / bias_correction1 + p_fp32.addcdiv_(exp_avg, denom, value=-step_size) + else: + # Rectified update, forked from RAdam + buffered = group['buffer'][int(state['step'] % 10)] + if state['step'] == buffered[0]: + num_sma, step_size = buffered[1], buffered[2] + else: + buffered[0] = state['step'] + beta2_t = beta2 ** state['step'] + num_sma_max = 2 / (1 - beta2) - 1 + num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) + buffered[1] = num_sma + + # more conservative since it's an approximated value + if num_sma >= 5: + step_size = math.sqrt( + (1 - beta2_t) * + (num_sma - 4) / (num_sma_max - 4) * + (num_sma - 2) / num_sma * + num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step']) + elif group['degenerated_to_sgd']: + step_size = 1.0 / (1 - beta1 ** state['step']) + else: + step_size = -1 + buffered[2] = step_size + + if num_sma >= 5: + denom = exp_avg_var.sqrt().add_(group['eps']) + p_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr']) + elif step_size > 0: + p_fp32.add_(exp_avg, alpha=-step_size * group['lr']) + + if p.dtype in {torch.float16, torch.bfloat16}: + p.copy_(p_fp32) + + return loss diff --git a/testbed/huggingface__pytorch-image-models/timm/optim/adafactor.py b/testbed/huggingface__pytorch-image-models/timm/optim/adafactor.py new file mode 100644 index 0000000000000000000000000000000000000000..06057433a9bffa555bdc13b27a1c56cff26acf15 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/optim/adafactor.py @@ -0,0 +1,167 @@ +""" Adafactor Optimizer + +Lifted from https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py + +Original header/copyright below. + +""" +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +import torch +import math + + +class Adafactor(torch.optim.Optimizer): + """Implements Adafactor algorithm. + This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost` + (see https://arxiv.org/abs/1804.04235) + + Note that this optimizer internally adjusts the learning rate depending on the + *scale_parameter*, *relative_step* and *warmup_init* options. + + To use a manual (external) learning rate schedule you should set `scale_parameter=False` and + `relative_step=False`. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups + lr (float, optional): external learning rate (default: None) + eps (tuple[float, float]): regularization constants for square gradient + and parameter scale respectively (default: (1e-30, 1e-3)) + clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0) + decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8) + beta1 (float): coefficient used for computing running averages of gradient (default: None) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True) + warmup_init (bool): time-dependent learning rate computation depends on + whether warm-up initialization is being used (default: False) + """ + + def __init__(self, params, lr=None, eps=1e-30, eps_scale=1e-3, clip_threshold=1.0, + decay_rate=-0.8, betas=None, weight_decay=0.0, scale_parameter=True, warmup_init=False): + relative_step = not lr + if warmup_init and not relative_step: + raise ValueError('warmup_init requires relative_step=True') + + beta1 = None if betas is None else betas[0] # make it compat with standard betas arg + defaults = dict(lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold, decay_rate=decay_rate, + beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, + relative_step=relative_step, warmup_init=warmup_init) + super(Adafactor, self).__init__(params, defaults) + + @staticmethod + def _get_lr(param_group, param_state): + if param_group['relative_step']: + min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2 + lr_t = min(min_step, 1.0 / math.sqrt(param_state['step'])) + param_scale = 1.0 + if param_group['scale_parameter']: + param_scale = max(param_group['eps_scale'], param_state['RMS']) + param_group['lr'] = lr_t * param_scale + return param_group['lr'] + + @staticmethod + def _get_options(param_group, param_shape): + factored = len(param_shape) >= 2 + use_first_moment = param_group['beta1'] is not None + return factored, use_first_moment + + @staticmethod + def _rms(tensor): + return tensor.norm(2) / (tensor.numel() ** 0.5) + + def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col): + r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1) + c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() + return torch.mul(r_factor, c_factor) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.dtype in {torch.float16, torch.bfloat16}: + grad = grad.float() + if grad.is_sparse: + raise RuntimeError('Adafactor does not support sparse gradients.') + + state = self.state[p] + + factored, use_first_moment = self._get_options(group, grad.shape) + # State Initialization + if len(state) == 0: + state['step'] = 0 + + if use_first_moment: + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(grad) + if factored: + state['exp_avg_sq_row'] = torch.zeros(grad.shape[:-1]).to(grad) + state['exp_avg_sq_col'] = torch.zeros(grad.shape[:-2] + grad.shape[-1:]).to(grad) + else: + state['exp_avg_sq'] = torch.zeros_like(grad) + + state['RMS'] = 0 + else: + if use_first_moment: + state['exp_avg'] = state['exp_avg'].to(grad) + if factored: + state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad) + state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad) + else: + state['exp_avg_sq'] = state['exp_avg_sq'].to(grad) + + p_fp32 = p + if p.dtype in {torch.float16, torch.bfloat16}: + p_fp32 = p_fp32.float() + + state['step'] += 1 + state['RMS'] = self._rms(p_fp32) + lr_t = self._get_lr(group, state) + + beta2t = 1.0 - math.pow(state['step'], group['decay_rate']) + update = grad ** 2 + group['eps'] + if factored: + exp_avg_sq_row = state['exp_avg_sq_row'] + exp_avg_sq_col = state['exp_avg_sq_col'] + + exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t) + exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t) + + # Approximation of exponential moving average of square of gradient + update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update.mul_(grad) + else: + exp_avg_sq = state['exp_avg_sq'] + + exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t) + update = exp_avg_sq.rsqrt().mul_(grad) + + update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0)) + update.mul_(lr_t) + + if use_first_moment: + exp_avg = state['exp_avg'] + exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1']) + update = exp_avg + + if group['weight_decay'] != 0: + p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * lr_t) + + p_fp32.add_(-update) + if p.dtype in {torch.float16, torch.bfloat16}: + p.copy_(p_fp32) + + return loss diff --git a/testbed/huggingface__pytorch-image-models/timm/optim/adahessian.py b/testbed/huggingface__pytorch-image-models/timm/optim/adahessian.py new file mode 100644 index 0000000000000000000000000000000000000000..985c67ca686a65f61f5c5b1a7db3e5bba815a19b --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/optim/adahessian.py @@ -0,0 +1,156 @@ +""" AdaHessian Optimizer + +Lifted from https://github.com/davda54/ada-hessian/blob/master/ada_hessian.py +Originally licensed MIT, Copyright 2020, David Samuel +""" +import torch + + +class Adahessian(torch.optim.Optimizer): + """ + Implements the AdaHessian algorithm from "ADAHESSIAN: An Adaptive Second OrderOptimizer for Machine Learning" + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups + lr (float, optional): learning rate (default: 0.1) + betas ((float, float), optional): coefficients used for computing running averages of gradient and the + squared hessian trace (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0) + hessian_power (float, optional): exponent of the hessian trace (default: 1.0) + update_each (int, optional): compute the hessian trace approximation only after *this* number of steps + (to save time) (default: 1) + n_samples (int, optional): how many times to sample `z` for the approximation of the hessian trace (default: 1) + """ + + def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, + hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= hessian_power <= 1.0: + raise ValueError(f"Invalid Hessian power value: {hessian_power}") + + self.n_samples = n_samples + self.update_each = update_each + self.avg_conv_kernel = avg_conv_kernel + + # use a separate generator that deterministically generates the same `z`s across all GPUs in case of distributed training + self.seed = 2147483647 + self.generator = torch.Generator().manual_seed(self.seed) + + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power) + super(Adahessian, self).__init__(params, defaults) + + for p in self.get_params(): + p.hess = 0.0 + self.state[p]["hessian step"] = 0 + + @property + def is_second_order(self): + return True + + def get_params(self): + """ + Gets all parameters in all param_groups with gradients + """ + + return (p for group in self.param_groups for p in group['params'] if p.requires_grad) + + def zero_hessian(self): + """ + Zeros out the accumalated hessian traces. + """ + + for p in self.get_params(): + if not isinstance(p.hess, float) and self.state[p]["hessian step"] % self.update_each == 0: + p.hess.zero_() + + @torch.no_grad() + def set_hessian(self): + """ + Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter. + """ + + params = [] + for p in filter(lambda p: p.grad is not None, self.get_params()): + if self.state[p]["hessian step"] % self.update_each == 0: # compute the trace only each `update_each` step + params.append(p) + self.state[p]["hessian step"] += 1 + + if len(params) == 0: + return + + if self.generator.device != params[0].device: # hackish way of casting the generator to the right device + self.generator = torch.Generator(params[0].device).manual_seed(self.seed) + + grads = [p.grad for p in params] + + for i in range(self.n_samples): + # Rademacher distribution {-1.0, 1.0} + zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params] + h_zs = torch.autograd.grad( + grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1) + for h_z, z, p in zip(h_zs, zs, params): + p.hess += h_z * z / self.n_samples # approximate the expected values of z*(H@z) + + @torch.no_grad() + def step(self, closure=None): + """ + Performs a single optimization step. + Arguments: + closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None) + """ + + loss = None + if closure is not None: + loss = closure() + + self.zero_hessian() + self.set_hessian() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None or p.hess is None: + continue + + if self.avg_conv_kernel and p.dim() == 4: + p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone() + + # Perform correct stepweight decay as in AdamW + p.mul_(1 - group['lr'] * group['weight_decay']) + + state = self.state[p] + + # State initialization + if len(state) == 1: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of Hessian diagonal square values + state['exp_hessian_diag_sq'] = torch.zeros_like(p) + + exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq'] + beta1, beta2 = group['betas'] + state['step'] += 1 + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1) + exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2) + + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + k = group['hessian_power'] + denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps']) + + # make update + step_size = group['lr'] / bias_correction1 + p.addcdiv_(exp_avg, denom, value=-step_size) + + return loss diff --git a/testbed/huggingface__pytorch-image-models/timm/optim/adamp.py b/testbed/huggingface__pytorch-image-models/timm/optim/adamp.py new file mode 100644 index 0000000000000000000000000000000000000000..ee187633ab745dbb0344dcdc3dcb1cf40e6ae5e9 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/optim/adamp.py @@ -0,0 +1,105 @@ +""" +AdamP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/adamp.py + +Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217 +Code: https://github.com/clovaai/AdamP + +Copyright (c) 2020-present NAVER Corp. +MIT license +""" + +import torch +import torch.nn.functional as F +from torch.optim.optimizer import Optimizer +import math + + +def _channel_view(x) -> torch.Tensor: + return x.reshape(x.size(0), -1) + + +def _layer_view(x) -> torch.Tensor: + return x.reshape(1, -1) + + +def projection(p, grad, perturb, delta: float, wd_ratio: float, eps: float): + wd = 1. + expand_size = (-1,) + (1,) * (len(p.shape) - 1) + for view_func in [_channel_view, _layer_view]: + param_view = view_func(p) + grad_view = view_func(grad) + cosine_sim = F.cosine_similarity(grad_view, param_view, dim=1, eps=eps).abs_() + + # FIXME this is a problem for PyTorch XLA + if cosine_sim.max() < delta / math.sqrt(param_view.size(1)): + p_n = p / param_view.norm(p=2, dim=1).add_(eps).reshape(expand_size) + perturb -= p_n * view_func(p_n * perturb).sum(dim=1).reshape(expand_size) + wd = wd_ratio + return perturb, wd + + return perturb, wd + + +class AdamP(Optimizer): + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, + weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False): + defaults = dict( + lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, + delta=delta, wd_ratio=wd_ratio, nesterov=nesterov) + super(AdamP, self).__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + + grad = p.grad + beta1, beta2 = group['betas'] + nesterov = group['nesterov'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['exp_avg'] = torch.zeros_like(p) + state['exp_avg_sq'] = torch.zeros_like(p) + + # Adam + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + step_size = group['lr'] / bias_correction1 + + if nesterov: + perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom + else: + perturb = exp_avg / denom + + # Projection + wd_ratio = 1. + if len(p.shape) > 1: + perturb, wd_ratio = projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps']) + + # Weight decay + if group['weight_decay'] > 0: + p.mul_(1. - group['lr'] * group['weight_decay'] * wd_ratio) + + # Step + p.add_(perturb, alpha=-step_size) + + return loss diff --git a/testbed/huggingface__pytorch-image-models/timm/optim/adamw.py b/testbed/huggingface__pytorch-image-models/timm/optim/adamw.py new file mode 100644 index 0000000000000000000000000000000000000000..66478bc6ef3c50ab9d40cabb0cfb2bd24277c815 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/optim/adamw.py @@ -0,0 +1,122 @@ +""" AdamW Optimizer +Impl copied from PyTorch master + +NOTE: Builtin optim.AdamW is used by the factory, this impl only serves as a Python based reference, will be removed +someday +""" +import math +import torch +from torch.optim.optimizer import Optimizer + + +class AdamW(Optimizer): + r"""Implements AdamW algorithm. + + The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_. + The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay coefficient (default: 1e-2) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + + .. _Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. _Decoupled Weight Decay Regularization: + https://arxiv.org/abs/1711.05101 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, + weight_decay=1e-2, amsgrad=False): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, amsgrad=amsgrad) + super(AdamW, self).__init__(params, defaults) + + def __setstate__(self, state): + super(AdamW, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + + # Perform stepweight decay + p.data.mul_(1 - group['lr'] * group['weight_decay']) + + # Perform optimization step + grad = p.grad + if grad.is_sparse: + raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') + amsgrad = group['amsgrad'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_sq'] = torch.zeros_like(p) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + if amsgrad: + max_exp_avg_sq = state['max_exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) + # Use the max. for normalizing running avg. of gradient + denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + else: + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + + step_size = group['lr'] / bias_correction1 + + p.addcdiv_(exp_avg, denom, value=-step_size) + + return loss diff --git a/testbed/huggingface__pytorch-image-models/timm/optim/lamb.py b/testbed/huggingface__pytorch-image-models/timm/optim/lamb.py new file mode 100644 index 0000000000000000000000000000000000000000..12c7c49b8a01ef793c97654ac938259ca6508449 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/optim/lamb.py @@ -0,0 +1,192 @@ +""" PyTorch Lamb optimizer w/ behaviour similar to NVIDIA FusedLamb + +This optimizer code was adapted from the following (starting with latest) +* https://github.com/HabanaAI/Model-References/blob/2b435114fe8e31f159b1d3063b8280ae37af7423/PyTorch/nlp/bert/pretraining/lamb.py +* https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py +* https://github.com/cybertronai/pytorch-lamb + +Use FusedLamb if you can (GPU). The reason for including this variant of Lamb is to have a version that is +similar in behaviour to APEX FusedLamb if you aren't using NVIDIA GPUs or cannot install/use APEX. + +In addition to some cleanup, this Lamb impl has been modified to support PyTorch XLA and has been tested on TPU. + +Original copyrights for above sources are below. + +Modifications Copyright 2021 Ross Wightman +""" +# Copyright (c) 2021, Habana Labs Ltd. All rights reserved. + +# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# MIT License +# +# Copyright (c) 2019 cybertronai +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +import math + +import torch +from torch.optim import Optimizer + + +class Lamb(Optimizer): + """Implements a pure pytorch variant of FuseLAMB (NvLamb variant) optimizer from apex.optimizers.FusedLAMB + reference: https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py + + LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its norm. (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + grad_averaging (bool, optional): whether apply (1-beta2) to grad when + calculating running averages of gradient. (default: True) + max_grad_norm (float, optional): value used to clip global grad norm (default: 1.0) + trust_clip (bool): enable LAMBC trust ratio clipping (default: False) + always_adapt (boolean, optional): Apply adaptive learning rate to 0.0 + weight decay parameter (default: False) + + .. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes: + https://arxiv.org/abs/1904.00962 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__( + self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-6, + weight_decay=0.01, grad_averaging=True, max_grad_norm=1.0, trust_clip=False, always_adapt=False): + defaults = dict( + lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay, + grad_averaging=grad_averaging, max_grad_norm=max_grad_norm, + trust_clip=trust_clip, always_adapt=always_adapt) + super().__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + device = self.param_groups[0]['params'][0].device + one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly + global_grad_norm = torch.zeros(1, device=device) + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instad.') + global_grad_norm.add_(grad.pow(2).sum()) + + global_grad_norm = torch.sqrt(global_grad_norm) + # FIXME it'd be nice to remove explicit tensor conversion of scalars when torch.where promotes + # scalar types properly https://github.com/pytorch/pytorch/issues/9190 + max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device) + clip_global_grad_norm = torch.where( + global_grad_norm > max_grad_norm, + global_grad_norm / max_grad_norm, + one_tensor) + + for group in self.param_groups: + bias_correction = 1 if group['bias_correction'] else 0 + beta1, beta2 = group['betas'] + grad_averaging = 1 if group['grad_averaging'] else 0 + beta3 = 1 - beta1 if grad_averaging else 1.0 + + # assume same step across group now to simplify things + # per parameter step can be easily support by making it tensor, or pass list into kernel + if 'step' in group: + group['step'] += 1 + else: + group['step'] = 1 + + if bias_correction: + bias_correction1 = 1 - beta1 ** group['step'] + bias_correction2 = 1 - beta2 ** group['step'] + else: + bias_correction1, bias_correction2 = 1.0, 1.0 + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.div_(clip_global_grad_norm) + state = self.state[p] + + # State initialization + if len(state) == 0: + # Exponential moving average of gradient valuesa + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=beta3) # m_t + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) # v_t + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + update = (exp_avg / bias_correction1).div_(denom) + + weight_decay = group['weight_decay'] + if weight_decay != 0: + update.add_(p, alpha=weight_decay) + + if weight_decay != 0 or group['always_adapt']: + # Layer-wise LR adaptation. By default, skip adaptation on parameters that are + # excluded from weight decay, unless always_adapt == True, then always enabled. + w_norm = p.norm(2.0) + g_norm = update.norm(2.0) + # FIXME nested where required since logical and/or not working in PT XLA + trust_ratio = torch.where( + w_norm > 0, + torch.where(g_norm > 0, w_norm / g_norm, one_tensor), + one_tensor, + ) + if group['trust_clip']: + # LAMBC trust clipping, upper bound fixed at one + trust_ratio = torch.minimum(trust_ratio, one_tensor) + update.mul_(trust_ratio) + + p.add_(update, alpha=-group['lr']) + + return loss diff --git a/testbed/huggingface__pytorch-image-models/timm/optim/lars.py b/testbed/huggingface__pytorch-image-models/timm/optim/lars.py new file mode 100644 index 0000000000000000000000000000000000000000..98198e675c91e867d36865c67d360f65698218e7 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/optim/lars.py @@ -0,0 +1,135 @@ +""" PyTorch LARS / LARC Optimizer + +An implementation of LARS (SGD) + LARC in PyTorch + +Based on: + * PyTorch SGD: https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 + * NVIDIA APEX LARC: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py + +Additional cleanup and modifications to properly support PyTorch XLA. + +Copyright 2021 Ross Wightman +""" +import torch +from torch.optim.optimizer import Optimizer + + +class Lars(Optimizer): + """ LARS for PyTorch + + Paper: `Large batch training of Convolutional Networks` - https://arxiv.org/pdf/1708.03888.pdf + + Args: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups. + lr (float, optional): learning rate (default: 1.0). + momentum (float, optional): momentum factor (default: 0) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + dampening (float, optional): dampening for momentum (default: 0) + nesterov (bool, optional): enables Nesterov momentum (default: False) + trust_coeff (float): trust coefficient for computing adaptive lr / trust_ratio (default: 0.001) + eps (float): eps for division denominator (default: 1e-8) + trust_clip (bool): enable LARC trust ratio clipping (default: False) + always_adapt (bool): always apply LARS LR adapt, otherwise only when group weight_decay != 0 (default: False) + """ + + def __init__( + self, + params, + lr=1.0, + momentum=0, + dampening=0, + weight_decay=0, + nesterov=False, + trust_coeff=0.001, + eps=1e-8, + trust_clip=False, + always_adapt=False, + ): + if lr < 0.0: + raise ValueError(f"Invalid learning rate: {lr}") + if momentum < 0.0: + raise ValueError(f"Invalid momentum value: {momentum}") + if weight_decay < 0.0: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + if nesterov and (momentum <= 0 or dampening != 0): + raise ValueError("Nesterov momentum requires a momentum and zero dampening") + + defaults = dict( + lr=lr, + momentum=momentum, + dampening=dampening, + weight_decay=weight_decay, + nesterov=nesterov, + trust_coeff=trust_coeff, + eps=eps, + trust_clip=trust_clip, + always_adapt=always_adapt, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("nesterov", False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Args: + closure (callable, optional): A closure that reevaluates the model and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + device = self.param_groups[0]['params'][0].device + one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly + + for group in self.param_groups: + weight_decay = group['weight_decay'] + momentum = group['momentum'] + dampening = group['dampening'] + nesterov = group['nesterov'] + trust_coeff = group['trust_coeff'] + eps = group['eps'] + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + + # apply LARS LR adaptation, LARC clipping, weight decay + # ref: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py + if weight_decay != 0 or group['always_adapt']: + w_norm = p.norm(2.0) + g_norm = grad.norm(2.0) + trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps) + # FIXME nested where required since logical and/or not working in PT XLA + trust_ratio = torch.where( + w_norm > 0, + torch.where(g_norm > 0, trust_ratio, one_tensor), + one_tensor, + ) + if group['trust_clip']: + trust_ratio = torch.minimum(trust_ratio / group['lr'], one_tensor) + grad.add(p, alpha=weight_decay) + grad.mul_(trust_ratio) + + # apply SGD update https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 + if momentum != 0: + param_state = self.state[p] + if 'momentum_buffer' not in param_state: + buf = param_state['momentum_buffer'] = torch.clone(grad).detach() + else: + buf = param_state['momentum_buffer'] + buf.mul_(momentum).add_(grad, alpha=1. - dampening) + if nesterov: + grad = grad.add(buf, alpha=momentum) + else: + grad = buf + + p.add_(grad, alpha=-group['lr']) + + return loss \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/timm/optim/lookahead.py b/testbed/huggingface__pytorch-image-models/timm/optim/lookahead.py new file mode 100644 index 0000000000000000000000000000000000000000..462c3acd247016a94acd39a27dd44f29ae854d31 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/optim/lookahead.py @@ -0,0 +1,61 @@ +""" Lookahead Optimizer Wrapper. +Implementation modified from: https://github.com/alphadl/lookahead.pytorch +Paper: `Lookahead Optimizer: k steps forward, 1 step back` - https://arxiv.org/abs/1907.08610 + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch.optim.optimizer import Optimizer +from collections import defaultdict + + +class Lookahead(Optimizer): + def __init__(self, base_optimizer, alpha=0.5, k=6): + # NOTE super().__init__() not called on purpose + if not 0.0 <= alpha <= 1.0: + raise ValueError(f'Invalid slow update rate: {alpha}') + if not 1 <= k: + raise ValueError(f'Invalid lookahead steps: {k}') + defaults = dict(lookahead_alpha=alpha, lookahead_k=k, lookahead_step=0) + self._base_optimizer = base_optimizer + self.param_groups = base_optimizer.param_groups + self.defaults = base_optimizer.defaults + self.defaults.update(defaults) + self.state = defaultdict(dict) + # manually add our defaults to the param groups + for name, default in defaults.items(): + for group in self._base_optimizer.param_groups: + group.setdefault(name, default) + + @torch.no_grad() + def update_slow(self, group): + for fast_p in group["params"]: + if fast_p.grad is None: + continue + param_state = self._base_optimizer.state[fast_p] + if 'lookahead_slow_buff' not in param_state: + param_state['lookahead_slow_buff'] = torch.empty_like(fast_p) + param_state['lookahead_slow_buff'].copy_(fast_p) + slow = param_state['lookahead_slow_buff'] + slow.add_(fast_p - slow, alpha=group['lookahead_alpha']) + fast_p.copy_(slow) + + def sync_lookahead(self): + for group in self._base_optimizer.param_groups: + self.update_slow(group) + + @torch.no_grad() + def step(self, closure=None): + loss = self._base_optimizer.step(closure) + for group in self._base_optimizer.param_groups: + group['lookahead_step'] += 1 + if group['lookahead_step'] % group['lookahead_k'] == 0: + self.update_slow(group) + return loss + + def state_dict(self): + return self._base_optimizer.state_dict() + + def load_state_dict(self, state_dict): + self._base_optimizer.load_state_dict(state_dict) + self.param_groups = self._base_optimizer.param_groups diff --git a/testbed/huggingface__pytorch-image-models/timm/optim/madgrad.py b/testbed/huggingface__pytorch-image-models/timm/optim/madgrad.py new file mode 100644 index 0000000000000000000000000000000000000000..a76713bf27ed1daf0ce598ac5f25c6238c7fdb57 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/optim/madgrad.py @@ -0,0 +1,184 @@ +""" PyTorch MADGRAD optimizer + +MADGRAD: https://arxiv.org/abs/2101.11075 + +Code from: https://github.com/facebookresearch/madgrad +""" +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import math +from typing import TYPE_CHECKING, Any, Callable, Optional + +import torch +import torch.optim + +if TYPE_CHECKING: + from torch.optim.optimizer import _params_t +else: + _params_t = Any + + +class MADGRAD(torch.optim.Optimizer): + """ + MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic + Optimization. + + .. _MADGRAD: https://arxiv.org/abs/2101.11075 + + MADGRAD is a general purpose optimizer that can be used in place of SGD or + Adam may converge faster and generalize better. Currently GPU-only. + Typically, the same learning rate schedule that is used for SGD or Adam may + be used. The overall learning rate is not comparable to either method and + should be determined by a hyper-parameter sweep. + + MADGRAD requires less weight decay than other methods, often as little as + zero. Momentum values used for SGD or Adam's beta1 should work here also. + + On sparse problems both weight_decay and momentum should be set to 0. + + Arguments: + params (iterable): + Iterable of parameters to optimize or dicts defining parameter groups. + lr (float): + Learning rate (default: 1e-2). + momentum (float): + Momentum value in the range [0,1) (default: 0.9). + weight_decay (float): + Weight decay, i.e. a L2 penalty (default: 0). + eps (float): + Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6). + """ + + def __init__( + self, + params: _params_t, + lr: float = 1e-2, + momentum: float = 0.9, + weight_decay: float = 0, + eps: float = 1e-6, + decoupled_decay: bool = False, + ): + if momentum < 0 or momentum >= 1: + raise ValueError(f"Momentum {momentum} must be in the range [0,1]") + if lr <= 0: + raise ValueError(f"Learning rate {lr} must be positive") + if weight_decay < 0: + raise ValueError(f"Weight decay {weight_decay} must be non-negative") + if eps < 0: + raise ValueError(f"Eps must be non-negative") + + defaults = dict( + lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay, decoupled_decay=decoupled_decay) + super().__init__(params, defaults) + + @property + def supports_memory_efficient_fp16(self) -> bool: + return False + + @property + def supports_flat_params(self) -> bool: + return True + + @torch.no_grad() + def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]: + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + eps = group['eps'] + lr = group['lr'] + eps + weight_decay = group['weight_decay'] + momentum = group['momentum'] + ck = 1 - momentum + + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad + if momentum != 0.0 and grad.is_sparse: + raise RuntimeError("momentum != 0 is not compatible with sparse gradients") + + state = self.state[p] + if len(state) == 0: + state['step'] = 0 + state['grad_sum_sq'] = torch.zeros_like(p) + state['s'] = torch.zeros_like(p) + if momentum != 0: + state['x0'] = torch.clone(p).detach() + + state['step'] += 1 + grad_sum_sq = state['grad_sum_sq'] + s = state['s'] + lamb = lr * math.sqrt(state['step']) + + # Apply weight decay + if weight_decay != 0: + if group['decoupled_decay']: + p.mul_(1.0 - group['lr'] * weight_decay) + else: + if grad.is_sparse: + raise RuntimeError("weight_decay option is not compatible with sparse gradients") + grad.add_(p, alpha=weight_decay) + + if grad.is_sparse: + grad = grad.coalesce() + grad_val = grad._values() + + p_masked = p.sparse_mask(grad) + grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad) + s_masked = s.sparse_mask(grad) + + # Compute x_0 from other known quantities + rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps) + x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1) + + # Dense + sparse op + grad_sq = grad * grad + grad_sum_sq.add_(grad_sq, alpha=lamb) + grad_sum_sq_masked.add_(grad_sq, alpha=lamb) + + rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps) + + s.add_(grad, alpha=lamb) + s_masked._values().add_(grad_val, alpha=lamb) + + # update masked copy of p + p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1) + # Copy updated masked p to dense p using an add operation + p_masked._values().add_(p_kp1_masked_vals, alpha=-1) + p.add_(p_masked, alpha=-1) + else: + if momentum == 0: + # Compute x_0 from other known quantities + rms = grad_sum_sq.pow(1 / 3).add_(eps) + x0 = p.addcdiv(s, rms, value=1) + else: + x0 = state['x0'] + + # Accumulate second moments + grad_sum_sq.addcmul_(grad, grad, value=lamb) + rms = grad_sum_sq.pow(1 / 3).add_(eps) + + # Update s + s.add_(grad, alpha=lamb) + + # Step + if momentum == 0: + p.copy_(x0.addcdiv(s, rms, value=-1)) + else: + z = x0.addcdiv(s, rms, value=-1) + + # p is a moving average of z + p.mul_(1 - ck).add_(z, alpha=ck) + + return loss diff --git a/testbed/huggingface__pytorch-image-models/timm/optim/nadam.py b/testbed/huggingface__pytorch-image-models/timm/optim/nadam.py new file mode 100644 index 0000000000000000000000000000000000000000..6268d5d451ed2fe26b47e46476dc1feee7da9649 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/optim/nadam.py @@ -0,0 +1,92 @@ +import math + +import torch +from torch.optim.optimizer import Optimizer + + +class Nadam(Optimizer): + """Implements Nadam algorithm (a variant of Adam based on Nesterov momentum). + + It has been proposed in `Incorporating Nesterov Momentum into Adam`__. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 2e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + schedule_decay (float, optional): momentum schedule decay (default: 4e-3) + + __ http://cs229.stanford.edu/proj2015/054_report.pdf + __ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf + + Originally taken from: https://github.com/pytorch/pytorch/pull/1408 + NOTE: Has potential issues but does work well on some problems. + """ + + def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8, + weight_decay=0, schedule_decay=4e-3): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + defaults = dict( + lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, schedule_decay=schedule_decay) + super(Nadam, self).__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['m_schedule'] = 1. + state['exp_avg'] = torch.zeros_like(p) + state['exp_avg_sq'] = torch.zeros_like(p) + + # Warming momentum schedule + m_schedule = state['m_schedule'] + schedule_decay = group['schedule_decay'] + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + eps = group['eps'] + state['step'] += 1 + t = state['step'] + bias_correction2 = 1 - beta2 ** t + + if group['weight_decay'] != 0: + grad = grad.add(p, alpha=group['weight_decay']) + + momentum_cache_t = beta1 * (1. - 0.5 * (0.96 ** (t * schedule_decay))) + momentum_cache_t_1 = beta1 * (1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay))) + m_schedule_new = m_schedule * momentum_cache_t + m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1 + state['m_schedule'] = m_schedule_new + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=1. - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1. - beta2) + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps) + p.addcdiv_(grad, denom, value=-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new)) + p.addcdiv_(exp_avg, denom, value=-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next)) + + return loss diff --git a/testbed/huggingface__pytorch-image-models/timm/optim/nvnovograd.py b/testbed/huggingface__pytorch-image-models/timm/optim/nvnovograd.py new file mode 100644 index 0000000000000000000000000000000000000000..fda3f4a620fcca5593034dfb9683f2c8f3b78ac1 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/optim/nvnovograd.py @@ -0,0 +1,120 @@ +""" Nvidia NovoGrad Optimizer. +Original impl by Nvidia from Jasper example: + - https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechRecognition/Jasper +Paper: `Stochastic Gradient Methods with Layer-wise Adaptive Moments for Training of Deep Networks` + - https://arxiv.org/abs/1905.11286 +""" + +import torch +from torch.optim.optimizer import Optimizer +import math + + +class NvNovoGrad(Optimizer): + """ + Implements Novograd algorithm. + + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.95, 0.98)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + grad_averaging: gradient averaging + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + """ + + def __init__(self, params, lr=1e-3, betas=(0.95, 0.98), eps=1e-8, + weight_decay=0, grad_averaging=False, amsgrad=False): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, + grad_averaging=grad_averaging, + amsgrad=amsgrad) + + super(NvNovoGrad, self).__init__(params, defaults) + + def __setstate__(self, state): + super(NvNovoGrad, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError('Sparse gradients are not supported.') + amsgrad = group['amsgrad'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + if amsgrad: + max_exp_avg_sq = state['max_exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + + norm = torch.sum(torch.pow(grad, 2)) + + if exp_avg_sq == 0: + exp_avg_sq.copy_(norm) + else: + exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2) + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) + # Use the max. for normalizing running avg. of gradient + denom = max_exp_avg_sq.sqrt().add_(group['eps']) + else: + denom = exp_avg_sq.sqrt().add_(group['eps']) + + grad.div_(denom) + if group['weight_decay'] != 0: + grad.add_(p, alpha=group['weight_decay']) + if group['grad_averaging']: + grad.mul_(1 - beta1) + exp_avg.mul_(beta1).add_(grad) + + p.add_(exp_avg, alpha=-group['lr']) + + return loss diff --git a/testbed/huggingface__pytorch-image-models/timm/optim/optim_factory.py b/testbed/huggingface__pytorch-image-models/timm/optim/optim_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..e174915679b7347d72edc09cd29d954a82fe3e4e --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/optim/optim_factory.py @@ -0,0 +1,217 @@ +""" Optimizer Factory w/ Custom Weight Decay +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import Optional + +import torch +import torch.nn as nn +import torch.optim as optim + +from .adabelief import AdaBelief +from .adafactor import Adafactor +from .adahessian import Adahessian +from .adamp import AdamP +from .lamb import Lamb +from .lars import Lars +from .lookahead import Lookahead +from .madgrad import MADGRAD +from .nadam import Nadam +from .nvnovograd import NvNovoGrad +from .radam import RAdam +from .rmsprop_tf import RMSpropTF +from .sgdp import SGDP + +try: + from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD + has_apex = True +except ImportError: + has_apex = False + + +def add_weight_decay(model, weight_decay=1e-5, skip_list=()): + decay = [] + no_decay = [] + for name, param in model.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: + no_decay.append(param) + else: + decay.append(param) + return [ + {'params': no_decay, 'weight_decay': 0.}, + {'params': decay, 'weight_decay': weight_decay}] + + +def optimizer_kwargs(cfg): + """ cfg/argparse to kwargs helper + Convert optimizer args in argparse args or cfg like object to keyword args for updated create fn. + """ + kwargs = dict( + opt=cfg.opt, + lr=cfg.lr, + weight_decay=cfg.weight_decay, + momentum=cfg.momentum) + if getattr(cfg, 'opt_eps', None) is not None: + kwargs['eps'] = cfg.opt_eps + if getattr(cfg, 'opt_betas', None) is not None: + kwargs['betas'] = cfg.opt_betas + if getattr(cfg, 'opt_args', None) is not None: + kwargs.update(cfg.opt_args) + return kwargs + + +def create_optimizer(args, model, filter_bias_and_bn=True): + """ Legacy optimizer factory for backwards compatibility. + NOTE: Use create_optimizer_v2 for new code. + """ + return create_optimizer_v2( + model, + **optimizer_kwargs(cfg=args), + filter_bias_and_bn=filter_bias_and_bn, + ) + + +def create_optimizer_v2( + model_or_params, + opt: str = 'sgd', + lr: Optional[float] = None, + weight_decay: float = 0., + momentum: float = 0.9, + filter_bias_and_bn: bool = True, + **kwargs): + """ Create an optimizer. + + TODO currently the model is passed in and all parameters are selected for optimization. + For more general use an interface that allows selection of parameters to optimize and lr groups, one of: + * a filter fn interface that further breaks params into groups in a weight_decay compatible fashion + * expose the parameters interface and leave it up to caller + + Args: + model_or_params (nn.Module): model containing parameters to optimize + opt: name of optimizer to create + lr: initial learning rate + weight_decay: weight decay to apply in optimizer + momentum: momentum for momentum based optimizers (others may use betas via kwargs) + filter_bias_and_bn: filter out bias, bn and other 1d params from weight decay + **kwargs: extra optimizer specific kwargs to pass through + + Returns: + Optimizer + """ + if isinstance(model_or_params, nn.Module): + # a model was passed in, extract parameters and add weight decays to appropriate layers + if weight_decay and filter_bias_and_bn: + skip = {} + if hasattr(model_or_params, 'no_weight_decay'): + skip = model_or_params.no_weight_decay() + parameters = add_weight_decay(model_or_params, weight_decay, skip) + weight_decay = 0. + else: + parameters = model_or_params.parameters() + else: + # iterable of parameters or param groups passed in + parameters = model_or_params + + opt_lower = opt.lower() + opt_split = opt_lower.split('_') + opt_lower = opt_split[-1] + if 'fused' in opt_lower: + assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' + + opt_args = dict(weight_decay=weight_decay, **kwargs) + if lr is not None: + opt_args.setdefault('lr', lr) + + # basic SGD & related + if opt_lower == 'sgd' or opt_lower == 'nesterov': + # NOTE 'sgd' refers to SGD + nesterov momentum for legacy / backwards compat reasons + opt_args.pop('eps', None) + optimizer = optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args) + elif opt_lower == 'momentum': + opt_args.pop('eps', None) + optimizer = optim.SGD(parameters, momentum=momentum, nesterov=False, **opt_args) + elif opt_lower == 'sgdp': + optimizer = SGDP(parameters, momentum=momentum, nesterov=True, **opt_args) + + # adaptive + elif opt_lower == 'adam': + optimizer = optim.Adam(parameters, **opt_args) + elif opt_lower == 'adamw': + optimizer = optim.AdamW(parameters, **opt_args) + elif opt_lower == 'adamp': + optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args) + elif opt_lower == 'nadam': + try: + # NOTE PyTorch >= 1.10 should have native NAdam + optimizer = optim.Nadam(parameters, **opt_args) + except AttributeError: + optimizer = Nadam(parameters, **opt_args) + elif opt_lower == 'radam': + optimizer = RAdam(parameters, **opt_args) + elif opt_lower == 'adamax': + optimizer = optim.Adamax(parameters, **opt_args) + elif opt_lower == 'adabelief': + optimizer = AdaBelief(parameters, rectify=False, **opt_args) + elif opt_lower == 'radabelief': + optimizer = AdaBelief(parameters, rectify=True, **opt_args) + elif opt_lower == 'adadelta': + optimizer = optim.Adadelta(parameters, **opt_args) + elif opt_lower == 'adagrad': + opt_args.setdefault('eps', 1e-8) + optimizer = optim.Adagrad(parameters, **opt_args) + elif opt_lower == 'adafactor': + optimizer = Adafactor(parameters, **opt_args) + elif opt_lower == 'lamb': + optimizer = Lamb(parameters, **opt_args) + elif opt_lower == 'lambc': + optimizer = Lamb(parameters, trust_clip=True, **opt_args) + elif opt_lower == 'larc': + optimizer = Lars(parameters, momentum=momentum, trust_clip=True, **opt_args) + elif opt_lower == 'lars': + optimizer = Lars(parameters, momentum=momentum, **opt_args) + elif opt_lower == 'nlarc': + optimizer = Lars(parameters, momentum=momentum, trust_clip=True, nesterov=True, **opt_args) + elif opt_lower == 'nlars': + optimizer = Lars(parameters, momentum=momentum, nesterov=True, **opt_args) + elif opt_lower == 'madgrad': + optimizer = MADGRAD(parameters, momentum=momentum, **opt_args) + elif opt_lower == 'madgradw': + optimizer = MADGRAD(parameters, momentum=momentum, decoupled_decay=True, **opt_args) + elif opt_lower == 'novograd' or opt_lower == 'nvnovograd': + optimizer = NvNovoGrad(parameters, **opt_args) + elif opt_lower == 'rmsprop': + optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=momentum, **opt_args) + elif opt_lower == 'rmsproptf': + optimizer = RMSpropTF(parameters, alpha=0.9, momentum=momentum, **opt_args) + + # second order + elif opt_lower == 'adahessian': + optimizer = Adahessian(parameters, **opt_args) + + # NVIDIA fused optimizers, require APEX to be installed + elif opt_lower == 'fusedsgd': + opt_args.pop('eps', None) + optimizer = FusedSGD(parameters, momentum=momentum, nesterov=True, **opt_args) + elif opt_lower == 'fusedmomentum': + opt_args.pop('eps', None) + optimizer = FusedSGD(parameters, momentum=momentum, nesterov=False, **opt_args) + elif opt_lower == 'fusedadam': + optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args) + elif opt_lower == 'fusedadamw': + optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args) + elif opt_lower == 'fusedlamb': + optimizer = FusedLAMB(parameters, **opt_args) + elif opt_lower == 'fusednovograd': + opt_args.setdefault('betas', (0.95, 0.98)) + optimizer = FusedNovoGrad(parameters, **opt_args) + + else: + assert False and "Invalid optimizer" + raise ValueError + + if len(opt_split) > 1: + if opt_split[0] == 'lookahead': + optimizer = Lookahead(optimizer) + + return optimizer diff --git a/testbed/huggingface__pytorch-image-models/timm/optim/radam.py b/testbed/huggingface__pytorch-image-models/timm/optim/radam.py new file mode 100644 index 0000000000000000000000000000000000000000..eb8d22e06c42e487c831297008851b4adc254d78 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/optim/radam.py @@ -0,0 +1,89 @@ +"""RAdam Optimizer. +Implementation lifted from: https://github.com/LiyuanLucasLiu/RAdam +Paper: `On the Variance of the Adaptive Learning Rate and Beyond` - https://arxiv.org/abs/1908.03265 +""" +import math +import torch +from torch.optim.optimizer import Optimizer + + +class RAdam(Optimizer): + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): + defaults = dict( + lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, + buffer=[[None, None, None] for _ in range(10)]) + super(RAdam, self).__init__(params, defaults) + + def __setstate__(self, state): + super(RAdam, self).__setstate__(state) + + @torch.no_grad() + def step(self, closure=None): + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.float() + if grad.is_sparse: + raise RuntimeError('RAdam does not support sparse gradients') + + p_fp32 = p.float() + + state = self.state[p] + + if len(state) == 0: + state['step'] = 0 + state['exp_avg'] = torch.zeros_like(p_fp32) + state['exp_avg_sq'] = torch.zeros_like(p_fp32) + else: + state['exp_avg'] = state['exp_avg'].type_as(p_fp32) + state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_fp32) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + + state['step'] += 1 + buffered = group['buffer'][int(state['step'] % 10)] + if state['step'] == buffered[0]: + num_sma, step_size = buffered[1], buffered[2] + else: + buffered[0] = state['step'] + beta2_t = beta2 ** state['step'] + num_sma_max = 2 / (1 - beta2) - 1 + num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) + buffered[1] = num_sma + + # more conservative since it's an approximated value + if num_sma >= 5: + step_size = group['lr'] * math.sqrt( + (1 - beta2_t) * + (num_sma - 4) / (num_sma_max - 4) * + (num_sma - 2) / num_sma * + num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step']) + else: + step_size = group['lr'] / (1 - beta1 ** state['step']) + buffered[2] = step_size + + if group['weight_decay'] != 0: + p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * group['lr']) + + # more conservative since it's an approximated value + if num_sma >= 5: + denom = exp_avg_sq.sqrt().add_(group['eps']) + p_fp32.addcdiv_(exp_avg, denom, value=-step_size) + else: + p_fp32.add_(exp_avg, alpha=-step_size) + + p.copy_(p_fp32) + + return loss diff --git a/testbed/huggingface__pytorch-image-models/timm/optim/rmsprop_tf.py b/testbed/huggingface__pytorch-image-models/timm/optim/rmsprop_tf.py new file mode 100644 index 0000000000000000000000000000000000000000..0817887db380261dfee3fcd4bd155b5d923f5248 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/optim/rmsprop_tf.py @@ -0,0 +1,139 @@ +""" RMSProp modified to behave like Tensorflow impl + +Originally cut & paste from PyTorch RMSProp +https://github.com/pytorch/pytorch/blob/063946d2b3f3f1e953a2a3b54e0b34f1393de295/torch/optim/rmsprop.py +Licensed under BSD-Clause 3 (ish), https://github.com/pytorch/pytorch/blob/master/LICENSE + +Modifications Copyright 2021 Ross Wightman +""" + +import torch +from torch.optim import Optimizer + + +class RMSpropTF(Optimizer): + """Implements RMSprop algorithm (TensorFlow style epsilon) + + NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt + and a few other modifications to closer match Tensorflow for matching hyper-params. + + Noteworthy changes include: + 1. Epsilon applied inside square-root + 2. square_avg initialized to ones + 3. LR scaling of update accumulated in momentum buffer + + Proposed by G. Hinton in his + `course `_. + + The centered version first appears in `Generating Sequences + With Recurrent Neural Networks `_. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-2) + momentum (float, optional): momentum factor (default: 0) + alpha (float, optional): smoothing (decay) constant (default: 0.9) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-10) + centered (bool, optional) : if ``True``, compute the centered RMSProp, + the gradient is normalized by an estimation of its variance + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + decoupled_decay (bool, optional): decoupled weight decay as per https://arxiv.org/abs/1711.05101 + lr_in_momentum (bool, optional): learning rate scaling is included in the momentum buffer + update as per defaults in Tensorflow + + """ + + def __init__(self, params, lr=1e-2, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0., centered=False, + decoupled_decay=False, lr_in_momentum=True): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= momentum: + raise ValueError("Invalid momentum value: {}".format(momentum)) + if not 0.0 <= weight_decay: + raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) + if not 0.0 <= alpha: + raise ValueError("Invalid alpha value: {}".format(alpha)) + + defaults = dict( + lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay, + decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum) + super(RMSpropTF, self).__init__(params, defaults) + + def __setstate__(self, state): + super(RMSpropTF, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('momentum', 0) + group.setdefault('centered', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError('RMSprop does not support sparse gradients') + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['square_avg'] = torch.ones_like(p) # PyTorch inits to zero + if group['momentum'] > 0: + state['momentum_buffer'] = torch.zeros_like(p) + if group['centered']: + state['grad_avg'] = torch.zeros_like(p) + + square_avg = state['square_avg'] + one_minus_alpha = 1. - group['alpha'] + + state['step'] += 1 + + if group['weight_decay'] != 0: + if group['decoupled_decay']: + p.mul_(1. - group['lr'] * group['weight_decay']) + else: + grad = grad.add(p, alpha=group['weight_decay']) + + # Tensorflow order of ops for updating squared avg + square_avg.add_(grad.pow(2) - square_avg, alpha=one_minus_alpha) + # square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha) # PyTorch original + + if group['centered']: + grad_avg = state['grad_avg'] + grad_avg.add_(grad - grad_avg, alpha=one_minus_alpha) + avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).add(group['eps']).sqrt_() # eps in sqrt + # grad_avg.mul_(alpha).add_(grad, alpha=1 - alpha) # PyTorch original + else: + avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt + + if group['momentum'] > 0: + buf = state['momentum_buffer'] + # Tensorflow accumulates the LR scaling in the momentum buffer + if group['lr_in_momentum']: + buf.mul_(group['momentum']).addcdiv_(grad, avg, value=group['lr']) + p.add_(-buf) + else: + # PyTorch scales the param update by LR + buf.mul_(group['momentum']).addcdiv_(grad, avg) + p.add_(buf, alpha=-group['lr']) + else: + p.addcdiv_(grad, avg, value=-group['lr']) + + return loss diff --git a/testbed/huggingface__pytorch-image-models/timm/optim/sgdp.py b/testbed/huggingface__pytorch-image-models/timm/optim/sgdp.py new file mode 100644 index 0000000000000000000000000000000000000000..baf05fa55c632371498ec53ff679b11023429df6 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/optim/sgdp.py @@ -0,0 +1,70 @@ +""" +SGDP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/sgdp.py + +Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217 +Code: https://github.com/clovaai/AdamP + +Copyright (c) 2020-present NAVER Corp. +MIT license +""" + +import torch +import torch.nn.functional as F +from torch.optim.optimizer import Optimizer, required +import math + +from .adamp import projection + + +class SGDP(Optimizer): + def __init__(self, params, lr=required, momentum=0, dampening=0, + weight_decay=0, nesterov=False, eps=1e-8, delta=0.1, wd_ratio=0.1): + defaults = dict( + lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, + nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio) + super(SGDP, self).__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + weight_decay = group['weight_decay'] + momentum = group['momentum'] + dampening = group['dampening'] + nesterov = group['nesterov'] + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + state = self.state[p] + + # State initialization + if len(state) == 0: + state['momentum'] = torch.zeros_like(p) + + # SGD + buf = state['momentum'] + buf.mul_(momentum).add_(grad, alpha=1. - dampening) + if nesterov: + d_p = grad + momentum * buf + else: + d_p = buf + + # Projection + wd_ratio = 1. + if len(p.shape) > 1: + d_p, wd_ratio = projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps']) + + # Weight decay + if weight_decay != 0: + p.mul_(1. - group['lr'] * group['weight_decay'] * wd_ratio / (1-momentum)) + + # Step + p.add_(d_p, alpha=-group['lr']) + + return loss diff --git a/testbed/huggingface__pytorch-image-models/timm/scheduler/__init__.py b/testbed/huggingface__pytorch-image-models/timm/scheduler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f1961b88fc3c37cdd8c73f9fddd4bfa1ada95f23 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/scheduler/__init__.py @@ -0,0 +1,8 @@ +from .cosine_lr import CosineLRScheduler +from .multistep_lr import MultiStepLRScheduler +from .plateau_lr import PlateauLRScheduler +from .poly_lr import PolyLRScheduler +from .step_lr import StepLRScheduler +from .tanh_lr import TanhLRScheduler + +from .scheduler_factory import create_scheduler diff --git a/testbed/huggingface__pytorch-image-models/timm/scheduler/cosine_lr.py b/testbed/huggingface__pytorch-image-models/timm/scheduler/cosine_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..84ee349ec281f89e331be3643b613e158bb3c194 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/scheduler/cosine_lr.py @@ -0,0 +1,119 @@ +""" Cosine Scheduler + +Cosine LR schedule with warmup, cycle/restarts, noise, k-decay. + +Hacked together by / Copyright 2021 Ross Wightman +""" +import logging +import math +import numpy as np +import torch + +from .scheduler import Scheduler + + +_logger = logging.getLogger(__name__) + + +class CosineLRScheduler(Scheduler): + """ + Cosine decay with restarts. + This is described in the paper https://arxiv.org/abs/1608.03983. + + Inspiration from + https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py + + k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909 + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + t_initial: int, + lr_min: float = 0., + cycle_mul: float = 1., + cycle_decay: float = 1., + cycle_limit: int = 1, + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + k_decay=1.0, + initialize=True) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + assert t_initial > 0 + assert lr_min >= 0 + if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1: + _logger.warning("Cosine annealing scheduler will have no effect on the learning " + "rate since t_initial = t_mul = eta_mul = 1.") + self.t_initial = t_initial + self.lr_min = lr_min + self.cycle_mul = cycle_mul + self.cycle_decay = cycle_decay + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + self.t_in_epochs = t_in_epochs + self.k_decay = k_decay + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if self.cycle_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) + t_i = self.cycle_mul ** i * self.t_initial + t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - (self.t_initial * i) + + gamma = self.cycle_decay ** i + lr_max_values = [v * gamma for v in self.base_values] + k = self.k_decay + + if i < self.cycle_limit: + lrs = [ + self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 + math.cos(math.pi * t_curr ** k / t_i ** k)) + for lr_max in lr_max_values + ] + else: + lrs = [self.lr_min for _ in self.base_values] + + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None + + def get_cycle_length(self, cycles=0): + cycles = max(1, cycles or self.cycle_limit) + if self.cycle_mul == 1.0: + return self.t_initial * cycles + else: + return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) diff --git a/testbed/huggingface__pytorch-image-models/timm/scheduler/multistep_lr.py b/testbed/huggingface__pytorch-image-models/timm/scheduler/multistep_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..a5d5fe1980f20d0a1251d7771f475110805f8862 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/scheduler/multistep_lr.py @@ -0,0 +1,65 @@ +""" MultiStep LR Scheduler + +Basic multi step LR schedule with warmup, noise. +""" +import torch +import bisect +from timm.scheduler.scheduler import Scheduler +from typing import List + +class MultiStepLRScheduler(Scheduler): + """ + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + decay_t: List[int], + decay_rate: float = 1., + warmup_t=0, + warmup_lr_init=0, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True, + ) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + self.decay_t = decay_t + self.decay_rate = decay_rate + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.t_in_epochs = t_in_epochs + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def get_curr_decay_steps(self, t): + # find where in the array t goes, + # assumes self.decay_t is sorted + return bisect.bisect_right(self.decay_t, t+1) + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + lrs = [v * (self.decay_rate ** self.get_curr_decay_steps(t)) for v in self.base_values] + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None diff --git a/testbed/huggingface__pytorch-image-models/timm/scheduler/plateau_lr.py b/testbed/huggingface__pytorch-image-models/timm/scheduler/plateau_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..4f2cacb65a1bf23d10aa6fd296f74579571043cf --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/scheduler/plateau_lr.py @@ -0,0 +1,113 @@ +""" Plateau Scheduler + +Adapts PyTorch plateau scheduler and allows application of noise, warmup. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch + +from .scheduler import Scheduler + + +class PlateauLRScheduler(Scheduler): + """Decay the LR by a factor every time the validation loss plateaus.""" + + def __init__(self, + optimizer, + decay_rate=0.1, + patience_t=10, + verbose=True, + threshold=1e-4, + cooldown_t=0, + warmup_t=0, + warmup_lr_init=0, + lr_min=0, + mode='max', + noise_range_t=None, + noise_type='normal', + noise_pct=0.67, + noise_std=1.0, + noise_seed=None, + initialize=True, + ): + super().__init__(optimizer, 'lr', initialize=initialize) + + self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( + self.optimizer, + patience=patience_t, + factor=decay_rate, + verbose=verbose, + threshold=threshold, + cooldown=cooldown_t, + mode=mode, + min_lr=lr_min + ) + + self.noise_range = noise_range_t + self.noise_pct = noise_pct + self.noise_type = noise_type + self.noise_std = noise_std + self.noise_seed = noise_seed if noise_seed is not None else 42 + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + self.restore_lr = None + + def state_dict(self): + return { + 'best': self.lr_scheduler.best, + 'last_epoch': self.lr_scheduler.last_epoch, + } + + def load_state_dict(self, state_dict): + self.lr_scheduler.best = state_dict['best'] + if 'last_epoch' in state_dict: + self.lr_scheduler.last_epoch = state_dict['last_epoch'] + + # override the base class step fn completely + def step(self, epoch, metric=None): + if epoch <= self.warmup_t: + lrs = [self.warmup_lr_init + epoch * s for s in self.warmup_steps] + super().update_groups(lrs) + else: + if self.restore_lr is not None: + # restore actual LR from before our last noise perturbation before stepping base + for i, param_group in enumerate(self.optimizer.param_groups): + param_group['lr'] = self.restore_lr[i] + self.restore_lr = None + + self.lr_scheduler.step(metric, epoch) # step the base scheduler + + if self.noise_range is not None: + if isinstance(self.noise_range, (list, tuple)): + apply_noise = self.noise_range[0] <= epoch < self.noise_range[1] + else: + apply_noise = epoch >= self.noise_range + if apply_noise: + self._apply_noise(epoch) + + def _apply_noise(self, epoch): + g = torch.Generator() + g.manual_seed(self.noise_seed + epoch) + if self.noise_type == 'normal': + while True: + # resample if noise out of percent limit, brute force but shouldn't spin much + noise = torch.randn(1, generator=g).item() + if abs(noise) < self.noise_pct: + break + else: + noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct + + # apply the noise on top of previous LR, cache the old value so we can restore for normal + # stepping of base scheduler + restore_lr = [] + for i, param_group in enumerate(self.optimizer.param_groups): + old_lr = float(param_group['lr']) + restore_lr.append(old_lr) + new_lr = old_lr + old_lr * noise + param_group['lr'] = new_lr + self.restore_lr = restore_lr diff --git a/testbed/huggingface__pytorch-image-models/timm/scheduler/poly_lr.py b/testbed/huggingface__pytorch-image-models/timm/scheduler/poly_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..0c1e63b739124d618a13996372d4175692758783 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/scheduler/poly_lr.py @@ -0,0 +1,116 @@ +""" Polynomial Scheduler + +Polynomial LR schedule with warmup, noise. + +Hacked together by / Copyright 2021 Ross Wightman +""" +import math +import logging + +import torch + +from .scheduler import Scheduler + + +_logger = logging.getLogger(__name__) + + +class PolyLRScheduler(Scheduler): + """ Polynomial LR Scheduler w/ warmup, noise, and k-decay + + k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909 + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + t_initial: int, + power: float = 0.5, + lr_min: float = 0., + cycle_mul: float = 1., + cycle_decay: float = 1., + cycle_limit: int = 1, + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + k_decay=.5, + initialize=True) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + assert t_initial > 0 + assert lr_min >= 0 + if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1: + _logger.warning("Cosine annealing scheduler will have no effect on the learning " + "rate since t_initial = t_mul = eta_mul = 1.") + self.t_initial = t_initial + self.power = power + self.lr_min = lr_min + self.cycle_mul = cycle_mul + self.cycle_decay = cycle_decay + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + self.t_in_epochs = t_in_epochs + self.k_decay = k_decay + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if self.cycle_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) + t_i = self.cycle_mul ** i * self.t_initial + t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - (self.t_initial * i) + + gamma = self.cycle_decay ** i + lr_max_values = [v * gamma for v in self.base_values] + k = self.k_decay + + if i < self.cycle_limit: + lrs = [ + self.lr_min + (lr_max - self.lr_min) * (1 - t_curr ** k / t_i ** k) ** self.power + for lr_max in lr_max_values + ] + else: + lrs = [self.lr_min for _ in self.base_values] + + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None + + def get_cycle_length(self, cycles=0): + cycles = max(1, cycles or self.cycle_limit) + if self.cycle_mul == 1.0: + return self.t_initial * cycles + else: + return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) diff --git a/testbed/huggingface__pytorch-image-models/timm/scheduler/scheduler.py b/testbed/huggingface__pytorch-image-models/timm/scheduler/scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..21d51509c87a0783c6b61986c574a3ed5366e165 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/scheduler/scheduler.py @@ -0,0 +1,105 @@ +from typing import Dict, Any + +import torch + + +class Scheduler: + """ Parameter Scheduler Base Class + A scheduler base class that can be used to schedule any optimizer parameter groups. + + Unlike the builtin PyTorch schedulers, this is intended to be consistently called + * At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value + * At the END of each optimizer update, after incrementing the update count, to calculate next update's value + + The schedulers built on this should try to remain as stateless as possible (for simplicity). + + This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch' + and -1 values for special behaviour. All epoch and update counts must be tracked in the training + code and explicitly passed in to the schedulers on the corresponding step or step_update call. + + Based on ideas from: + * https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler + * https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + param_group_field: str, + noise_range_t=None, + noise_type='normal', + noise_pct=0.67, + noise_std=1.0, + noise_seed=None, + initialize: bool = True) -> None: + self.optimizer = optimizer + self.param_group_field = param_group_field + self._initial_param_group_field = f"initial_{param_group_field}" + if initialize: + for i, group in enumerate(self.optimizer.param_groups): + if param_group_field not in group: + raise KeyError(f"{param_group_field} missing from param_groups[{i}]") + group.setdefault(self._initial_param_group_field, group[param_group_field]) + else: + for i, group in enumerate(self.optimizer.param_groups): + if self._initial_param_group_field not in group: + raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]") + self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups] + self.metric = None # any point to having this for all? + self.noise_range_t = noise_range_t + self.noise_pct = noise_pct + self.noise_type = noise_type + self.noise_std = noise_std + self.noise_seed = noise_seed if noise_seed is not None else 42 + self.update_groups(self.base_values) + + def state_dict(self) -> Dict[str, Any]: + return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} + + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: + self.__dict__.update(state_dict) + + def get_epoch_values(self, epoch: int): + return None + + def get_update_values(self, num_updates: int): + return None + + def step(self, epoch: int, metric: float = None) -> None: + self.metric = metric + values = self.get_epoch_values(epoch) + if values is not None: + values = self._add_noise(values, epoch) + self.update_groups(values) + + def step_update(self, num_updates: int, metric: float = None): + self.metric = metric + values = self.get_update_values(num_updates) + if values is not None: + values = self._add_noise(values, num_updates) + self.update_groups(values) + + def update_groups(self, values): + if not isinstance(values, (list, tuple)): + values = [values] * len(self.optimizer.param_groups) + for param_group, value in zip(self.optimizer.param_groups, values): + param_group[self.param_group_field] = value + + def _add_noise(self, lrs, t): + if self.noise_range_t is not None: + if isinstance(self.noise_range_t, (list, tuple)): + apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1] + else: + apply_noise = t >= self.noise_range_t + if apply_noise: + g = torch.Generator() + g.manual_seed(self.noise_seed + t) + if self.noise_type == 'normal': + while True: + # resample if noise out of percent limit, brute force but shouldn't spin much + noise = torch.randn(1, generator=g).item() + if abs(noise) < self.noise_pct: + break + else: + noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct + lrs = [v + v * noise for v in lrs] + return lrs diff --git a/testbed/huggingface__pytorch-image-models/timm/scheduler/scheduler_factory.py b/testbed/huggingface__pytorch-image-models/timm/scheduler/scheduler_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..72a979c21869f303206fdb70bf3a026ea83d9f80 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/scheduler/scheduler_factory.py @@ -0,0 +1,107 @@ +""" Scheduler Factory +Hacked together by / Copyright 2021 Ross Wightman +""" +from .cosine_lr import CosineLRScheduler +from .multistep_lr import MultiStepLRScheduler +from .plateau_lr import PlateauLRScheduler +from .poly_lr import PolyLRScheduler +from .step_lr import StepLRScheduler +from .tanh_lr import TanhLRScheduler + + +def create_scheduler(args, optimizer): + num_epochs = args.epochs + + if getattr(args, 'lr_noise', None) is not None: + lr_noise = getattr(args, 'lr_noise') + if isinstance(lr_noise, (list, tuple)): + noise_range = [n * num_epochs for n in lr_noise] + if len(noise_range) == 1: + noise_range = noise_range[0] + else: + noise_range = lr_noise * num_epochs + else: + noise_range = None + noise_args = dict( + noise_range_t=noise_range, + noise_pct=getattr(args, 'lr_noise_pct', 0.67), + noise_std=getattr(args, 'lr_noise_std', 1.), + noise_seed=getattr(args, 'seed', 42), + ) + cycle_args = dict( + cycle_mul=getattr(args, 'lr_cycle_mul', 1.), + cycle_decay=getattr(args, 'lr_cycle_decay', 0.1), + cycle_limit=getattr(args, 'lr_cycle_limit', 1), + ) + + lr_scheduler = None + if args.sched == 'cosine': + lr_scheduler = CosineLRScheduler( + optimizer, + t_initial=num_epochs, + lr_min=args.min_lr, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + k_decay=getattr(args, 'lr_k_decay', 1.0), + **cycle_args, + **noise_args, + ) + num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs + elif args.sched == 'tanh': + lr_scheduler = TanhLRScheduler( + optimizer, + t_initial=num_epochs, + lr_min=args.min_lr, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + t_in_epochs=True, + **cycle_args, + **noise_args, + ) + num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs + elif args.sched == 'step': + lr_scheduler = StepLRScheduler( + optimizer, + decay_t=args.decay_epochs, + decay_rate=args.decay_rate, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + **noise_args, + ) + elif args.sched == 'multistep': + lr_scheduler = MultiStepLRScheduler( + optimizer, + decay_t=args.decay_epochs, + decay_rate=args.decay_rate, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + **noise_args, + ) + elif args.sched == 'plateau': + mode = 'min' if 'loss' in getattr(args, 'eval_metric', '') else 'max' + lr_scheduler = PlateauLRScheduler( + optimizer, + decay_rate=args.decay_rate, + patience_t=args.patience_epochs, + lr_min=args.min_lr, + mode=mode, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + cooldown_t=0, + **noise_args, + ) + elif args.sched == 'poly': + lr_scheduler = PolyLRScheduler( + optimizer, + power=args.decay_rate, # overloading 'decay_rate' as polynomial power + t_initial=num_epochs, + lr_min=args.min_lr, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + k_decay=getattr(args, 'lr_k_decay', 1.0), + **cycle_args, + **noise_args, + ) + num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs + + return lr_scheduler, num_epochs diff --git a/testbed/huggingface__pytorch-image-models/timm/scheduler/step_lr.py b/testbed/huggingface__pytorch-image-models/timm/scheduler/step_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..f797e1a8cf35999531dd5f1ccbbe09a9d0cf30a9 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/scheduler/step_lr.py @@ -0,0 +1,63 @@ +""" Step Scheduler + +Basic step LR schedule with warmup, noise. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math +import torch + +from .scheduler import Scheduler + + +class StepLRScheduler(Scheduler): + """ + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + decay_t: float, + decay_rate: float = 1., + warmup_t=0, + warmup_lr_init=0, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True, + ) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + self.decay_t = decay_t + self.decay_rate = decay_rate + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.t_in_epochs = t_in_epochs + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + lrs = [v * (self.decay_rate ** (t // self.decay_t)) for v in self.base_values] + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None diff --git a/testbed/huggingface__pytorch-image-models/timm/scheduler/tanh_lr.py b/testbed/huggingface__pytorch-image-models/timm/scheduler/tanh_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..f2d3c9cdb11ad31766062f1a8d3e69d3f845edc1 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/scheduler/tanh_lr.py @@ -0,0 +1,117 @@ +""" TanH Scheduler + +TanH schedule with warmup, cycle/restarts, noise. + +Hacked together by / Copyright 2021 Ross Wightman +""" +import logging +import math +import numpy as np +import torch + +from .scheduler import Scheduler + + +_logger = logging.getLogger(__name__) + + +class TanhLRScheduler(Scheduler): + """ + Hyberbolic-Tangent decay with restarts. + This is described in the paper https://arxiv.org/abs/1806.01593 + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + t_initial: int, + lb: float = -7., + ub: float = 3., + lr_min: float = 0., + cycle_mul: float = 1., + cycle_decay: float = 1., + cycle_limit: int = 1, + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + assert t_initial > 0 + assert lr_min >= 0 + assert lb < ub + assert cycle_limit >= 0 + assert warmup_t >= 0 + assert warmup_lr_init >= 0 + self.lb = lb + self.ub = ub + self.t_initial = t_initial + self.lr_min = lr_min + self.cycle_mul = cycle_mul + self.cycle_decay = cycle_decay + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + self.t_in_epochs = t_in_epochs + if self.warmup_t: + t_v = self.base_values if self.warmup_prefix else self._get_lr(self.warmup_t) + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in t_v] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if self.cycle_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) + t_i = self.cycle_mul ** i * self.t_initial + t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - (self.t_initial * i) + + if i < self.cycle_limit: + gamma = self.cycle_decay ** i + lr_max_values = [v * gamma for v in self.base_values] + + tr = t_curr / t_i + lrs = [ + self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 - math.tanh(self.lb * (1. - tr) + self.ub * tr)) + for lr_max in lr_max_values + ] + else: + lrs = [self.lr_min for _ in self.base_values] + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None + + def get_cycle_length(self, cycles=0): + cycles = max(1, cycles or self.cycle_limit) + if self.cycle_mul == 1.0: + return self.t_initial * cycles + else: + return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) diff --git a/testbed/huggingface__pytorch-image-models/timm/utils/__init__.py b/testbed/huggingface__pytorch-image-models/timm/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d02e62d2d0ce62e594393014208e28c3ace5318b --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/utils/__init__.py @@ -0,0 +1,13 @@ +from .agc import adaptive_clip_grad +from .checkpoint_saver import CheckpointSaver +from .clip_grad import dispatch_clip_grad +from .cuda import ApexScaler, NativeScaler +from .distributed import distribute_bn, reduce_tensor +from .jit import set_jit_legacy +from .log import setup_default_logging, FormatterNoInfo +from .metrics import AverageMeter, accuracy +from .misc import natural_key, add_bool_arg +from .model import unwrap_model, get_state_dict +from .model_ema import ModelEma, ModelEmaV2 +from .random import random_seed +from .summary import update_summary, get_outdir diff --git a/testbed/huggingface__pytorch-image-models/timm/utils/agc.py b/testbed/huggingface__pytorch-image-models/timm/utils/agc.py new file mode 100644 index 0000000000000000000000000000000000000000..f51401726ff6810d97d0fa567f4e31b474325a59 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/utils/agc.py @@ -0,0 +1,42 @@ +""" Adaptive Gradient Clipping + +An impl of AGC, as per (https://arxiv.org/abs/2102.06171): + +@article{brock2021high, + author={Andrew Brock and Soham De and Samuel L. Smith and Karen Simonyan}, + title={High-Performance Large-Scale Image Recognition Without Normalization}, + journal={arXiv preprint arXiv:}, + year={2021} +} + +Code references: + * Official JAX impl (paper authors): https://github.com/deepmind/deepmind-research/tree/master/nfnets + * Phil Wang's PyTorch gist: https://gist.github.com/lucidrains/0d6560077edac419ab5d3aa29e674d5c + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch + + +def unitwise_norm(x, norm_type=2.0): + if x.ndim <= 1: + return x.norm(norm_type) + else: + # works for nn.ConvNd and nn,Linear where output dim is first in the kernel/weight tensor + # might need special cases for other weights (possibly MHA) where this may not be true + return x.norm(norm_type, dim=tuple(range(1, x.ndim)), keepdim=True) + + +def adaptive_clip_grad(parameters, clip_factor=0.01, eps=1e-3, norm_type=2.0): + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + for p in parameters: + if p.grad is None: + continue + p_data = p.detach() + g_data = p.grad.detach() + max_norm = unitwise_norm(p_data, norm_type=norm_type).clamp_(min=eps).mul_(clip_factor) + grad_norm = unitwise_norm(g_data, norm_type=norm_type) + clipped_grad = g_data * (max_norm / grad_norm.clamp(min=1e-6)) + new_grads = torch.where(grad_norm < max_norm, g_data, clipped_grad) + p.grad.detach().copy_(new_grads) diff --git a/testbed/huggingface__pytorch-image-models/timm/utils/checkpoint_saver.py b/testbed/huggingface__pytorch-image-models/timm/utils/checkpoint_saver.py new file mode 100644 index 0000000000000000000000000000000000000000..6aad74ee52655f68220f799efaffcbccdd0748ad --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/utils/checkpoint_saver.py @@ -0,0 +1,150 @@ +""" Checkpoint Saver + +Track top-n training checkpoints and maintain recovery checkpoints on specified intervals. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import glob +import operator +import os +import logging + +import torch + +from .model import unwrap_model, get_state_dict + + +_logger = logging.getLogger(__name__) + + +class CheckpointSaver: + def __init__( + self, + model, + optimizer, + args=None, + model_ema=None, + amp_scaler=None, + checkpoint_prefix='checkpoint', + recovery_prefix='recovery', + checkpoint_dir='', + recovery_dir='', + decreasing=False, + max_history=10, + unwrap_fn=unwrap_model): + + # objects to save state_dicts of + self.model = model + self.optimizer = optimizer + self.args = args + self.model_ema = model_ema + self.amp_scaler = amp_scaler + + # state + self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness + self.best_epoch = None + self.best_metric = None + self.curr_recovery_file = '' + self.last_recovery_file = '' + + # config + self.checkpoint_dir = checkpoint_dir + self.recovery_dir = recovery_dir + self.save_prefix = checkpoint_prefix + self.recovery_prefix = recovery_prefix + self.extension = '.pth.tar' + self.decreasing = decreasing # a lower metric is better if True + self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs + self.max_history = max_history + self.unwrap_fn = unwrap_fn + assert self.max_history >= 1 + + def save_checkpoint(self, epoch, metric=None): + assert epoch >= 0 + tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension) + last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension) + self._save(tmp_save_path, epoch, metric) + if os.path.exists(last_save_path): + os.unlink(last_save_path) # required for Windows support. + os.rename(tmp_save_path, last_save_path) + worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None + if (len(self.checkpoint_files) < self.max_history + or metric is None or self.cmp(metric, worst_file[1])): + if len(self.checkpoint_files) >= self.max_history: + self._cleanup_checkpoints(1) + filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension + save_path = os.path.join(self.checkpoint_dir, filename) + os.link(last_save_path, save_path) + self.checkpoint_files.append((save_path, metric)) + self.checkpoint_files = sorted( + self.checkpoint_files, key=lambda x: x[1], + reverse=not self.decreasing) # sort in descending order if a lower metric is not better + + checkpoints_str = "Current checkpoints:\n" + for c in self.checkpoint_files: + checkpoints_str += ' {}\n'.format(c) + _logger.info(checkpoints_str) + + if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)): + self.best_epoch = epoch + self.best_metric = metric + best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension) + if os.path.exists(best_save_path): + os.unlink(best_save_path) + os.link(last_save_path, best_save_path) + + return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch) + + def _save(self, save_path, epoch, metric=None): + save_state = { + 'epoch': epoch, + 'arch': type(self.model).__name__.lower(), + 'state_dict': get_state_dict(self.model, self.unwrap_fn), + 'optimizer': self.optimizer.state_dict(), + 'version': 2, # version < 2 increments epoch before save + } + if self.args is not None: + save_state['arch'] = self.args.model + save_state['args'] = self.args + if self.amp_scaler is not None: + save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict() + if self.model_ema is not None: + save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn) + if metric is not None: + save_state['metric'] = metric + torch.save(save_state, save_path) + + def _cleanup_checkpoints(self, trim=0): + trim = min(len(self.checkpoint_files), trim) + delete_index = self.max_history - trim + if delete_index < 0 or len(self.checkpoint_files) <= delete_index: + return + to_delete = self.checkpoint_files[delete_index:] + for d in to_delete: + try: + _logger.debug("Cleaning checkpoint: {}".format(d)) + os.remove(d[0]) + except Exception as e: + _logger.error("Exception '{}' while deleting checkpoint".format(e)) + self.checkpoint_files = self.checkpoint_files[:delete_index] + + def save_recovery(self, epoch, batch_idx=0): + assert epoch >= 0 + filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension + save_path = os.path.join(self.recovery_dir, filename) + self._save(save_path, epoch) + if os.path.exists(self.last_recovery_file): + try: + _logger.debug("Cleaning recovery: {}".format(self.last_recovery_file)) + os.remove(self.last_recovery_file) + except Exception as e: + _logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file)) + self.last_recovery_file = self.curr_recovery_file + self.curr_recovery_file = save_path + + def find_recovery(self): + recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix) + files = glob.glob(recovery_path + '*' + self.extension) + files = sorted(files) + return files[0] if len(files) else '' diff --git a/testbed/huggingface__pytorch-image-models/timm/utils/clip_grad.py b/testbed/huggingface__pytorch-image-models/timm/utils/clip_grad.py new file mode 100644 index 0000000000000000000000000000000000000000..7eb40697a221edd6d8e622ff3306dad5e58afd94 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/utils/clip_grad.py @@ -0,0 +1,23 @@ +import torch + +from timm.utils.agc import adaptive_clip_grad + + +def dispatch_clip_grad(parameters, value: float, mode: str = 'norm', norm_type: float = 2.0): + """ Dispatch to gradient clipping method + + Args: + parameters (Iterable): model parameters to clip + value (float): clipping value/factor/norm, mode dependant + mode (str): clipping mode, one of 'norm', 'value', 'agc' + norm_type (float): p-norm, default 2.0 + """ + if mode == 'norm': + torch.nn.utils.clip_grad_norm_(parameters, value, norm_type=norm_type) + elif mode == 'value': + torch.nn.utils.clip_grad_value_(parameters, value) + elif mode == 'agc': + adaptive_clip_grad(parameters, value, norm_type=norm_type) + else: + assert False, f"Unknown clip mode ({mode})." + diff --git a/testbed/huggingface__pytorch-image-models/timm/utils/log.py b/testbed/huggingface__pytorch-image-models/timm/utils/log.py new file mode 100644 index 0000000000000000000000000000000000000000..c99469e0884f3e45905ef7c7f0d1e491092697ad --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/utils/log.py @@ -0,0 +1,28 @@ +""" Logging helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +import logging +import logging.handlers + + +class FormatterNoInfo(logging.Formatter): + def __init__(self, fmt='%(levelname)s: %(message)s'): + logging.Formatter.__init__(self, fmt) + + def format(self, record): + if record.levelno == logging.INFO: + return str(record.getMessage()) + return logging.Formatter.format(self, record) + + +def setup_default_logging(default_level=logging.INFO, log_path=''): + console_handler = logging.StreamHandler() + console_handler.setFormatter(FormatterNoInfo()) + logging.root.addHandler(console_handler) + logging.root.setLevel(default_level) + if log_path: + file_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=(1024 ** 2 * 2), backupCount=3) + file_formatter = logging.Formatter("%(asctime)s - %(name)20s: [%(levelname)8s] - %(message)s") + file_handler.setFormatter(file_formatter) + logging.root.addHandler(file_handler) diff --git a/testbed/huggingface__pytorch-image-models/timm/utils/metrics.py b/testbed/huggingface__pytorch-image-models/timm/utils/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..9fdbe13ef15c541679906239374ff8a7eedf5181 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/utils/metrics.py @@ -0,0 +1,32 @@ +""" Eval metrics and related + +Hacked together by / Copyright 2020 Ross Wightman +""" + + +class AverageMeter: + """Computes and stores the average and current value""" + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + +def accuracy(output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + maxk = min(max(topk), output.size()[1]) + batch_size = target.size(0) + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.reshape(1, -1).expand_as(pred)) + return [correct[:min(k, maxk)].reshape(-1).float().sum(0) * 100. / batch_size for k in topk] diff --git a/testbed/huggingface__pytorch-image-models/timm/utils/misc.py b/testbed/huggingface__pytorch-image-models/timm/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..39c0097c60ed602547f832f1f8dafbe37f156064 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/utils/misc.py @@ -0,0 +1,18 @@ +""" Misc utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +import re + + +def natural_key(string_): + """See http://www.codinghorror.com/blog/archives/001018.html""" + return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] + + +def add_bool_arg(parser, name, default=False, help=''): + dest_name = name.replace('-', '_') + group = parser.add_mutually_exclusive_group(required=False) + group.add_argument('--' + name, dest=dest_name, action='store_true', help=help) + group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help) + parser.set_defaults(**{dest_name: default}) diff --git a/testbed/huggingface__pytorch-image-models/timm/utils/model.py b/testbed/huggingface__pytorch-image-models/timm/utils/model.py new file mode 100644 index 0000000000000000000000000000000000000000..bd46e2f49c6d5ee2de304bfda1456bd1716c6886 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/utils/model.py @@ -0,0 +1,92 @@ +""" Model / state_dict utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +from .model_ema import ModelEma +import torch +import fnmatch + +def unwrap_model(model): + if isinstance(model, ModelEma): + return unwrap_model(model.ema) + else: + return model.module if hasattr(model, 'module') else model + + +def get_state_dict(model, unwrap_fn=unwrap_model): + return unwrap_fn(model).state_dict() + + +def avg_sq_ch_mean(model, input, output): + "calculate average channel square mean of output activations" + return torch.mean(output.mean(axis=[0,2,3])**2).item() + + +def avg_ch_var(model, input, output): + "calculate average channel variance of output activations" + return torch.mean(output.var(axis=[0,2,3])).item()\ + + +def avg_ch_var_residual(model, input, output): + "calculate average channel variance of output activations" + return torch.mean(output.var(axis=[0,2,3])).item() + + +class ActivationStatsHook: + """Iterates through each of `model`'s modules and matches modules using unix pattern + matching based on `hook_fn_locs` and registers `hook_fn` to the module if there is + a match. + + Arguments: + model (nn.Module): model from which we will extract the activation stats + hook_fn_locs (List[str]): List of `hook_fn` locations based on Unix type string + matching with the name of model's modules. + hook_fns (List[Callable]): List of hook functions to be registered at every + module in `layer_names`. + + Inspiration from https://docs.fast.ai/callback.hook.html. + + Refer to https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 for an example + on how to plot Signal Propogation Plots using `ActivationStatsHook`. + """ + + def __init__(self, model, hook_fn_locs, hook_fns): + self.model = model + self.hook_fn_locs = hook_fn_locs + self.hook_fns = hook_fns + if len(hook_fn_locs) != len(hook_fns): + raise ValueError("Please provide `hook_fns` for each `hook_fn_locs`, \ + their lengths are different.") + self.stats = dict((hook_fn.__name__, []) for hook_fn in hook_fns) + for hook_fn_loc, hook_fn in zip(hook_fn_locs, hook_fns): + self.register_hook(hook_fn_loc, hook_fn) + + def _create_hook(self, hook_fn): + def append_activation_stats(module, input, output): + out = hook_fn(module, input, output) + self.stats[hook_fn.__name__].append(out) + return append_activation_stats + + def register_hook(self, hook_fn_loc, hook_fn): + for name, module in self.model.named_modules(): + if not fnmatch.fnmatch(name, hook_fn_loc): + continue + module.register_forward_hook(self._create_hook(hook_fn)) + + +def extract_spp_stats(model, + hook_fn_locs, + hook_fns, + input_shape=[8, 3, 224, 224]): + """Extract average square channel mean and variance of activations during + forward pass to plot Signal Propogation Plots (SPP). + + Paper: https://arxiv.org/abs/2101.08692 + + Example Usage: https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 + """ + x = torch.normal(0., 1., input_shape) + hook = ActivationStatsHook(model, hook_fn_locs=hook_fn_locs, hook_fns=hook_fns) + _ = model(x) + return hook.stats + \ No newline at end of file diff --git a/testbed/huggingface__pytorch-image-models/timm/utils/model_ema.py b/testbed/huggingface__pytorch-image-models/timm/utils/model_ema.py new file mode 100644 index 0000000000000000000000000000000000000000..073d5c5ea1a4afc5aa3817b6354b2566f8cc2cf5 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/utils/model_ema.py @@ -0,0 +1,126 @@ +""" Exponential Moving Average (EMA) of model updates + +Hacked together by / Copyright 2020 Ross Wightman +""" +import logging +from collections import OrderedDict +from copy import deepcopy + +import torch +import torch.nn as nn + +_logger = logging.getLogger(__name__) + + +class ModelEma: + """ Model Exponential Moving Average (DEPRECATED) + + Keep a moving average of everything in the model state_dict (parameters and buffers). + This version is deprecated, it does not work with scripted models. Will be removed eventually. + + This is intended to allow functionality like + https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + + A smoothed version of the weights is necessary for some training schemes to perform well. + E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use + RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA + smoothing of weights to match results. Pay attention to the decay constant you are using + relative to your update count per epoch. + + To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but + disable validation of the EMA weights. Validation will have to be done manually in a separate + process, or after the training stops converging. + + This class is sensitive where it is initialized in the sequence of model init, + GPU assignment and distributed training wrappers. + """ + def __init__(self, model, decay=0.9999, device='', resume=''): + # make a copy of the model for accumulating moving average of weights + self.ema = deepcopy(model) + self.ema.eval() + self.decay = decay + self.device = device # perform ema on different device from model if set + if device: + self.ema.to(device=device) + self.ema_has_module = hasattr(self.ema, 'module') + if resume: + self._load_checkpoint(resume) + for p in self.ema.parameters(): + p.requires_grad_(False) + + def _load_checkpoint(self, checkpoint_path): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + assert isinstance(checkpoint, dict) + if 'state_dict_ema' in checkpoint: + new_state_dict = OrderedDict() + for k, v in checkpoint['state_dict_ema'].items(): + # ema model may have been wrapped by DataParallel, and need module prefix + if self.ema_has_module: + name = 'module.' + k if not k.startswith('module') else k + else: + name = k + new_state_dict[name] = v + self.ema.load_state_dict(new_state_dict) + _logger.info("Loaded state_dict_ema") + else: + _logger.warning("Failed to find state_dict_ema, starting from loaded model weights") + + def update(self, model): + # correct a mismatch in state dict keys + needs_module = hasattr(model, 'module') and not self.ema_has_module + with torch.no_grad(): + msd = model.state_dict() + for k, ema_v in self.ema.state_dict().items(): + if needs_module: + k = 'module.' + k + model_v = msd[k].detach() + if self.device: + model_v = model_v.to(device=self.device) + ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v) + + +class ModelEmaV2(nn.Module): + """ Model Exponential Moving Average V2 + + Keep a moving average of everything in the model state_dict (parameters and buffers). + V2 of this module is simpler, it does not match params/buffers based on name but simply + iterates in order. It works with torchscript (JIT of full model). + + This is intended to allow functionality like + https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + + A smoothed version of the weights is necessary for some training schemes to perform well. + E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use + RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA + smoothing of weights to match results. Pay attention to the decay constant you are using + relative to your update count per epoch. + + To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but + disable validation of the EMA weights. Validation will have to be done manually in a separate + process, or after the training stops converging. + + This class is sensitive where it is initialized in the sequence of model init, + GPU assignment and distributed training wrappers. + """ + def __init__(self, model, decay=0.9999, device=None): + super(ModelEmaV2, self).__init__() + # make a copy of the model for accumulating moving average of weights + self.module = deepcopy(model) + self.module.eval() + self.decay = decay + self.device = device # perform ema on different device from model if set + if self.device is not None: + self.module.to(device=device) + + def _update(self, model, update_fn): + with torch.no_grad(): + for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()): + if self.device is not None: + model_v = model_v.to(device=self.device) + ema_v.copy_(update_fn(ema_v, model_v)) + + def update(self, model): + self._update(model, update_fn=lambda e, m: self.decay * e + (1. - self.decay) * m) + + def set(self, model): + self._update(model, update_fn=lambda e, m: m) diff --git a/testbed/huggingface__pytorch-image-models/timm/utils/random.py b/testbed/huggingface__pytorch-image-models/timm/utils/random.py new file mode 100644 index 0000000000000000000000000000000000000000..a9679983e96a9a6634c0b77aaf7b996e70eff50b --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/utils/random.py @@ -0,0 +1,9 @@ +import random +import numpy as np +import torch + + +def random_seed(seed=42, rank=0): + torch.manual_seed(seed + rank) + np.random.seed(seed + rank) + random.seed(seed + rank) diff --git a/testbed/huggingface__pytorch-image-models/timm/utils/summary.py b/testbed/huggingface__pytorch-image-models/timm/utils/summary.py new file mode 100644 index 0000000000000000000000000000000000000000..9f5af9a08598556c3fed136f258f88bd578c1e1c --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/utils/summary.py @@ -0,0 +1,39 @@ +""" Summary utilities + +Hacked together by / Copyright 2020 Ross Wightman +""" +import csv +import os +from collections import OrderedDict +try: + import wandb +except ImportError: + pass + +def get_outdir(path, *paths, inc=False): + outdir = os.path.join(path, *paths) + if not os.path.exists(outdir): + os.makedirs(outdir) + elif inc: + count = 1 + outdir_inc = outdir + '-' + str(count) + while os.path.exists(outdir_inc): + count = count + 1 + outdir_inc = outdir + '-' + str(count) + assert count < 100 + outdir = outdir_inc + os.makedirs(outdir) + return outdir + + +def update_summary(epoch, train_metrics, eval_metrics, filename, write_header=False, log_wandb=False): + rowd = OrderedDict(epoch=epoch) + rowd.update([('train_' + k, v) for k, v in train_metrics.items()]) + rowd.update([('eval_' + k, v) for k, v in eval_metrics.items()]) + if log_wandb: + wandb.log(rowd) + with open(filename, mode='a') as cf: + dw = csv.DictWriter(cf, fieldnames=rowd.keys()) + if write_header: # first iteration (epoch == 1 can't be used) + dw.writeheader() + dw.writerow(rowd) diff --git a/testbed/huggingface__pytorch-image-models/timm/version.py b/testbed/huggingface__pytorch-image-models/timm/version.py new file mode 100644 index 0000000000000000000000000000000000000000..779b9fc38903e947e97099da8d030e800605ea99 --- /dev/null +++ b/testbed/huggingface__pytorch-image-models/timm/version.py @@ -0,0 +1 @@ +__version__ = '0.4.13' diff --git a/testbed/huggingface__trl/docs/source/alignprop_trainer.mdx b/testbed/huggingface__trl/docs/source/alignprop_trainer.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d76b5665da3ada42ec9e6cd6a36ef06e1f8345cf --- /dev/null +++ b/testbed/huggingface__trl/docs/source/alignprop_trainer.mdx @@ -0,0 +1,93 @@ +# Aligning Text-to-Image Diffusion Models with Reward Backpropagation + +[![](https://img.shields.io/badge/All_models-AlignProp-blue)](https://huggingface.co/models?other=alignprop,trl) + +## The why + +If your reward function is differentiable, directly backpropagating gradients from the reward models to the diffusion model is significantly more sample and compute efficient (25x) than doing policy gradient algorithm like DDPO. +AlignProp does full backpropagation through time, which allows updating the earlier steps of denoising via reward backpropagation. + +
+ + +## Getting started with `examples/scripts/alignprop.py` + +The `alignprop.py` script is a working example of using the `AlignProp` trainer to finetune a Stable Diffusion model. This example explicitly configures a small subset of the overall parameters associated with the config object (`AlignPropConfig`). + +**Note:** one A100 GPU is recommended to get this running. For lower memory setting, consider setting truncated_backprop_rand to False. With default settings this will do truncated backpropagation with K=1. + +Almost every configuration parameter has a default. There is only one commandline flag argument that is required of the user to get things up and running. The user is expected to have a [huggingface user access token](https://huggingface.co/docs/hub/security-tokens) that will be used to upload the model post finetuning to HuggingFace hub. The following bash command is to be entered to get things running + +```batch +python alignprop.py --hf_user_access_token +``` + +To obtain the documentation of `stable_diffusion_tuning.py`, please run `python stable_diffusion_tuning.py --help` + +The following are things to keep in mind (The code checks this for you as well) in general while configuring the trainer (beyond the use case of using the example script) + +- The configurable randomized truncation range (`--alignprop_config.truncated_rand_backprop_minmax=(0,50)`) the first number should be equal and greater to 0, while the second number should equal or less to the number of diffusion timesteps (sample_num_steps) +- The configurable truncation backprop absolute step (`--alignprop_config.truncated_backprop_timestep=49`) the number should be less than the number of diffusion timesteps (sample_num_steps), it only matters when truncated_backprop_rand is set to False + +## Setting up the image logging hook function + +Expect the function to be given a dictionary with keys +```python +['image', 'prompt', 'prompt_metadata', 'rewards'] + +``` +and `image`, `prompt`, `prompt_metadata`, `rewards`are batched. +You are free to log however you want the use of `wandb` or `tensorboard` is recommended. + +### Key terms + +- `rewards` : The rewards/score is a numerical associated with the generated image and is key to steering the RL process +- `prompt` : The prompt is the text that is used to generate the image +- `prompt_metadata` : The prompt metadata is the metadata associated with the prompt. A situation where this will not be empty is when the reward model comprises of a [`FLAVA`](https://huggingface.co/docs/transformers/model_doc/flava) setup where questions and ground answers (linked to the generated image) are expected with the generated image (See here: https://github.com/kvablack/ddpo-pytorch/blob/main/ddpo_pytorch/rewards.py#L45) +- `image` : The image generated by the Stable Diffusion model + +Example code for logging sampled images with `wandb` is given below. + +```python +# for logging these images to wandb + +def image_outputs_hook(image_data, global_step, accelerate_logger): + # For the sake of this example, we only care about the last batch + # hence we extract the last element of the list + result = {} + images, prompts, rewards = [image_data['images'],image_data['prompts'],image_data['rewards']] + for i, image in enumerate(images): + pil = Image.fromarray( + (image.cpu().numpy().transpose(1, 2, 0) * 255).astype(np.uint8) + ) + pil = pil.resize((256, 256)) + result[f"{prompts[i]:.25} | {rewards[i]:.2f}"] = [pil] + accelerate_logger.log_images( + result, + step=global_step, + ) + +``` + +### Using the finetuned model + +Assuming you've done with all the epochs and have pushed up your model to the hub, you can use the finetuned model as follows + +```python +from diffusers import StableDiffusionPipeline +pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") +pipeline.to("cuda") + +pipeline.load_lora_weights('mihirpd/alignprop-trl-aesthetics') + +prompts = ["squirrel", "crab", "starfish", "whale","sponge", "plankton"] +results = pipeline(prompts) + +for prompt, image in zip(prompts,results.images): + image.save(f"dump/{prompt}.png") +``` + +## Credits + +This work is heavily influenced by the repo [here](https://github.com/mihirp1998/AlignProp/) and the associated paper [Aligning Text-to-Image Diffusion Models with Reward Backpropagation + by Mihir Prabhudesai, Anirudh Goyal, Deepak Pathak, Katerina Fragkiadaki](https://huggingface.co/papers/2310.03739). diff --git a/testbed/huggingface__trl/docs/source/callbacks.mdx b/testbed/huggingface__trl/docs/source/callbacks.mdx new file mode 100644 index 0000000000000000000000000000000000000000..dfcf4fd8b7a9b80a9ff9b309170ec8ebebd1e8bf --- /dev/null +++ b/testbed/huggingface__trl/docs/source/callbacks.mdx @@ -0,0 +1,17 @@ +# Callbacks + +## SyncRefModelCallback + +[[autodoc]] SyncRefModelCallback + +## RichProgressCallback + +[[autodoc]] RichProgressCallback + +## WinRateCallback + +[[autodoc]] WinRateCallback + +## LogCompletionsCallback + +[[autodoc]] LogCompletionsCallback \ No newline at end of file diff --git a/testbed/huggingface__trl/docs/source/data_utils.mdx b/testbed/huggingface__trl/docs/source/data_utils.mdx new file mode 100644 index 0000000000000000000000000000000000000000..6bbfc5b32df180eb6a8b9d5c359bee7d5a534b45 --- /dev/null +++ b/testbed/huggingface__trl/docs/source/data_utils.mdx @@ -0,0 +1,15 @@ +## Data Utilities + +[[autodoc]] is_conversational + +[[autodoc]] apply_chat_template + +[[autodoc]] maybe_apply_chat_template + +[[autodoc]] extract_prompt + +[[autodoc]] maybe_extract_prompt + +[[autodoc]] unpair_preference_dataset + +[[autodoc]] maybe_unpair_preference_dataset diff --git a/testbed/huggingface__trl/docs/source/installation.mdx b/testbed/huggingface__trl/docs/source/installation.mdx new file mode 100644 index 0000000000000000000000000000000000000000..bf74b64175fb15459b2cc1b61caea5ce159888f0 --- /dev/null +++ b/testbed/huggingface__trl/docs/source/installation.mdx @@ -0,0 +1,24 @@ +# Installation +You can install TRL either from pypi or from source: + +## pypi +Install the library with pip: + +```bash +pip install trl +``` + +### Source +You can also install the latest version from source. First clone the repo and then run the installation with `pip`: + +```bash +git clone https://github.com/huggingface/trl.git +cd trl/ +pip install -e . +``` + +If you want the development install you can replace the pip install with the following: + +```bash +pip install -e ".[dev]" +``` \ No newline at end of file diff --git a/testbed/huggingface__trl/docs/source/learning_tools.mdx b/testbed/huggingface__trl/docs/source/learning_tools.mdx new file mode 100644 index 0000000000000000000000000000000000000000..7d693dd2c9c74b43ce2e4dd2160af5a9b5ac4a21 --- /dev/null +++ b/testbed/huggingface__trl/docs/source/learning_tools.mdx @@ -0,0 +1,233 @@ +# Learning Tools (Experimental 🧪) + +Using Large Language Models (LLMs) with tools has been a popular topic recently with awesome works such as [ToolFormer](https://huggingface.co/papers/2302.04761) and [ToolBench](https://huggingface.co/papers/2305.16504). In TRL, we provide a simple example of how to teach LLM to use tools with reinforcement learning. + + +Here's an overview of the scripts in the [trl repository](https://github.com/lvwerra/trl/tree/main/examples/research_projects/tools): + +| File | Description | +|---|---| +| [`calculator.py`](https://github.com/lvwerra/trl/blob/main/examples/research_projects/tools/calculator.py) | Script to train LLM to use a calculator with reinforcement learning. | +| [`triviaqa.py`](https://github.com/lvwerra/trl/blob/main/examples/research_projects/tools/triviaqa.py) | Script to train LLM to use a wiki tool to answer questions. | +| [`python_interpreter.py`](https://github.com/lvwerra/trl/blob/main/examples/research_projects/tools/python_interpreter.py) | Script to train LLM to use python interpreter to solve math puzzles. | + + + +Note that the scripts above rely heavily on the `TextEnvironment` API which is still under active development. The API may change in the future. Please see [`TextEnvironment`](text_environment) for the related docs. + + + +## Learning to Use a Calculator + + +The rough idea is as follows: + +1. Load a tool such as [ybelkada/simple-calculator](https://huggingface.co/spaces/ybelkada/simple-calculator) that parse a text calculation like `"14 + 34"` and return the calulated number: + ```python + from transformers import AutoTokenizer, load_tool + tool = load_tool("ybelkada/simple-calculator") + tool_fn = lambda text: str(round(float(tool(text)), 2)) # rounding to 2 decimal places + ``` +1. Define a reward function that returns a positive reward if the tool returns the correct answer. In the script we create a dummy reward function like `reward_fn = lambda x: 1`, but we override the rewards directly later. +1. Create a prompt on how to use the tools + ```python + # system prompt + prompt = """\ + What is 13.1-3? + + 13.1-310.1 + + Result=10.1 + + What is 4*3? + + 4*312 + + Result=12 + + What is 12.1+1? + + 12.1+113.1 + + Result=13.1 + + What is 12.1-20? + + 12.1-20-7.9 + + Result=-7.9""" + ``` +3. Create a `trl.TextEnvironment` with the model + ```python + env = TextEnvironment( + model, + tokenizer, + {"SimpleCalculatorTool": tool_fn}, + reward_fn, + prompt, + generation_kwargs=generation_kwargs, + ) + ``` +4. Then generate some data such as `tasks = ["\n\nWhat is 13.1-3?", "\n\nWhat is 4*3?"]` and run the environment with `queries, responses, masks, rewards, histories = env.run(tasks)`. The environment will look for the `` token in the prompt and append the tool output to the response; it will also return the mask associated with the response. You can further use the `histories` to visualize the interaction between the model and the tool; `histories[0].show_text()` will show the text with color-coded tool output and `histories[0].show_tokens(tokenizer)` will show visualize the tokens. + ![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/learning_tools.png) +1. Finally, we can train the model with `train_stats = ppo_trainer.step(queries, responses, rewards, masks)`. The trainer will use the mask to ignore the tool output when computing the loss, make sure to pass that argument to `step`. + +## Experiment results + +We trained a model with the above script for 10 random seeds. You can reproduce the run with the following command. Feel free to remove the `--slurm-*` arguments if you don't have access to a slurm cluster. + +``` +WANDB_TAGS="calculator_final" python benchmark/benchmark.py \ + --command "python examples/research_projects/tools/calculator.py" \ + --num-seeds 10 \ + --start-seed 1 \ + --workers 10 \ + --slurm-gpus-per-task 1 \ + --slurm-ntasks 1 \ + --slurm-total-cpus 8 \ + --slurm-template-path benchmark/trl.slurm_template +``` + +We can then use [`openrlbenchmark`](https://github.com/openrlbenchmark/openrlbenchmark) which generates the following plot. +``` +# pip install openrlbenchmark==0.2.1a5 +python -m openrlbenchmark.rlops_multi_metrics \ + --filters '?we=openrlbenchmark&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.tracker_project_name&cen=trl_ppo_trainer_config.value.log_with&metrics=env/reward_mean&metrics=objective/kl' \ + 'wandb?tag=calculator_final&cl=calculator_mask' \ + --env-ids trl \ + --check-empty-runs \ + --pc.ncols 2 \ + --pc.ncols-legend 1 \ + --output-filename static/0compare \ + --scan-history +``` + +![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/learning_tools_chart.png) + +As we can see, while 1-2 experiments crashed for some reason, most of the runs obtained near perfect proficiency in the calculator task. + + +## (Early Experiments 🧪): learning to use a wiki tool for question answering + +In the [ToolFormer](https://huggingface.co/papers/2302.04761) paper, it shows an interesting use case that utilizes a Wikipedia Search tool to help answer questions. In this section, we attempt to perform similar experiments but uses RL instead to teach the model to use a wiki tool on the [TriviaQA](https://nlp.cs.washington.edu/triviaqa/) dataset. + + + + +**Note that many settings are different so the results are not directly comparable.** + + + + + +### Building a search index + +Since [ToolFormer](https://huggingface.co/papers/2302.04761) did not open source, we needed to first replicate the search index. It is mentioned in their paper that the authors built the search index using a BM25 retriever that indexes the Wikipedia dump from [KILT](https://github.com/facebookresearch/KILT) + +Fortunately, [`pyserini`](https://github.com/castorini/pyserini) already implements the BM25 retriever and provides a prebuilt index for the KILT Wikipedia dump. We can use the following code to search the index. + +```python +from pyserini.search.lucene import LuceneSearcher +import json +searcher = LuceneSearcher.from_prebuilt_index('wikipedia-kilt-doc') +def search(query): + hits = searcher.search(query, k=1) + hit = hits[0] + contents = json.loads(hit.raw)['contents'] + return contents +print(search("tennis racket")) +``` +``` +Racket (sports equipment) +A racket or racquet is a sports implement consisting of a handled frame with an open hoop across which a network of strings or catgut is stretched tightly. It is used for striking a ball or shuttlecock in games such as squash, tennis, racquetball, and badminton. Collectively, these games are known as racket sports. Racket design and manufacturing has changed considerably over the centuries. + +The frame of rackets for all sports was traditionally made of solid wood (later laminated wood) and the strings of animal intestine known as catgut. The traditional racket size was limited by the strength and weight of the wooden frame which had to be strong enough to hold the strings and stiff enough to hit the ball or shuttle. Manufacturers started adding non-wood laminates to wood rackets to improve stiffness. Non-wood rackets were made first of steel, then of aluminum, and then carbon fiber composites. Wood is still used for real tennis, rackets, and xare. Most rackets are now made of composite materials including carbon fiber or fiberglass, metals such as titanium alloys, or ceramics. +... +``` + +We then basically deployed this snippet as a Hugging Face space [here](https://huggingface.co/spaces/vwxyzjn/pyserini-wikipedia-kilt-doc), so that we can use the space as a `transformers.Tool` later. + +![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/pyserini.png) + +### Experiment settings + +We use the following settings: + +* use the `bigcode/starcoderbase` model as the base model +* use the `pyserini-wikipedia-kilt-doc` space as the wiki tool and only uses the first paragrahs of the search result, allowing the `TextEnvironment` to obtain at most `max_tool_reponse=400` response tokens from the tool. +* test if the response contain the answer string, if so, give a reward of 1, otherwise, give a reward of 0. + * notice this is a simplified evaluation criteria. In [ToolFormer](https://huggingface.co/papers/2302.04761), the authors checks if the first 20 words of the response contain the correct answer. +* used the following prompt that demonstrates the usage of the wiki tool. +```python +prompt = """\ +Answer the following question: + +Q: In which branch of the arts is Patricia Neary famous? +A: Ballets +A2: Patricia NearyPatricia Neary (born October 27, 1942) is an American ballerina, choreographer and ballet director, who has been particularly active in Switzerland. She has also been a highly successful ambassador for the Balanchine Trust, bringing George Balanchine's ballets to 60 cities around the globe. +Result=Ballets + +Q: Who won Super Bowl XX? +A: Chicago Bears +A2: Super Bowl XXSuper Bowl XX was an American football game between the National Football Conference (NFC) champion Chicago Bears and the American Football Conference (AFC) champion New England Patriots to decide the National Football League (NFL) champion for the 1985 season. The Bears defeated the Patriots by the score of 46–10, capturing their first NFL championship (and Chicago's first overall sports victory) since 1963, three years prior to the birth of the Super Bowl. Super Bowl XX was played on January 26, 1986 at the Louisiana Superdome in New Orleans. +Result=Chicago Bears + +Q: """ +``` + + +### Result and Discussion + + +Our experiments show that the agent can learn to use the wiki tool to answer questions. The learning curves would go up mostly, but one of the experiment did crash. + +![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/triviaqa_learning_curves.png) + +Wandb report is [here](https://wandb.ai/costa-huang/cleanRL/reports/TriviaQA-Final-Experiments--Vmlldzo1MjY0ODk5) for further inspection. + + +Note that the correct rate of the trained model is on the low end, which could be due to the following reasons: + +* **incorrect searches:** When given the question `"What is Bruce Willis' real first name?"` if the model searches for `Bruce Willis`, our wiki tool returns "Patrick Poivey (born 18 February 1948) is a French actor. He is especially known for his voice: he is the French dub voice of Bruce Willis since 1988.` But a correct search should be `Walter Bruce Willis (born March 19, 1955) is an American former actor. He achieved fame with a leading role on the comedy-drama series Moonlighting (1985–1989) and appeared in over a hundred films, gaining recognition as an action hero after his portrayal of John McClane in the Die Hard franchise (1988–2013) and other roles.[1][2]" + + + ![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/real_first_name.png) + +* **unnecessarily long response**: The wiki tool by default sometimes output very long sequences. E.g., when the wiki tool searches for "Brown Act" + * Our wiki tool returns "The Ralph M. Brown Act, located at California Government Code 54950 "et seq.", is an act of the California State Legislature, authored by Assemblymember Ralph M. Brown and passed in 1953, that guarantees the public's right to attend and participate in meetings of local legislative bodies." + * [ToolFormer](https://huggingface.co/papers/2302.04761)'s wiki tool returns "The Ralph M. Brown Act is an act of the California State Legislature that guarantees the public's right to attend and participate in meetings of local legislative bodies." which is more succinct. + + ![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/brown_act.png) + + +## (Early Experiments 🧪): solving math puzzles with python interpreter + +In this section, we attempt to teach the model to use a python interpreter to solve math puzzles. The rough idea is to give the agent a prompt like the following: + +```python +prompt = """\ +Example of using a Python API to solve math questions. + +Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left? + + +def solution(): + money_initial = 23 + bagels = 5 + bagel_cost = 3 + money_spent = bagels * bagel_cost + money_left = money_initial - money_spent + result = money_left + return result +print(solution()) +72 + +Result = 72 + +Q: """ +``` + + +Training experiment can be found at https://wandb.ai/lvwerra/trl-gsm8k/runs/a5odv01y + +![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/gms8k_learning_curve.png) diff --git a/testbed/huggingface__trl/docs/source/models.mdx b/testbed/huggingface__trl/docs/source/models.mdx new file mode 100644 index 0000000000000000000000000000000000000000..f96068fc46f160c6d60d3b95712fb277c826f6e9 --- /dev/null +++ b/testbed/huggingface__trl/docs/source/models.mdx @@ -0,0 +1,28 @@ +# Models + +With the `AutoModelForCausalLMWithValueHead` class TRL supports all decoder model architectures in transformers such as GPT-2, OPT, and GPT-Neo. In addition, with `AutoModelForSeq2SeqLMWithValueHead` you can use encoder-decoder architectures such as T5. TRL also requires reference models which are frozen copies of the model that is trained. With `create_reference_model` you can easily create a frozen copy and also share layers between the two models to save memory. + +## PreTrainedModelWrapper + +[[autodoc]] PreTrainedModelWrapper + +## AutoModelForCausalLMWithValueHead + + +[[autodoc]] AutoModelForCausalLMWithValueHead + - __init__ + - forward + - generate + - _init_weights + +## AutoModelForSeq2SeqLMWithValueHead + +[[autodoc]] AutoModelForSeq2SeqLMWithValueHead + - __init__ + - forward + - generate + - _init_weights + +## create_reference_model + +[[autodoc]] create_reference_model \ No newline at end of file diff --git a/testbed/huggingface__trl/docs/source/nash_md_trainer.md b/testbed/huggingface__trl/docs/source/nash_md_trainer.md new file mode 100644 index 0000000000000000000000000000000000000000..881e57e69c3f5482e6898e6ff2c63dea8d93d024 --- /dev/null +++ b/testbed/huggingface__trl/docs/source/nash_md_trainer.md @@ -0,0 +1,159 @@ +# Nash-MD Trainer + +[![](https://img.shields.io/badge/All_models-Nash--MD-blue)](https://huggingface.co/models?other=nash-md,trl) + +## Overview + +Nash-MD was proposed in the paper [Nash Learning from Human Feedback](https://huggingface.co/papers/2312.00886) by Rémi Munos, [Michal Valko](https://huggingface.co/misovalko), Daniele Calandriello, Mohammad Gheshlaghi Azar, Mark Rowland, Daniel Guo, Yunhao Tang, Matthieu Geist, Thomas Mésnard, and Andrea Michi. + +The abstract from the paper is the following: + +> Reinforcement learning from human feedback (RLHF) has emerged as the main paradigm for aligning large language models (LLMs) with human preferences. Typically, RLHF involves the initial step of learning a reward model from human feedback, often expressed as preferences between pairs of text generations produced by a pre-trained LLM. Subsequently, the LLM's policy is fine-tuned by optimizing it to maximize the reward model through a reinforcement learning algorithm. However, an inherent limitation of current reward models is their inability to fully represent the richness of human preferences and their dependency on the sampling distribution. In this study, we introduce an alternative pipeline for the fine-tuning of LLMs using pairwise human feedback. Our approach entails the initial learning of a preference model, which is conditioned on two inputs given a prompt, followed by the pursuit of a policy that consistently generates responses preferred over those generated by any competing policy, thus defining the Nash equilibrium of this preference model. We term this approach Nash learning from human feedback (NLHF). In the context of a tabular policy representation, we present a novel algorithmic solution, Nash-MD, founded on the principles of mirror descent. This algorithm produces a sequence of policies, with the last iteration converging to the regularized Nash equilibrium. Additionally, we explore parametric representations of policies and introduce gradient descent algorithms for deep-learning architectures. To demonstrate the effectiveness of our approach, we present experimental results involving the fine-tuning of a LLM for a text summarization task. We believe NLHF offers a compelling avenue for preference learning and policy optimization with the potential of advancing the field of aligning LLMs with human preferences. + +This post-training method was contributed by [Kashif Rasul](https://huggingface.co/kashif) and [Daniil Tiapkin](https://huggingface.co/dtiapkin), [Pierre Ménard](https://huggingface.co/menardprr), Daniele Calandriello and [Quentin Gallouédec](https://huggingface.co/qgallouedec). + +## Quick start + +This example demonstrates how to train a model using the Nash-MD method. We use the [Qwen 0.5B model](https://huggingface.co/Qwen/Qwen2-0.5B-Instruct) as the base model and [`PairRMJudge`] as a judge. We use the prompts from the [UltraFeedback dataset](https://huggingface.co/datasets/openbmb/UltraFeedback). You can view the prompts in the dataset here: + + + +Below is the script to train the model: + +```python +# train_nash_md.py +from datasets import load_dataset +from trl import NashMDConfig, NashMDTrainer, PairRMJudge +from transformers import AutoModelForCausalLM, AutoTokenizer + +model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct") +tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct") +judge = PairRMJudge() +train_dataset = load_dataset("trl-lib/ultrafeedback-prompt", split="train") + +training_args = NashMDConfig(output_dir="Qwen2-0.5B-NashMD", logging_steps=10) +trainer = NashMDTrainer( + model=model, judge=judge, args=training_args, processing_class=tokenizer, train_dataset=train_dataset +) +trainer.train() +``` + +Execute the script using the following command: + +```bash +accelerate launch train_nash_md.py +``` + +Distributed across 8 GPUs, the training takes approximately 3 hours. + +To see how the [trained model](https://huggingface.co/trl-lib/Qwen2-0.5B-NashMD) performs, you can use the [TRL Chat CLI](clis#chat-interface). + +
$ trl chat --model_name_or_path trl-lib/Qwen2-0.5B-NashMD
+<quentin_gallouedec>:
+What is the best programming language?
+
+<trl-lib/Qwen2-0.5B-NashMD>:
+The best programming language depends on personal preference, the complexity of the project, and the specific requirements of the task. Some programming languages that are often recommended include Python, Java, and JavaScript, and there are many other languages to choose from depending on individual needs.
+
+ +## Expected dataset type + +Nash-MD requires a [prompt-only dataset](dataset_formats#prompt-only). The [`NashMDTrainer`] supports both [conversational](dataset_formats#conversational) and [standard](dataset_formats#standard) dataset format. When provided with a conversational dataset, the trainer will automatically apply the chat template to the dataset. + +## Usage tips + +### Use a reward model + +Instead of a judge, you can chose to use a reward model -- see [Reward Bench](https://huggingface.co/spaces/allenai/reward-bench) for a leaderboard of public models you can use. Below is a code example showing how to replace a judge with the [trl-lib/Qwen2-0.5B-Reward](https://huggingface.co/trl-lib/Qwen2-0.5B-Reward) model: + +```diff +- from trl import PairRMJudge ++ from transformers import AutoModelForSequenceClassification + +- judge = PairRMJudge() ++ reward_model = AutoModelForSequenceClassification.from_pretrained("trl-lib/Qwen2-0.5B-Reward", num_labels=1) + + trainer = NashMDTrainer( + ... +- judge=judge, ++ reward_model=reward_model, + ) +``` + + + +Make sure that the SFT model and reward model use the _same_ chat template and the same tokenizer. Otherwise, you may find the model completions are scored incorrectly during training. + + + +### Encourage EOS token generation + +We may want the model to generate completions within a given length. During training, the model will generate completions up to the maximum length specified in the `max_new_tokens` argument of [`NashMDConfig`]. If you want to penalize the model for not generating an EOS token before reaching the maximum length, you can use the `missing_eos_penalty` argument of [`NashMDConfig`]: + +```python +training_args = NashMDConfig(..., max_new_tokens=128, missing_eos_penalty=1.0) +``` + +### Logging Completions + +To better understand your model’s behavior during training, you can log sample completions periodically using the [`LogCompletionsCallback`]. + +```python +trainer = NashMDTrainer(..., eval_dataset=eval_dataset) +completions_callback = LogCompletionsCallback(trainer, num_prompts=8) +trainer.add_callback(completions_callback) +``` + +This callback logs the model's generated completions directly to Weights & Biases. + +![Logged Completions](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/wandb_completions.png) + +## Example script + +We provide an example script to train a model using the Nash-MD method. The script is available in [`examples/scripts/nash_md.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/nash_md.py) + +To test the online DPO script with the [Qwen2.5 0.5B model](https://huggingface.co/trl-lib/Qwen/Qwen2.5-0.5B-Instruct) on the [UltraFeedback dataset](https://huggingface.co/datasets/openbmb/UltraFeedback), run the following command: + +```bash +python examples/scripts/nash_md.py \ + --model_name_or_path Qwen/Qwen2.5-0.5B-Instruct \ + --judge pair_rm \ + --dataset_name trl-lib/ultrafeedback-prompt \ + --learning_rate 5.0e-7 \ + --logging_steps 25 \ + --output_dir Qwen2.5-0.5B-NashMD-PairRM \ + --warmup_ratio 0.1 \ + --push_to_hub +``` + +## Logged metrics + +The logged metrics are as follows: + +* `loss/kl`: The mean KL divergence between the model and reference data. +* `objective/entropy`: The mean entropy of the model and reference data. +* `loss/score`: The mean reinforce score loss. +* `rewards/chosen`: The mean scores (according to the reward model) of the model completions. +* `rewards/rejected`: The mean scores (according to the reward model) of the mixture completions. +* `rewards/probabilities`: The mean probability (according to the reward model or judge) of the model completions chosen vs the mixture completion. +* `rewards/accuracies`: The accuracies of the Nash-MD's implicit reward model. +* `rewards/margins`: The mean reward margin (according to reward model) between the chosen and mixture completions. +* `logps/chosen`: The mean log probabilities of the chosen completions. +* `logps/rejected`: The mean log probabilities of the reference completions. +* `val/model_contain_eos_token`: The amount of times the model's output contains the eos token. +* `val/ref_contain_eos_token`: The amount of times the mixture's output contains the eos token. +* `beta`: The parameter that controls the weight of the loss term representing the deviation from the reference model. Typically fixed, but can be made dynamic by passing a list to [`NashMDConfig`]. +* `mixture_coef`: Logit mixture coefficient for the model and reference model. Typically fixed, but can be made dynamic by passing a list to [`NashMDConfig`]. + +## NashMDTrainer + +[[autodoc]] NashMDTrainer + +## NashMDConfig + +[[autodoc]] NashMDConfig diff --git a/testbed/huggingface__trl/docs/source/online_dpo_trainer.md b/testbed/huggingface__trl/docs/source/online_dpo_trainer.md new file mode 100644 index 0000000000000000000000000000000000000000..49e40957c1326fa7853075901331d6bd49b49ff7 --- /dev/null +++ b/testbed/huggingface__trl/docs/source/online_dpo_trainer.md @@ -0,0 +1,278 @@ +# Online DPO Trainer + +[![](https://img.shields.io/badge/All_models-Online_DPO-blue)](https://huggingface.co/models?other=online-dpo,trl) + +## Overview + +Online DPO was proposed in [Direct Language Model Alignment from Online AI Feedback](https://huggingface.co/papers/2402.04792) by Shangmin Guo, Biao Zhang, Tianlin Liu, Tianqi Liu, Misha Khalman, Felipe Llinares, Alexandre Rame, Thomas Mesnard, Yao Zhao, Bilal Piot, Johan Ferret, and Mathieu Blondel. + +The abstract from the paper is the following: + +> Direct alignment from preferences (DAP) methods, such as DPO, have recently emerged as efficient alternatives to reinforcement learning from human feedback (RLHF), that do not require a separate reward model. However, the preference datasets used in DAP methods are usually collected ahead of training and never updated, thus the feedback is purely offline. Moreover, responses in these datasets are often sampled from a language model distinct from the one being aligned, and since the model evolves over training, the alignment phase is inevitably off-policy. In this study, we posit that online feedback is key and improves DAP methods. Our method, online AI feedback (OAIF), uses an LLM as annotator: on each training iteration, we sample two responses from the current model and prompt the LLM annotator to choose which one is preferred, thus providing online feedback. Despite its simplicity, we demonstrate via human evaluation in several tasks that OAIF outperforms both offline DAP and RLHF methods. We further show that the feedback leveraged in OAIF is easily controllable, via instruction prompts to the LLM annotator. + +This post-training method was contributed by [Michael Noukhovitch](https://huggingface.co/mnoukhov), [Shengyi Costa Huang](https://huggingface.co/vwxyzjn), [Quentin Gallouédec](https://huggingface.co/qgallouedec), and [Edward Beeching](https://huggingface.co/edbeeching). + +## Quick start + +This example demonstrates how to train a model using the online DPO method. We use the [Qwen 0.5B model](https://huggingface.co/Qwen/Qwen2-0.5B-Instruct) as the base model and [`PairRMJudge`] as a judge. We use the prompts from the [UltraFeedback dataset](https://huggingface.co/datasets/openbmb/UltraFeedback). You can view the prompts in the dataset here: + + + +Below is the script to train the model: + +```python +# train_online_dpo.py +from datasets import load_dataset +from trl import OnlineDPOConfig, OnlineDPOTrainer, PairRMJudge +from transformers import AutoModelForCausalLM, AutoTokenizer + +model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct") +tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct") +judge = PairRMJudge() +train_dataset = load_dataset("trl-lib/ultrafeedback-prompt", split="train") + +training_args = OnlineDPOConfig(output_dir="Qwen2-0.5B-OnlineDPO", logging_steps=10) +trainer = OnlineDPOTrainer( + model=model, judge=judge, args=training_args, processing_class=tokenizer, train_dataset=train_dataset +) +trainer.train() +``` + +Execute the script using the following command: + +```bash +accelerate launch train_online_dpo.py +``` + +Distributed across 8 GPUs, the training takes approximately 1 hour. You can verify the training progress by checking the reward graph. An increasing trend in both the reward for rejected and chosen completions indicates that the model is improving and generating better responses over time. + +![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/online-dpo-qwen2.png) + +To see how the [trained model](https://huggingface.co/trl-lib/Qwen2-0.5B-OnlineDPO) performs, you can use the [TRL Chat CLI](clis#chat-interface). + +
$ trl chat --model_name_or_path trl-lib/Qwen2-0.5B-OnlineDPO
+<quentin_gallouedec>:
+What is the best programming language?
+
+<trl-lib/Qwen2-0.5B-OnlineDPO>:
+The best programming language depends on your specific needs and priorities. Some people prefer imperative programming languages (like Haskell or Lisp), while others prefer functional programming languages (like Scala or Python). It's important to consider your work style, programming environment, and project requirements when choosing a programming language.
+
+ +## Expected dataset type + +Online DPO only requires a [prompt-only dataset](dataset_formats#prompt-only) (unlike offline DPO, that expects [preference dataset](dataset_formats#preference)). The [`OnlineDPOTrainer`] supports both [conversational](dataset_formats#conversational) and [standard](dataset_formats#standard) dataset format. When provided with a conversational dataset, the trainer will automatically apply the chat template to the dataset. + +## Usage tips + +### Use a reward model + +Instead of a judge, you can chose to use a reward model -- see [Reward Bench](https://huggingface.co/spaces/allenai/reward-bench) for a leaderboard of public models you can use. Below is a code example showing how to replace a judge with the [trl-lib/Qwen2-0.5B-Reward](https://huggingface.co/trl-lib/Qwen2-0.5B-Reward) model: + +```diff +- from trl import PairRMJudge ++ from transformers import AutoModelForSequenceClassification + +- judge = PairRMJudge() ++ reward_model = AutoModelForSequenceClassification.from_pretrained("trl-lib/Qwen2-0.5B-Reward", num_labels=1) ++ reward_tokenizer = AutoTokenizer.from_pretrained("trl-lib/Qwen2-0.5B-Reward") + + trainer = OnlineDPOTrainer( + ... +- judge=judge, ++ reward_model=reward_model, ++ reward_processing_class=reward_tokenizer, + ... + ) +``` + +### Encourage EOS token generation + +When using a reward model, we may want the model to generate completions within a given length. During training, the model will generate completions up to the maximum length specified in the `max_new_tokens` argument of [`OnlineDPOConfig`]. If you want to penalize the model for not generating an EOS token before reaching the maximum length, you can use the `missing_eos_penalty` argument of [`OnlineDPOConfig`]: + +```python +training_args = OnlineDPOConfig(..., max_new_tokens=128, missing_eos_penalty=1.0) +``` + +### Logging Completions + +To better understand your model’s behavior during training, you can log sample completions periodically using the [`LogCompletionsCallback`]. + +```python +trainer = OnlineDPOTrainer(..., eval_dataset=eval_dataset) +completions_callback = LogCompletionsCallback(trainer, num_prompts=8) +trainer.add_callback(completions_callback) +``` + +This callback logs the model's generated completions directly to Weights & Biases. + +![Logged Completions](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/wandb_completions.png) + + +## Example script + +We provide an example script to train a model using the online DPO method. The script is available in [`examples/scripts/dpo_online.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/dpo_online.py) + +To test the online DPO script with the [Qwen2.5 0.5B model](https://huggingface.co/trl-lib/Qwen/Qwen2.5-0.5B-Instruct) on the [UltraFeedback dataset](https://huggingface.co/datasets/openbmb/UltraFeedback), run the following command: + +```bash +python examples/scripts/dpo_online.py \ + --model_name_or_path Qwen/Qwen2.5-0.5B-Instruct \ + --judge pair_rm \ + --dataset_name trl-lib/ultrafeedback-prompt \ + --learning_rate 5.0e-7 \ + --logging_steps 25 \ + --output_dir Qwen2.5-0.5B-Online-DPO-PairRM \ + --warmup_ratio 0.1 \ + --push_to_hub +``` + +## Logged metrics + +The logged metrics are as follows. Here is an example [tracked run at Weights and Biases](https://wandb.ai/huggingface/trl/runs/w4apmsi9) + +* `objective/kl`: The mean Kullback-Leibler (KL) divergence between the current model and reference model. +* `objective/entropy`: The mean entropy of the model, indicating the randomness of the actions chosen by the model. +* `objective/non_score_reward`: The mean reward from non-score-related sources, basically `beta * kl.sum(1)`, where `beta` is the KL penalty coefficient and `kl` is the per-token KL divergence. +* `objective/rlhf_reward`: The mean RLHF reward, which is `scores - non_score_reward`. The `rlhf_reward` is the ultimate objective of online DPO training. If training works as intended, this metric should keep going up. +* `objective/scores`: The mean scores returned by the reward model. +* `objective/scores_margin`: The mean score margin (according to the external reward model) between the chosen and rejected completions. +* `rewards/chosen`: The mean reward (according to online DPO's implicit reward model)of the chosen completions. +* `rewards/rejected`: The mean reward (according to online DPO's implicit reward model) of the rejected completions. +* `rewards/accuracies`: The accuracies of the online DPO's implicit reward model. +* `rewards/margins`: The mean reward margin (according to online DPO's implicit reward model) between the chosen and rejected completions. +* `logps/chosen`: The mean log probabilities of the chosen completions. +* `logps/rejected`: The mean log probabilities of the rejected completions. +* `val/contain_eos_token`: The fraction of completions which contain an EOS token. +* `beta`: The parameter that controls the weight of the loss term representing the deviation from the reference model. Typically fixed, but can be made dynamic by passing a list to [`OnlineDPOConfig`]. + +## Benchmark experiments + +To validate the online DPO implementation works, we ran experiments with the Pythia 1B, 2.8B, and 6.9B models on a single node of 8 x H100s. Here are the commands we used to run the experiments. We take the SFT / RM models directly from [The N+ Implementation Details of RLHF with PPO: A Case Study on TL;DR Summarization](https://huggingface.co/papers/2403.17031). + + +``` +# 1B Online DPO experiment +accelerate launch --config_file examples/accelerate_configs/multi_gpu.yaml \ + examples/scripts/dpo_online.py \ + --model_name_or_path trl-lib/pythia-1b-deduped-tldr-sft \ + --reward_model_path trl-lib/pythia-1b-deduped-tldr-rm \ + --dataset_name trl-lib/tldr \ + --learning_rate 5.0e-7 \ + --output_dir pythia-1b-deduped-tldr-online-dpo \ + --beta 0.1 \ + --per_device_train_batch_size 8 \ + --gradient_accumulation_steps 2 \ + --num_train_epochs 3 \ + --max_new_tokens 53 \ + --warmup_ratio 0.1 \ + --missing_eos_penalty 1.0 \ + --logging_steps 20 \ + --save_steps 0.1 \ + --push_to_hub + +# 2.8B Online DPO experiment +accelerate launch --config_file examples/accelerate_configs/deepspeed_zero2.yaml \ + examples/scripts/dpo_online.py \ + --model_name_or_path trl-lib/pythia-2.8b-deduped-tldr-sft \ + --reward_model_path trl-lib/pythia-2.8b-deduped-tldr-rm \ + --dataset_name trl-lib/tldr \ + --learning_rate 5.0e-7 \ + --output_dir pythia-2.8b-deduped-tldr-online-dpo \ + --beta 0.1 \ + --per_device_train_batch_size 8 \ + --gradient_accumulation_steps 2 \ + --num_train_epochs 3 \ + --max_new_tokens 53 \ + --warmup_ratio 0.1 \ + --missing_eos_penalty 1.0 \ + --bf16 \ + --logging_steps 20 \ + --save_steps 0.1 \ + --push_to_hub + +# 6.9B Online DPO experiment +accelerate launch --config_file examples/accelerate_configs/deepspeed_zero2.yaml \ + examples/scripts/dpo_online.py \ + --model_name_or_path trl-lib/pythia-6.9b-deduped-tldr-sft \ + --reward_model_path trl-lib/pythia-6.9b-deduped-tldr-rm \ + --dataset_name trl-lib/tldr \ + --learning_rate 5.0e-7 \ + --output_dir pythia-6.9b-deduped-tldr-online-dpo \ + --beta 0.1 \ + --per_device_train_batch_size 4 \ + --gradient_accumulation_steps 4 \ + --num_train_epochs 3 \ + --max_new_tokens 53 \ + --warmup_ratio 0.1 \ + --missing_eos_penalty 1.0 \ + --bf16 \ + --gradient_checkpointing \ + --logging_steps 20 \ + --save_steps 0.1 \ + --push_to_hub +``` + +Checkpoints and experiment tracking are available at: + +- [🤗 Model checkpoints](https://huggingface.co/collections/trl-lib/online-dpo-66acd3fa38a331a9cd457b07) +- [🐝 Tracked experiment](https://wandb.ai/huggingface/trl/reports/Online-DPO-experiments-for-TL-DR-summarisation--Vmlldzo5MTczMDU0) + + +To evaluate, we use [vLLM](https://github.com/vllm-project/vllm) to load the checkpoints and GPT-4o mini as a judge model to evaluate the generated TL;DR against the reference TL;DR. +For more information on how to use judges, see [Judges](judges). + +```bash +$ python examples/scripts/evals/judge_tldr.py --model_name_or_path trl-lib/pythia-1b-deduped-tldr-sft --judge_model gpt-4o-mini --num_examples 1000 +Model win rate: 33.00% +python examples/scripts/evals/judge_tldr.py --model_name_or_path trl-lib/pythia-6.9b-deduped-tldr-sft --judge_model gpt-4o-mini --num_examples 1000 +Model win rate: 41.50% +python examples/scripts/evals/judge_tldr.py --model_name_or_path trl-lib/pythia-1b-deduped-tldr-online-dpo --judge_model gpt-4o-mini --num_examples 1000 +Model win rate: 62.60% +python examples/scripts/evals/judge_tldr.py --model_name_or_path trl-lib/pythia-6.9b-deduped-tldr-online-dpo --judge_model gpt-4o-mini --num_examples 1000 +Model win rate: 74.20% +``` + +We can then plot the RLHF scaling chart. + +```python +import matplotlib.pyplot as plt + +results = { + "SFT": {1.0e9: 0.21, 2.8e9: 0.27, 6.9e9: 0.316}, + "online-dpo": {1.0e9: 0.542, 2.8e9: 0.746, 6.9e9: 0.796}, + "offline-dpo": {1.0e9: 0.422, 2.8e9: 0.517, 6.9e9: 0.701}, +} + + +plt.plot(results["SFT"].keys(), results["SFT"].values(), label="SFT", marker="o") +plt.plot(results["online-dpo"].keys(), results["online-dpo"].values(), label="Online-dpo with RM judge", marker="o") +plt.plot(results["offline-dpo"].keys(), results["offline-dpo"].values(), label="Offline-dpo", marker="o") +plt.axhline(y=0.5, color="black", linestyle="-.", label="Human reference summary") +plt.xscale("log") +plt.xlabel("Model size") +plt.ylabel("Win rate against reference summaries\n(according to GPT-4-0613)") +plt.title("DPO scaling by model size") +plt.legend() +plt.xlim(5e8, 1.2e10) +plt.xticks([1e9, 3e9, 1e10], ["1B", "3B", "10B"]) +plt.grid(True, which="both", ls="--", c="0.7") +plt.tight_layout() +plt.show() +``` + +![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/online_dpo_scaling.png) + +The online DPO checkpoint gets increasingly more win rate as we scale up the model sizes. This is a good sign that the online DPO implementation is working as intended. + +## OnlineDPOTrainer + +[[autodoc]] OnlineDPOTrainer + +## OnlineDPOConfig + +[[autodoc]] OnlineDPOConfig diff --git a/testbed/huggingface__trl/docs/source/orpo_trainer.md b/testbed/huggingface__trl/docs/source/orpo_trainer.md new file mode 100644 index 0000000000000000000000000000000000000000..02d0b9c86b9db4c7e78a618c0c78eee37b3c6d0c --- /dev/null +++ b/testbed/huggingface__trl/docs/source/orpo_trainer.md @@ -0,0 +1,129 @@ +# ORPO Trainer + +[![](https://img.shields.io/badge/All_models-ORPO-blue)](https://huggingface.co/models?other=orpo,trl) + +## Overview + +Odds Ratio Preference Optimization (ORPO) was introduced in [ORPO: Monolithic Preference Optimization without Reference Model](https://huggingface.co/papers/2403.07691) by [Jiwoo Hong](https://huggingface.co/JW17), [Noah Lee](https://huggingface.co/nlee-208), and [James Thorne](https://huggingface.co/j6mes). + +The abstract from the paper is the following: + +> While recent preference alignment algorithms for language models have demonstrated promising results, supervised fine-tuning (SFT) remains imperative for achieving successful convergence. In this paper, we study the crucial role of SFT within the context of preference alignment, emphasizing that a minor penalty for the disfavored generation style is sufficient for preference-aligned SFT. Building on this foundation, we introduce a straightforward and innovative reference model-free monolithic odds ratio preference optimization algorithm, ORPO, eliminating the necessity for an additional preference alignment phase. We demonstrate, both empirically and theoretically, that the odds ratio is a sensible choice for contrasting favored and disfavored styles during SFT across the diverse sizes from 125M to 7B. Specifically, fine-tuning Phi-2 (2.7B), Llama-2 (7B), and Mistral (7B) with ORPO on the UltraFeedback alone surpasses the performance of state-of-the-art language models with more than 7B and 13B parameters: achieving up to 12.20% on AlpacaEval_{2.0} (Figure 1), 66.19% on IFEval (instruction-level loose, Table 6), and 7.32 in MT-Bench (Figure 12). We release code and model checkpoints for Mistral-ORPO-alpha (7B) and Mistral-ORPO-beta (7B). + +It studies the crucial role of SFT within the context of preference alignment. Using preference data the method posits that a minor penalty for the disfavored generation together with a strong adaption signal to the chosen response via a simple log odds ratio term appended to the NLL loss is sufficient for preference-aligned SFT. + +Thus ORPO is a reference model-free preference optimization algorithm eliminating the necessity for an additional preference alignment phase thus saving compute and memory. + +The official code can be found in [xfactlab/orpo](https://github.com/xfactlab/orpo). + +This post-training method was contributed by [Kashif Rasul](https://huggingface.co/kashif), [Lewis Tunstall](https://huggingface.co/lewtun) and [Alvaro Bartolome](https://huggingface.co/alvarobartt). + +## Quick start + +This example demonstrates how to train a model using the ORPO method. We use the [Qwen 0.5B model](https://huggingface.co/Qwen/Qwen2-0.5B-Instruct) as the base model. We use the preference data from the [UltraFeedback dataset](https://huggingface.co/datasets/openbmb/UltraFeedback). You can view the data in the dataset here: + + + +Below is the script to train the model: + +```python +# train_orpo.py +from datasets import load_dataset +from trl import ORPOConfig, ORPOTrainer +from transformers import AutoModelForCausalLM, AutoTokenizer + +model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct") +tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct") +train_dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train") + +training_args = ORPOConfig(output_dir="Qwen2-0.5B-ORPO", logging_steps=10) +trainer = ORPOTrainer(model=model, args=training_args, processing_class=tokenizer, train_dataset=train_dataset) +trainer.train() +``` + +Execute the script using the following command: + +```bash +accelerate launch train_orpo.py +``` + +Distributed across 8 GPUs, the training takes approximately 30 minutes. You can verify the training progress by checking the reward graph. An increasing trend in the reward margin indicates that the model is improving and generating better responses over time. + +![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/orpo-qwen2-reward-margin.png) + +To see how the [trained model](https://huggingface.co/trl-lib/Qwen2-0.5B-ORPO) performs, you can use the [TRL Chat CLI](clis#chat-interface). + +
$ trl chat --model_name_or_path trl-lib/Qwen2-0.5B-ORPO
+<quentin_gallouedec>:
+What is the best programming language?
+
+<trl-lib/Qwen2-0.5B-ORPO>:
+It's challenging to determine the best programming language as no one language is perfect, as the complexity of a task and the type of project are significant factors. Some popular languages include Java, Python, JavaScript, and
+C++. If you have specific needs or requirements for a specific project, it's important to choose the language that best suits those needs.                                                                                          
+
+Here are some other factors to consider when choosing a programming language for a project:
+
+ • Language proficiency: A good programming language is more likely to be easy to understand and use, and will allow developers to collaborate on projects more efficiently.                                     
+ • Ease of use: There are tools and libraries available to make programming more accessible, so developers should choose a language that can help them get started easier.
+ • Code readability: A clear and concise codebase should be easy to read and understand, especially when working with large projects.
+ • Tool and framework support: There are numerous libraries available for Python, Java, and JavaScript, along with tools like IDEs and static code analysis tools.
+ • Accessibility: Some languages and tools have features that make them more accessible to developers with disabilities, such as support for screen readers.
+ • Version control: As your projects grow and complexity increases, version control tools can be beneficial for tracking changes.
+
+
+ +## Expected dataset type + +ORPO requires a [preference dataset](dataset_formats#preference). The [`ORPOTrainer`] supports both [conversational](dataset_formats#conversational) and [standard](dataset_formats#standard) dataset format. When provided with a conversational dataset, the trainer will automatically apply the chat template to the dataset. + +Although the [`ORPOTrainer`] supports both explicit and implicit prompts, we recommend using explicit prompts. If provided with an implicit prompt dataset, the trainer will automatically extract the prompt from the `"chosen"` and `"rejected"` columns. For more information, refer to the [preference style](dataset_formats#preference) section. + +## Example script + +We provide an example script to train a model using the ORPO method. The script is available in [`examples/scripts/orpo.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/orpo.py) + +To test the ORPO script with the [Qwen2 0.5B model](https://huggingface.co/Qwen/Qwen2-0.5B-Instruct) on the [UltraFeedback dataset](https://huggingface.co/datasets/trl-lib/ultrafeedback_binarized), run the following command: + +```bash +accelerate launch examples/scripts/orpo.py \ + --model_name_or_path Qwen/Qwen2-0.5B-Instruct \ + --dataset_name trl-lib/ultrafeedback_binarized \ + --num_train_epochs 1 \ + --logging_steps 25 \ + --output_dir Qwen2-0.5B-ORPO +``` + +## Usage tips + +### For Mixture of Experts Models: Enabling the auxiliary loss + +MOEs are the most efficient if the load is about equally distributed between experts. +To ensure that we train MOEs similarly during preference-tuning, it is beneficial to add the auxiliary loss from the load balancer to the final loss. + +This option is enabled by setting `output_router_logits=True` in the model config (e.g. [`~transformers.MixtralConfig`]). +To scale how much the auxiliary loss contributes to the total loss, use the hyperparameter `router_aux_loss_coef=...` (default: `0.001`) in the model config. + +## Logged metrics + +While training and evaluating we record the following reward metrics: + +- `rewards/chosen`: the mean log probabilities of the policy model for the chosen responses scaled by beta +- `rewards/rejected`: the mean log probabilities of the policy model for the rejected responses scaled by beta +- `rewards/accuracies`: mean of how often the chosen rewards are > than the corresponding rejected rewards +- `rewards/margins`: the mean difference between the chosen and corresponding rejected rewards +- `log_odds_chosen`: the mean log odds ratio of the chosen responses over the rejected responses +- `log_odds_ratio`: the mean of the `log(sigmoid(log_odds_chosen))` +- `nll_loss`: the mean negative log likelihood loss from the SFT part of the loss over chosen responses + +## ORPOTrainer + +[[autodoc]] ORPOTrainer + +## ORPOConfig + +[[autodoc]] ORPOConfig diff --git a/testbed/huggingface__trl/docs/source/ppo_trainer.md b/testbed/huggingface__trl/docs/source/ppo_trainer.md new file mode 100644 index 0000000000000000000000000000000000000000..a1cdc6529beaab9af95f4d99aab047d866ecae59 --- /dev/null +++ b/testbed/huggingface__trl/docs/source/ppo_trainer.md @@ -0,0 +1,237 @@ +# PPO Trainer + +[![](https://img.shields.io/badge/All_models-PPO-blue)](https://huggingface.co/models?other=ppo,trl) + +TRL supports training LLMs with [Proximal Policy Optimization (PPO)](https://huggingface.co/papers/1707.06347). + +References: +- [Fine-Tuning Language Models from Human Preferences](https://github.com/openai/lm-human-preferences) +- [Learning to Summarize from Human Feedback](https://github.com/openai/summarize-from-feedback) +- [The N Implementation Details of RLHF with PPO](https://huggingface.co/blog/the_n_implementation_details_of_rlhf_with_ppo) +- [The N+ Implementation Details of RLHF with PPO: A Case Study on TL;DR Summarization](https://huggingface.co/papers/2403.17031) + +## Get started + +To just run a PPO script to make sure the trainer can run, you can run the following command to train a PPO model with a dummy reward model. + +```bash +python examples/scripts/ppo/ppo.py \ + --dataset_name trl-internal-testing/descriptiveness-sentiment-trl-style \ + --dataset_train_split descriptiveness \ + --learning_rate 3e-6 \ + --num_ppo_epochs 1 \ + --num_mini_batches 1 \ + --output_dir models/minimal/ppo \ + --per_device_train_batch_size 64 \ + --gradient_accumulation_steps 1 \ + --total_episodes 10000 \ + --model_name_or_path EleutherAI/pythia-1b-deduped \ + --missing_eos_penalty 1.0 +``` + + +## Explanation of the logged metrics + +The logged metrics are as follows. Here is an example [tracked run at Weights and Biases](https://wandb.ai/huggingface/trl/runs/dd2o3g35) + +* `eps`: Tracks the number of episodes per second. +* `objective/kl`: The mean Kullback-Leibler (KL) divergence between the current policy and reference policy. +* `objective/entropy`: The mean entropy of the policy, indicating the randomness of the actions chosen by the policy. +* `objective/non_score_reward`: The mean reward from non-score-related sources, basically `beta * kl.sum(1)`, where `beta` is the KL penalty coefficient and `kl` is the per-token KL divergence. +* `objective/rlhf_reward`: The mean RLHF reward, which is `score - non_score_reward`. +* `objective/scores`: The mean scores returned by the reward model / environment. +* `policy/approxkl_avg`: The average approximate KL divergence between consecutive PPO policies. Note that this is not the same as `objective/kl`. +* `policy/clipfrac_avg`: The average fraction of policy updates that are clipped, indicating how often the policy updates are constrained to prevent large changes. +* `loss/policy_avg`: The average policy loss, indicating how well the policy is performing. +* `loss/value_avg`: The average value loss, indicating the difference between the predicted value and the actual reward. +* `val/clipfrac_avg`: The average fraction of value function updates that are clipped, similar to policy/clipfrac_avg but for the value function. +* `policy/entropy_avg`: The average entropy of the policy during training, indicating how diverse the policy's actions are. +* `val/ratio`: The mean ratio of the current policy probability to the old policy probability, providing a measure of how much the policy has changed. +* `val/ratio_var`: The variance of the `val/ratio`, indicating the variability in policy changes. +* `val/num_eos_tokens`: The number of end-of-sequence (EOS) tokens generated, which can indicate the number of complete responses. +* `lr`: lr: The current learning rate used by the optimizer. +* `episode`: episode: The current global step or episode count in the training process. + + +## Cookbook + +* Debugging TIP: `objective/rlhf_reward`: this is the ultimate objective of the RLHF training. If training works as intended, this metric should keep going up. +* Debugging TIP: `val/ratio`: this number should float around 1.0, and it gets clipped by `--cliprange 0.2` with PPO's surrogate loss. So if this `ratio` is too high like 2.0 or 1000.0 or too small like 0.1, it means the updates between consecutive policies are too drastic. You should try undertand why this is happening and try to fix it. +* Memory TIP: If you are running out of memory, you can try to reduce the `--per_device_train_batch_size` or increase the `--gradient_accumulation_steps` to reduce the memory footprint. +* Memory TIP: If you have multiple GPUs, you can also run training with DeepSpeed stage 3 to reduce the memory footprint `accelerate launch --config_file examples/accelerate_configs/deepspeed_zero3.yaml`. +* Usage TIP: We recommend to use the "EOS trick" via `--missing_eos_penalty`, which subtracts a static scalar penalty from the score of completions that do not end with an EOS token. This can help the model learn to generate more coherent completions. + + +## What is my model doing exactly? + +To help you understand what your model is doing, we periodically log some sample completions from the model. Here is an example of a completion. In an example [tracked run at Weights and Biases](https://wandb.ai/huggingface/trl/runs/dd2o3g35), it looks like the following, allowing you to see the model's response at different stages of training. By default we generate `--num_sample_generations 10` during training, but you can customize the number of generations. + +![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/ppov2_completions.gif?download=true) + + +In the logs the sampled generations look like + +``` +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━┓ +┃ query ┃ model response ┃ score ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━┩ +│ SUBREDDIT: r/AskReddit │ I'm in love with a friend, and │ 3.921875 │ +│ │ I don't know how to get rid of │ │ +│ TITLE: How do you get someone │ those feelings. I'm │ │ +│ out of your head? │ desperate.<|endoftext|>[PAD][P… │ │ +│ │ │ │ +│ POST: Hi, │ │ │ +│ I'm 22, and I have been with my │ │ │ +│ girlfriend for 5 years now. We │ │ │ +│ recently moved together. We've │ │ │ +│ always loved each other │ │ │ +│ intensely. │ │ │ +│ │ │ │ +│ Problem, I recently started to │ │ │ +│ have feelings for an other │ │ │ +│ person (a friend). This person │ │ │ +│ has had a boyfriend for now 3 │ │ │ +│ years, and has absolutely no │ │ │ +│ ideas. Those feelings were so │ │ │ +│ strong, it was hard to hide │ │ │ +│ them. After 2 months of me │ │ │ +│ being distant and really sad, │ │ │ +│ my girlfriend forced me to say │ │ │ +│ what was bothering me. I'm not │ │ │ +│ a good liar, and now she knows. │ │ │ +│ │ │ │ +│ We decided to give us a week │ │ │ +│ alone, I went to my parents. │ │ │ +│ │ │ │ +│ Now, I'm completely lost. I │ │ │ +│ keep on thinking about this │ │ │ +│ person, and I hate that. I │ │ │ +│ would like for those feelings │ │ │ +│ to go away, to leave me alone. │ │ │ +│ But I can't. │ │ │ +│ │ │ │ +│ What do I do? It's been 3 │ │ │ +│ months now, and I'm just │ │ │ +│ desperate. │ │ │ +│ │ │ │ +│ TL;DR: │ │ │ +├─────────────────────────────────┼─────────────────────────────────┼──────────┤ +│ SUBREDDIT: r/pettyrevenge │ My mom woke me up with a loud │ 6.84375 │ +│ │ TV. I blasted Gangnam Style on │ │ +│ TITLE: So, my mom woke me up │ repeat, with the bass cranked │ │ +│ with a loud TV. │ up as high as it could │ │ +│ │ go.<|endoftext|>[PAD][PAD][PAD… │ │ +│ POST: She was in her living │ │ │ +│ room, watching TV. This was at │ │ │ +│ about 8:30 in the morning, and │ │ │ +│ she was exercising. She turned │ │ │ +│ the TV up extra loud to hear it │ │ │ +│ over her excercycle, and woke │ │ │ +│ me up. I went in there asking │ │ │ +│ for her to turn it down. She │ │ │ +│ said she didn't have to; I │ │ │ +│ explained that I always used │ │ │ +│ headphones so she didn't have │ │ │ +│ to deal with my noise and that │ │ │ +│ she should give me a little │ │ │ +│ more respect, given that I paid │ │ │ +│ rent at the time. │ │ │ +│ │ │ │ +│ She disagreed. I went back to │ │ │ +│ my room, rather pissed off at │ │ │ +│ the lack of equality. I had no │ │ │ +│ lock on my door; but I had a │ │ │ +│ dresser right next to it, so I │ │ │ +│ pulled one of the drawers out │ │ │ +│ enough so that it caused the │ │ │ +│ door to not be openable. Then, │ │ │ +│ I turned my speakers up really │ │ │ +│ loud and blasted Gangnam Style │ │ │ +│ on repeat, with the bass │ │ │ +│ cranked up as high as it could │ │ │ +│ go. │ │ │ +│ │ │ │ +│ If you hate Gangnam Style for │ │ │ +│ being overplayed, you will see │ │ │ +│ why I chose that particular │ │ │ +│ song. I personally don't mind │ │ │ +│ it. But here's the thing about │ │ │ +│ my bass; it vibrates the walls, │ │ │ +│ making one hell of a lot of │ │ │ +│ noise. Needless to say, my mom │ │ │ +│ was not pleased and shut off │ │ │ +│ the internet. But it was oh so │ │ │ +│ worth it. │ │ │ +│ │ │ │ +│ TL;DR: │ │ │ +└─────────────────────────────────┴─────────────────────────────────┴──────────┘ +``` + +## Implementation details + +This PPO implementation is based on the [The N+ Implementation Details of RLHF with PPO: A Case Study on TL;DR Summarization](https://huggingface.co/papers/2403.17031). + +## Benchmark experiments + +To validate the PPO implementation works, we ran experiment on the 1B model. Here are the command we used to run the experiment. We take the SFT / RM models directly from [The N+ Implementation Details of RLHF with PPO: A Case Study on TL;DR Summarization](https://huggingface.co/papers/2403.17031). + +``` +accelerate launch --config_file examples/accelerate_configs/deepspeed_zero2.yaml \ + examples/scripts/ppo/ppo_tldr.py \ + --output_dir models/minimal/ppo_tldr \ + --learning_rate 3e-6 \ + --per_device_train_batch_size 16 \ + --gradient_accumulation_steps 4 \ + --total_episodes 1000000 \ + --model_name_or_path EleutherAI/pythia-1b-deduped \ + --sft_model_path cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr \ + --reward_model_path cleanrl/EleutherAI_pythia-1b-deduped__reward__tldr \ + --local_rollout_forward_batch_size 16 \ + --missing_eos_penalty 1.0 \ + --stop_token eos +``` + +Checkpoints and experiment tracking are available at: + +- [🤗 Model checkpoint](https://huggingface.co/vwxyzjn/ppo_tldr) +- [🐝 Tracked experiment](https://wandb.ai/huggingface/trl/runs/dd2o3g35) + +To evaluate, we use [vLLM](https://github.com/vllm-project/vllm) to load the checkpoints and GPT-4o mini as a judge model to evaluate the generated TL;DR against the reference TL;DR. +For more information on how to use judges, see [Judges](judges). + +```bash +$ python examples/scripts/evals/judge_tldr.py --model_name_or_path cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr --judge_model gpt-4o-mini --num_examples 1000 +Model win rate: 33.00% +$ python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/ppo_tldr --judge_model gpt-4o-mini --num_examples 1000 +Model win rate: 64.70% +``` + +The PPO checkpoint gets a 64.7% preferred rate vs the 33.0% preference rate of the SFT checkpoint. This is a good sign that the PPO training is working as intended. + +Metrics: + +![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/benchmark/pr-1540/ppov2.png) + + +```bash +# pip install openrlbenchmark==0.2.1a5 +# see https://github.com/openrlbenchmark/openrlbenchmark#get-started for documentation +# to use it, change `?we=huggingface&wpn=trl` to your own project and `?tag=pr-1540` to your own tag +python -m openrlbenchmark.rlops_multi_metrics \ + --filters '?we=huggingface&wpn=trl&xaxis=train/episode&ceik=output_dir&cen=sft_model_path&metrics=train/objective/rlhf_reward&metrics=train/objective/scores&metrics=train/objective/kl&metrics=train/objective/non_score_reward&metrics=train/objective/entropy&metrics=train/policy/approxkl_avg&metrics=train/policy/clipfrac_avg&metrics=train/loss/policy_avg&metrics=train/loss/value_avg&metrics=train/val/clipfrac_avg&metrics=train/policy/entropy_avg&metrics=train/val/ratio&metrics=train/val/ratio_var&metrics=train/val/num_eos_tokens&metrics=train/lr&metrics=train/eps' \ + "cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr?tag=pr-1540" \ + --env-ids models/minimal/ppo_tldr \ + --pc.ncols 4 \ + --pc.ncols-legend 1 \ + --pc.xlabel "Episode" \ + --output-filename benchmark/trl/pr-1540/ppo \ + --scan-history +``` + +## PPOTrainer + +[[autodoc]] PPOTrainer + +## PPOConfig + +[[autodoc]] PPOConfig \ No newline at end of file diff --git a/testbed/huggingface__trl/docs/source/reward_trainer.mdx b/testbed/huggingface__trl/docs/source/reward_trainer.mdx new file mode 100644 index 0000000000000000000000000000000000000000..09c2ac863c4f222620f6aa37ff45f12e278ca394 --- /dev/null +++ b/testbed/huggingface__trl/docs/source/reward_trainer.mdx @@ -0,0 +1,90 @@ +# Reward Modeling + +[![](https://img.shields.io/badge/All_models-Reward_Trainer-blue)](https://huggingface.co/models?other=reward-trainer,trl) + +TRL supports custom reward modeling for anyone to perform reward modeling on their dataset and model. + +Check out a complete flexible example at [`examples/scripts/reward_modeling.py`](https://github.com/huggingface/trl/tree/main/examples/scripts/reward_modeling.py). + +## Expected dataset type + +The [`RewardTrainer`] requires a [*implicit prompt* preference dataset](dataset_formats#preference). It means that the dataset should only contain the columns `"chosen"` and `"rejected"` (and not `"prompt"`). +The [`RewardTrainer`] supports both [conversational](dataset_formats#conversational) and [standard](dataset_formats#standard) dataset format. When provided with a conversational dataset, the trainer will automatically apply the chat template to the dataset. + +You can also use a pretokenized dataset, in which case the dataset should contain the following columns: `input_ids_chosen`, `attention_mask_chosen`, `input_ids_rejected` and `attention_mask_rejected`. + +## Using the `RewardTrainer` + +After preparing your dataset, you can use the [`RewardTrainer`] in the same way as the `Trainer` class from 🤗 Transformers. +You should pass an `AutoModelForSequenceClassification` model to the [`RewardTrainer`], along with a [`RewardConfig`] which configures the hyperparameters of the training. + +### Leveraging 🤗 PEFT to train a reward model + +Just pass a `peft_config` in the keyword arguments of [`RewardTrainer`], and the trainer should automatically take care of converting the model into a PEFT model! + +```python +from peft import LoraConfig, TaskType +from transformers import AutoModelForSequenceClassification, AutoTokenizer +from trl import RewardTrainer, RewardConfig + +model = AutoModelForSequenceClassification.from_pretrained("gpt2") +peft_config = LoraConfig( + task_type=TaskType.SEQ_CLS, + inference_mode=False, + r=8, + lora_alpha=32, + lora_dropout=0.1, +) + +... + +trainer = RewardTrainer( + model=model, + args=training_args, + processing_class=tokenizer, + train_dataset=dataset, + peft_config=peft_config, +) + +trainer.train() + +``` + +### Adding a margin to the loss + +As in the [Llama 2 paper](https://huggingface.co/papers/2307.09288), you can add a margin to the loss by adding a `margin` column to the dataset. The reward collator will automatically pass it through and the loss will be computed accordingly. + +```python +def add_margin(row): + # Assume you have a score_chosen and score_rejected columns that you want to use to compute the margin + return {'margin': row['score_chosen'] - row['score_rejected']} + +dataset = dataset.map(add_margin) +``` + +### Centering rewards + +In many scenarios, it's preferable to ensure that a reward model's output is mean zero. This is often done by first calculating the model's average score and then subtracting it. + +[[Eisenstein et al., 2023]](https://huggingface.co/papers/2312.09244) proposed an auxiliary loss function designed to directly learn a centered reward model. This auxiliary loss minimizes the squared sum of the rewards, encouraging the model to naturally produce mean-zero outputs: + +$$\Big( R(p, r_1) + R(p, r_2) \Big)^2 $$ + +This auxiliary loss is combined with the main loss function, weighted by the parameter `center_rewards_coefficient` in the `[RewardConfig]`. By default, this feature is deactivated (`center_rewards_coefficient = None`). + +```python +training_args = RewardConfig( + center_rewards_coefficient=0.01, + ... +) +``` + +For reference results, please refer PR [#1932](https://github.com/huggingface/trl/pull/1932). + +## RewardTrainer + +[[autodoc]] RewardTrainer + +## RewardConfig + +[[autodoc]] RewardConfig diff --git a/testbed/huggingface__trl/docs/source/rloo_trainer.md b/testbed/huggingface__trl/docs/source/rloo_trainer.md new file mode 100644 index 0000000000000000000000000000000000000000..8c16484d900dac4829324cf10ceae0fb6deb2caf --- /dev/null +++ b/testbed/huggingface__trl/docs/source/rloo_trainer.md @@ -0,0 +1,279 @@ +# RLOO Trainer + +[![](https://img.shields.io/badge/All_models-RLOO-blue)](https://huggingface.co/models?other=rloo,trl) + +TRL supports training LLMs with REINFORCE Leave-One-Out (RLOO). The idea is that instead of using a value function, RLOO generates K completions for each prompt. For each completion, RLOO uses the mean scores from the other K-1 completions as a baseline to calculate the advantage. RLOO also models the entire completion as a single action, where as PPO models each token as an action. Note that REINFORCE / A2C is a special case of PPO, when the number of PPO epochs is 1 and the number of mini-batches is 1, which is how we implement RLOO in TRL. + +References: +- [Back to Basics: Revisiting REINFORCE Style Optimization for Learning from Human Feedback in LLMs](https://huggingface.co/papers/2402.14740) +- [A2C is a special case of PPO](https://huggingface.co/papers/2205.09123) +- [Fine-Tuning Language Models from Human Preferences](https://github.com/openai/lm-human-preferences) +- [Learning to Summarize from Human Feedback](https://github.com/openai/summarize-from-feedback) +- [The N Implementation Details of RLHF with PPO](https://huggingface.co/blog/the_n_implementation_details_of_rlhf_with_ppo) +- [The N+ Implementation Details of RLHF with PPO: A Case Study on TL;DR Summarization](https://huggingface.co/papers/2403.17031) + +## Get started + +To just run a RLOO script to make sure the trainer can run, you can run the following command to train a RLOO model with a dummy reward model. + +```bash +python examples/scripts/rloo/rloo.py \ + --dataset_name trl-internal-testing/descriptiveness-sentiment-trl-style \ + --dataset_train_split descriptiveness \ + --learning_rate 3e-6 \ + --output_dir models/minimal/rloo \ + --per_device_train_batch_size 64 \ + --gradient_accumulation_steps 1 \ + --total_episodes 10000 \ + --model_name_or_path EleutherAI/pythia-14m \ + --reward_model_path EleutherAI/pythia-14m \ + --missing_eos_penalty 1.0 +``` + + +## Explanation of the logged metrics + +The logged metrics are as follows. Here is an example [tracked run at Weights and Biases](https://wandb.ai/huggingface/trl/runs/u2sqci34) + + + +* `eps`: Tracks the number of episodes per second. +* `objective/kl`: The mean Kullback-Leibler (KL) divergence between the current policy and reference policy. +* `objective/entropy`: The mean entropy of the policy, indicating the randomness of the actions chosen by the policy. +* `objective/non_score_reward`: The mean reward from non-score-related sources, basically `beta * kl.sum(1)`, where `beta` is the KL penalty coefficient and `kl` is the per-token KL divergence. +* `objective/rlhf_reward`: The mean RLHF reward, which is `score - non_score_reward`. +* `objective/scores`: The mean scores returned by the reward model / environment. +* `policy/approxkl_avg`: The average approximate KL divergence between consecutive PPO policies. Note that this is not the same as `objective/kl`. +* `policy/clipfrac_avg`: The average fraction of policy updates that are clipped, indicating how often the policy updates are constrained to prevent large changes. +* `loss/policy_avg`: The average policy loss, indicating how well the policy is performing. +* `val/clipfrac_avg`: The average fraction of value function updates that are clipped, similar to policy/clipfrac_avg but for the value function. +* `policy/entropy_avg`: The average entropy of the policy during training, indicating how diverse the policy's actions are. +* `val/ratio`: The mean ratio of the current policy probability to the old policy probability, providing a measure of how much the policy has changed. +* `val/ratio_var`: The variance of the `val/ratio`, indicating the variability in policy changes. +* `val/num_eos_tokens`: The number of end-of-sequence (EOS) tokens generated, which can indicate the number of complete responses. +* `lr`: lr: The current learning rate used by the optimizer. +* `episode`: episode: The current global step or episode count in the training process. + + +## Cookbook + +* Debugging TIP: `objective/rlhf_reward`: this is the ultimate objective of the RLHF training. If training works as intended, this metric should keep going up. +* Debugging TIP: `val/ratio`: this number should float around 1.0, and it gets clipped by `--cliprange 0.2` with PPO's surrogate loss. So if this `ratio` is too high like 2.0 or 1000.0 or too small like 0.1, it means the updates between consecutive policies are too drastic. You should try undertand why this is happening and try to fix it. +* Memory TIP: If you are running out of memory, you can try to reduce the `--per_device_train_batch_size` or increase the `--gradient_accumulation_steps` to reduce the memory footprint. +* Memory TIP: If you have multiple GPUs, you can also run training with DeepSpeed stage 3 to reduce the memory footprint `accelerate launch --config_file examples/accelerate_configs/deepspeed_zero3.yaml`. +* Usage TIP: We recommend to use the "EOS trick" via `--missing_eos_penalty`, which subtracts a static scalar penalty from the score of completions that do not end with an EOS token. This can help the model learn to generate more coherent completions. + + +## What is my model doing exactly? + +To help you understand what your model is doing, we periodically log some sample completions from the model. Here is an example of a completion. In an example [tracked run at Weights and Biases](https://wandb.ai/huggingface/trl/runs/u2sqci34), it looks like the following, allowing you to see the model's response at different stages of training. By default we generate `--num_sample_generations 10` during training, but you can customize the number of generations. + +![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/ppov2_completions.gif) + + +In the logs the sampled generations look like + +``` +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━┓ +┃ query ┃ model response ┃ score ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━┩ +│ SUBREDDIT: r/AskReddit │ I'm in love with a friend, and │ 3.921875 │ +│ │ I don't know how to get rid of │ │ +│ TITLE: How do you get someone │ those feelings. I'm │ │ +│ out of your head? │ desperate.<|endoftext|>[PAD][P… │ │ +│ │ │ │ +│ POST: Hi, │ │ │ +│ I'm 22, and I have been with my │ │ │ +│ girlfriend for 5 years now. We │ │ │ +│ recently moved together. We've │ │ │ +│ always loved each other │ │ │ +│ intensely. │ │ │ +│ │ │ │ +│ Problem, I recently started to │ │ │ +│ have feelings for an other │ │ │ +│ person (a friend). This person │ │ │ +│ has had a boyfriend for now 3 │ │ │ +│ years, and has absolutely no │ │ │ +│ ideas. Those feelings were so │ │ │ +│ strong, it was hard to hide │ │ │ +│ them. After 2 months of me │ │ │ +│ being distant and really sad, │ │ │ +│ my girlfriend forced me to say │ │ │ +│ what was bothering me. I'm not │ │ │ +│ a good liar, and now she knows. │ │ │ +│ │ │ │ +│ We decided to give us a week │ │ │ +│ alone, I went to my parents. │ │ │ +│ │ │ │ +│ Now, I'm completely lost. I │ │ │ +│ keep on thinking about this │ │ │ +│ person, and I hate that. I │ │ │ +│ would like for those feelings │ │ │ +│ to go away, to leave me alone. │ │ │ +│ But I can't. │ │ │ +│ │ │ │ +│ What do I do? It's been 3 │ │ │ +│ months now, and I'm just │ │ │ +│ desperate. │ │ │ +│ │ │ │ +│ TL;DR: │ │ │ +├─────────────────────────────────┼─────────────────────────────────┼──────────┤ +│ SUBREDDIT: r/pettyrevenge │ My mom woke me up with a loud │ 6.84375 │ +│ │ TV. I blasted Gangnam Style on │ │ +│ TITLE: So, my mom woke me up │ repeat, with the bass cranked │ │ +│ with a loud TV. │ up as high as it could │ │ +│ │ go.<|endoftext|>[PAD][PAD][PAD… │ │ +│ POST: She was in her living │ │ │ +│ room, watching TV. This was at │ │ │ +│ about 8:30 in the morning, and │ │ │ +│ she was exercising. She turned │ │ │ +│ the TV up extra loud to hear it │ │ │ +│ over her excercycle, and woke │ │ │ +│ me up. I went in there asking │ │ │ +│ for her to turn it down. She │ │ │ +│ said she didn't have to; I │ │ │ +│ explained that I always used │ │ │ +│ headphones so she didn't have │ │ │ +│ to deal with my noise and that │ │ │ +│ she should give me a little │ │ │ +│ more respect, given that I paid │ │ │ +│ rent at the time. │ │ │ +│ │ │ │ +│ She disagreed. I went back to │ │ │ +│ my room, rather pissed off at │ │ │ +│ the lack of equality. I had no │ │ │ +│ lock on my door; but I had a │ │ │ +│ dresser right next to it, so I │ │ │ +│ pulled one of the drawers out │ │ │ +│ enough so that it caused the │ │ │ +│ door to not be openable. Then, │ │ │ +│ I turned my speakers up really │ │ │ +│ loud and blasted Gangnam Style │ │ │ +│ on repeat, with the bass │ │ │ +│ cranked up as high as it could │ │ │ +│ go. │ │ │ +│ │ │ │ +│ If you hate Gangnam Style for │ │ │ +│ being overplayed, you will see │ │ │ +│ why I chose that particular │ │ │ +│ song. I personally don't mind │ │ │ +│ it. But here's the thing about │ │ │ +│ my bass; it vibrates the walls, │ │ │ +│ making one hell of a lot of │ │ │ +│ noise. Needless to say, my mom │ │ │ +│ was not pleased and shut off │ │ │ +│ the internet. But it was oh so │ │ │ +│ worth it. │ │ │ +│ │ │ │ +│ TL;DR: │ │ │ +└─────────────────────────────────┴─────────────────────────────────┴──────────┘ +``` + +## Implementation details + +The bulk of RLOOTrainer is based on the PPO implementation, which is based on the [The N+ Implementation Details of RLHF with PPO: A Case Study on TL;DR Summarization](https://huggingface.co/papers/2403.17031). + + +Below is a vectorized advantage calculation for RLOO: + +```python +def test_rloo_reward(): + local_batch_size = 3 + rloo_k = 4 + rlhf_reward = torch.tensor([ + 1, 2, 3, # first rlhf reward for three prompts + 2, 3, 4, # second rlhf reward for three prompts + 5, 6, 7, # third rlhf reward for three prompts + 8, 9, 10, # fourth rlhf reward for three prompts + ]).float() # here we have 3 prompts which have 4 completions each + + baseline = (rlhf_reward.sum(0) - rlhf_reward) / (rloo_k - 1) + advantages = torch.zeros_like(rlhf_reward) + for i in range(0, len(advantages), local_batch_size): + other_response_rlhf_rewards = [] + for j in range(0, len(advantages), local_batch_size): + if i != j: + other_response_rlhf_rewards.append(rlhf_reward[j : j + local_batch_size]) + advantages[i : i + local_batch_size] = rlhf_reward[i : i + local_batch_size] - torch.stack(other_response_rlhf_rewards).mean(0) + + assert (1 - (2 + 5 + 8) / 3 - advantages[0].item()) < 1e-6 # First rlhf reward for the first prompt + assert (6 - (3 + 2 + 9) / 3 - advantages[7].item()) < 1e-6 # Third rlhf reward for the second prompt + + # Vectorized implementation + rlhf_reward = rlhf_reward.reshape(rloo_k, local_batch_size) + baseline = (rlhf_reward.sum(0) - rlhf_reward) / (rloo_k - 1) + vec_advantages = rlhf_reward - baseline + torch.testing.assert_close(vec_advantages.flatten(), advantages) +``` + +## Benchmark experiments + +To validate the RLOO implementation works, we ran experiment on the 1B model. Here are the command we used to run the experiment. We take the SFT / RM models directly from [The N+ Implementation Details of RLHF with PPO: A Case Study on TL;DR Summarization](https://huggingface.co/papers/2403.17031). + +``` +accelerate launch --config_file examples/accelerate_configs/deepspeed_zero2.yaml \ + --output_dir models/minimal/rloo_tldr \ + --dataset_name trl-internal-testing/tldr-preference-sft-trl-style \ + --dataset_test_split validation \ + --num_ppo_epochs 2 \ + --num_mini_batches 2 \ + --learning_rate 3e-6 \ + --per_device_train_batch_size 8 \ + --gradient_accumulation_steps 8 \ + --total_episodes 1000000 \ + --model_name_or_path EleutherAI/pythia-1b-deduped \ + --sft_model_path cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr \ + --reward_model_path cleanrl/EleutherAI_pythia-1b-deduped__reward__tldr \ + --local_rollout_forward_batch_size 16 \ + --missing_eos_penalty 1.0 \ + --stop_token eos \ + --kl_coef 0.03 +``` + +Checkpoints and experiment tracking are available at: + +- [🤗 Model checkpoint](https://huggingface.co/vwxyzjn/rloo_tldr) +- [🐝 Tracked experiment](https://wandb.ai/huggingface/trl/runs/u2sqci34) + + +To evaluate, we use [vLLM](https://github.com/vllm-project/vllm) to load the checkpoints and GPT-4o mini as a judge model to evaluate the generated TL;DR against the reference TL;DR. +For more information on how to use judges, see [Judges](judges). + +```bash +$ python examples/scripts/evals/judge_tldr.py --model_name_or_path cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr --judge_model gpt-4o-mini --num_examples 1000 +Model win rate: 33.00% +$ python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/rloo_tldr --judge_model gpt-4o-mini --num_examples 1000 +Model win rate: 51.20% +``` + +The RLOO checkpoint gets a 51.2% preferred rate vs the 33.0% preference rate of the SFT checkpoint. This is a good sign that the RLOO training is working as intended. + + +Metrics: + +![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/benchmark/pr-1540/rloo.png) + + +```bash +# pip install openrlbenchmark==0.2.1a5 +# see https://github.com/openrlbenchmark/openrlbenchmark#get-started for documentation +# to use it, change `?we=huggingface&wpn=trl` to your own project and `?tag=pr-1540` to your own tag +python -m openrlbenchmark.rlops_multi_metrics \ + --filters '?we=huggingface&wpn=trl&xaxis=train/episode&ceik=output_dir&cen=sft_model_path&metrics=train/objective/rlhf_reward&metrics=train/objective/scores&metrics=train/objective/kl&metrics=train/objective/non_score_reward&metrics=train/objective/entropy&metrics=train/policy/approxkl_avg&metrics=train/policy/clipfrac_avg&metrics=train/loss/policy_avg&metrics=train/policy/entropy_avg&metrics=train/val/ratio&metrics=train/val/ratio_var&metrics=train/val/num_eos_tokens&metrics=train/lr&metrics=train/eps' \ + "cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr?tag=pr-1540" \ + --env-ids models/minimal/rloo_tldr \ + --pc.ncols 4 \ + --pc.ncols-legend 1 \ + --pc.xlabel "Episode" \ + --output-filename benchmark/trl/pr-1540/rloo \ + --scan-history +``` + + +## RLOOTrainer + +[[autodoc]] RLOOTrainer + +## RLOOConfig + +[[autodoc]] RLOOConfig \ No newline at end of file diff --git a/testbed/huggingface__trl/docs/source/sentiment_tuning.mdx b/testbed/huggingface__trl/docs/source/sentiment_tuning.mdx new file mode 100644 index 0000000000000000000000000000000000000000..0637cb7ec312c37f327ae4ca031fdd7231799717 --- /dev/null +++ b/testbed/huggingface__trl/docs/source/sentiment_tuning.mdx @@ -0,0 +1,36 @@ +# Sentiment Tuning Examples + +The notebooks and scripts in this examples show how to fine-tune a model with a sentiment classifier (such as `lvwerra/distilbert-imdb`). + +Here's an overview of the notebooks and scripts in the [trl repository](https://github.com/huggingface/trl/tree/main/examples): + + + +| File | Description | +|------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------| +| [`examples/scripts/ppo.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/ppo.py) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/trl/blob/main/examples/sentiment/notebooks/gpt2-sentiment.ipynb) | This script shows how to use the `PPOTrainer` to fine-tune a sentiment analysis model using IMDB dataset | +| [`examples/notebooks/gpt2-sentiment.ipynb`](https://github.com/huggingface/trl/tree/main/examples/notebooks/gpt2-sentiment.ipynb) | This notebook demonstrates how to reproduce the GPT2 imdb sentiment tuning example on a jupyter notebook. | +| [`examples/notebooks/gpt2-control.ipynb`](https://github.com/huggingface/trl/tree/main/examples/notebooks/gpt2-control.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/trl/blob/main/examples/sentiment/notebooks/gpt2-sentiment-control.ipynb) | This notebook demonstrates how to reproduce the GPT2 sentiment control example on a jupyter notebook. + + + +## Usage + +```bash +# 1. run directly +python examples/scripts/ppo.py +# 2. run via `accelerate` (recommended), enabling more features (e.g., multiple GPUs, deepspeed) +accelerate config # will prompt you to define the training configuration +accelerate launch examples/scripts/ppo.py # launches training +# 3. get help text and documentation +python examples/scripts/ppo.py --help +# 4. configure logging with wandb and, say, mini_batch_size=1 and gradient_accumulation_steps=16 +python examples/scripts/ppo.py --log_with wandb --mini_batch_size 1 --gradient_accumulation_steps 16 +``` + +Note: if you don't want to log with `wandb` remove `log_with="wandb"` in the scripts/notebooks. You can also replace it with your favourite experiment tracker that's [supported by `accelerate`](https://huggingface.co/docs/accelerate/usage_guides/tracking). + + +## Few notes on multi-GPU + +To run in multi-GPU setup with DDP (distributed Data Parallel) change the `device_map` value to `device_map={"": Accelerator().process_index}` and make sure to run your script with `accelerate launch yourscript.py`. If you want to apply naive pipeline parallelism you can use `device_map="auto"`. \ No newline at end of file diff --git a/testbed/huggingface__trl/docs/source/sft_trainer.mdx b/testbed/huggingface__trl/docs/source/sft_trainer.mdx new file mode 100644 index 0000000000000000000000000000000000000000..607591a9c29168cbde185062dcc60a0bca3cd0f7 --- /dev/null +++ b/testbed/huggingface__trl/docs/source/sft_trainer.mdx @@ -0,0 +1,772 @@ +# Supervised Fine-tuning Trainer + +[![](https://img.shields.io/badge/All_models-SFT-blue)](https://huggingface.co/models?other=sft,trl) + +Supervised fine-tuning (or SFT for short) is a crucial step in RLHF. In TRL we provide an easy-to-use API to create your SFT models and train them with few lines of code on your dataset. + +Check out a complete flexible example at [`examples/scripts/sft.py`](https://github.com/huggingface/trl/tree/main/examples/scripts/sft.py). +Experimental support for Vision Language Models is also included in the example [`examples/scripts/sft_vlm.py`](https://github.com/huggingface/trl/tree/main/examples/scripts/sft_vlm.py). + +## Quickstart + +If you have a dataset hosted on the 🤗 Hub, you can easily fine-tune your SFT model using [`SFTTrainer`] from TRL. Let us assume your dataset is `imdb`, the text you want to predict is inside the `text` field of the dataset, and you want to fine-tune the `facebook/opt-350m` model. +The following code-snippet takes care of all the data pre-processing and training for you: + +```python +from datasets import load_dataset +from trl import SFTConfig, SFTTrainer + +dataset = load_dataset("stanfordnlp/imdb", split="train") + +training_args = SFTConfig( + max_seq_length=512, + output_dir="/tmp", +) +trainer = SFTTrainer( + "facebook/opt-350m", + train_dataset=dataset, + args=training_args, +) +trainer.train() +``` +Make sure to pass the correct value for `max_seq_length` as the default value will be set to `min(tokenizer.model_max_length, 1024)`. + +You can also construct a model outside of the trainer and pass it as follows: + +```python +from transformers import AutoModelForCausalLM +from datasets import load_dataset +from trl import SFTConfig, SFTTrainer + +dataset = load_dataset("stanfordnlp/imdb", split="train") + +model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") + +training_args = SFTConfig(output_dir="/tmp") + +trainer = SFTTrainer( + model, + train_dataset=dataset, + args=training_args, +) + +trainer.train() +``` + +The above snippets will use the default training arguments from the [`SFTConfig`] class. If you want to modify the defaults pass in your modification to the `SFTConfig` constructor and pass them to the trainer via the `args` argument. + +## Advanced usage + +### Train on completions only + +You can use the `DataCollatorForCompletionOnlyLM` to train your model on the generated prompts only. Note that this works only in the case when `packing=False`. +To instantiate that collator for instruction data, pass a response template and the tokenizer. Here is an example of how it would work to fine-tune `opt-350m` on completions only on the CodeAlpaca dataset: + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer +from datasets import load_dataset +from trl import SFTConfig, SFTTrainer, DataCollatorForCompletionOnlyLM + +dataset = load_dataset("lucasmccabe-lmi/CodeAlpaca-20k", split="train") + +model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") +tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") + +def formatting_prompts_func(example): + output_texts = [] + for i in range(len(example['instruction'])): + text = f"### Question: {example['instruction'][i]}\n ### Answer: {example['output'][i]}" + output_texts.append(text) + return output_texts + +response_template = " ### Answer:" +collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=tokenizer) + +trainer = SFTTrainer( + model, + train_dataset=dataset, + args=SFTConfig(output_dir="/tmp"), + formatting_func=formatting_prompts_func, + data_collator=collator, +) + +trainer.train() +``` + +To instantiate that collator for assistant style conversation data, pass a response template, an instruction template and the tokenizer. Here is an example of how it would work to fine-tune `opt-350m` on assistant completions only on the Open Assistant Guanaco dataset: + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer +from datasets import load_dataset +from trl import SFTConfig, SFTTrainer, DataCollatorForCompletionOnlyLM + +dataset = load_dataset("timdettmers/openassistant-guanaco", split="train") + +model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") +tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") + +instruction_template = "### Human:" +response_template = "### Assistant:" +collator = DataCollatorForCompletionOnlyLM(instruction_template=instruction_template, response_template=response_template, tokenizer=tokenizer, mlm=False) + +trainer = SFTTrainer( + model, + args=SFTConfig(output_dir="/tmp"), + train_dataset=dataset, + data_collator=collator, +) + +trainer.train() +``` + +Make sure to have a `pad_token_id` which is different from `eos_token_id` which can result in the model not properly predicting EOS (End of Sentence) tokens during generation. + +#### Using token_ids directly for `response_template` + +Some tokenizers like Llama 2 (`meta-llama/Llama-2-XXb-hf`) tokenize sequences differently depending on whether they have context or not. For example: + +```python +from transformers import AutoTokenizer +tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf") + +def print_tokens_with_ids(txt): + tokens = tokenizer.tokenize(txt, add_special_tokens=False) + token_ids = tokenizer.encode(txt, add_special_tokens=False) + print(list(zip(tokens, token_ids))) + +prompt = """### User: Hello\n\n### Assistant: Hi, how can I help you?""" +print_tokens_with_ids(prompt) # [..., ('▁Hello', 15043), ('<0x0A>', 13), ('<0x0A>', 13), ('##', 2277), ('#', 29937), ('▁Ass', 4007), ('istant', 22137), (':', 29901), ...] + +response_template = "### Assistant:" +print_tokens_with_ids(response_template) # [('▁###', 835), ('▁Ass', 4007), ('istant', 22137), (':', 29901)] +``` + +In this case, and due to lack of context in `response_template`, the same string ("### Assistant:") is tokenized differently: + + - Text (with context): `[2277, 29937, 4007, 22137, 29901]` + - `response_template` (without context): `[835, 4007, 22137, 29901]` + +This will lead to an error when the `DataCollatorForCompletionOnlyLM` does not find the `response_template` in the dataset example text: + +``` +RuntimeError: Could not find response key [835, 4007, 22137, 29901] in token IDs tensor([ 1, 835, ...]) +``` + + +To solve this, you can tokenize the `response_template` with the same context as in the dataset, truncate it as needed and pass the `token_ids` directly to the `response_template` argument of the `DataCollatorForCompletionOnlyLM` class. For example: + +```python +response_template_with_context = "\n### Assistant:" # We added context here: "\n". This is enough for this tokenizer +response_template_ids = tokenizer.encode(response_template_with_context, add_special_tokens=False)[2:] # Now we have it like in the dataset texts: `[2277, 29937, 4007, 22137, 29901]` + +data_collator = DataCollatorForCompletionOnlyLM(response_template_ids, tokenizer=tokenizer) +``` + +### Add Special Tokens for Chat Format + +Adding special tokens to a language model is crucial for training chat models. These tokens are added between the different roles in a conversation, such as the user, assistant, and system and help the model recognize the structure and flow of a conversation. This setup is essential for enabling the model to generate coherent and contextually appropriate responses in a chat environment. +The [`setup_chat_format`] function in `trl` easily sets up a model and tokenizer for conversational AI tasks. This function: +- Adds special tokens to the tokenizer, e.g. `<|im_start|>` and `<|im_end|>`, to indicate the start and end of a conversation. +- Resizes the model’s embedding layer to accommodate the new tokens. +- Sets the `chat_template` of the tokenizer, which is used to format the input data into a chat-like format. The default is `chatml` from OpenAI. +- _optionally_ you can pass `resize_to_multiple_of` to resize the embedding layer to a multiple of the `resize_to_multiple_of` argument, e.g. 64. If you want to see more formats being supported in the future, please open a GitHub issue on [trl](https://github.com/huggingface/trl) + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer +from trl import setup_chat_format + +# Load model and tokenizer +model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") +tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") + +# Set up the chat format with default 'chatml' format +model, tokenizer = setup_chat_format(model, tokenizer) + +``` + +With our model and tokenizer set up, we can now fine-tune our model on a conversational dataset. Below is an example of how a dataset can be formatted for fine-tuning. + +### Dataset format support + +The [`SFTTrainer`] supports popular dataset formats. This allows you to pass the dataset to the trainer without any pre-processing directly. The following formats are supported: +* conversational format +```json +{"messages": [{"role": "system", "content": "You are helpful"}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "..."}]} +{"messages": [{"role": "system", "content": "You are helpful"}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "..."}]} +{"messages": [{"role": "system", "content": "You are helpful"}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "..."}]} +``` +* instruction format +```json +{"prompt": "", "completion": ""} +{"prompt": "", "completion": ""} +{"prompt": "", "completion": ""} +``` + +If your dataset uses one of the above formats, you can directly pass it to the trainer without pre-processing. The [`SFTTrainer`] will then format the dataset for you using the defined format from the model's tokenizer with the [apply_chat_template](https://huggingface.co/docs/transformers/main/en/chat_templating#templates-for-chat-models) method. + + +```python +from datasets import load_dataset +from trl import SFTConfig, SFTTrainer + +... + +# load jsonl dataset +dataset = load_dataset("json", data_files="path/to/dataset.jsonl", split="train") +# load dataset from the HuggingFace Hub +dataset = load_dataset("philschmid/dolly-15k-oai-style", split="train") + +... + +training_args = SFTConfig(packing=True) +trainer = SFTTrainer( + "facebook/opt-350m", + args=training_args, + train_dataset=dataset, +) +``` + +If the dataset is not in one of those format you can either preprocess the dataset to match the formatting or pass a formatting function to the SFTTrainer to do it for you. Let's have a look. + + +### Format your input prompts + +For instruction fine-tuning, it is quite common to have two columns inside the dataset: one for the prompt & the other for the response. +This allows people to format examples like [Stanford-Alpaca](https://github.com/tatsu-lab/stanford_alpaca) did as follows: +```bash +Below is an instruction ... + +### Instruction +{prompt} + +### Response: +{completion} +``` +Let us assume your dataset has two fields, `question` and `answer`. Therefore you can just run: +```python +... +def formatting_prompts_func(example): + output_texts = [] + for i in range(len(example['question'])): + text = f"### Question: {example['question'][i]}\n ### Answer: {example['answer'][i]}" + output_texts.append(text) + return output_texts + +trainer = SFTTrainer( + model, + args=training_args, + train_dataset=dataset, + formatting_func=formatting_prompts_func, +) + +trainer.train() +``` +To properly format your input make sure to process all the examples by looping over them and returning a list of processed text. Check out a full example of how to use SFTTrainer on alpaca dataset [here](https://github.com/huggingface/trl/pull/444#issue-1760952763) + +### Packing dataset ([`ConstantLengthDataset`]) + +[`SFTTrainer`] supports _example packing_, where multiple short examples are packed in the same input sequence to increase training efficiency. This is done with the [`ConstantLengthDataset`] utility class that returns constant length chunks of tokens from a stream of examples. To enable the usage of this dataset class, simply pass `packing=True` to the [`SFTConfig`] constructor. + +```python +... +training_args = SFTConfig(packing=True) + +trainer = SFTTrainer( + "facebook/opt-350m", + train_dataset=dataset, + args=training_args +) + +trainer.train() +``` + +Note that if you use a packed dataset and if you pass `max_steps` in the training arguments you will probably train your models for more than few epochs, depending on the way you have configured the packed dataset and the training protocol. Double check that you know and understand what you are doing. +If you don't want to pack your `eval_dataset`, you can pass `eval_packing=False` to the `SFTConfig` init method. + +#### Customize your prompts using packed dataset + +If your dataset has several fields that you want to combine, for example if the dataset has `question` and `answer` fields and you want to combine them, you can pass a formatting function to the trainer that will take care of that. For example: + +```python +def formatting_func(example): + text = f"### Question: {example['question']}\n ### Answer: {example['answer']}" + return text + +training_args = SFTConfig(packing=True) +trainer = SFTTrainer( + "facebook/opt-350m", + train_dataset=dataset, + args=training_args, + formatting_func=formatting_func +) + +trainer.train() +``` +You can also customize the [`ConstantLengthDataset`] much more by directly passing the arguments to the [`SFTConfig`] constructor. Please refer to that class' signature for more information. + +### Control over the pretrained model + +You can directly pass the kwargs of the `from_pretrained()` method to the [`SFTConfig`]. For example, if you want to load a model in a different precision, analogous to + +```python +model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", torch_dtype=torch.bfloat16) + +... + +training_args = SFTConfig( + model_init_kwargs={ + "torch_dtype": "bfloat16", + }, + output_dir="/tmp", +) +trainer = SFTTrainer( + "facebook/opt-350m", + train_dataset=dataset, + args=training_args, +) + +trainer.train() +``` +Note that all keyword arguments of `from_pretrained()` are supported. + +### Training adapters + +We also support tight integration with 🤗 PEFT library so that any user can conveniently train adapters and share them on the Hub instead of training the entire model + +```python +from datasets import load_dataset +from trl import SFTConfig, SFTTrainer +from peft import LoraConfig + +dataset = load_dataset("stanfordnlp/imdb", split="train") + +peft_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", +) + +trainer = SFTTrainer( + "EleutherAI/gpt-neo-125m", + train_dataset=dataset, + args=SFTConfig(output_dir="/tmp"), + peft_config=peft_config +) + +trainer.train() +``` + +You can also continue training your `PeftModel`. For that, first load a `PeftModel` outside `SFTTrainer` and pass it directly to the trainer without the `peft_config` argument being passed. + +### Training adapters with base 8 bit models + +For that, you need to first load your 8 bit model outside the Trainer and pass a `PeftConfig` to the trainer. For example: + +```python +... + +peft_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", +) + +model = AutoModelForCausalLM.from_pretrained( + "EleutherAI/gpt-neo-125m", + load_in_8bit=True, + device_map="auto", +) + +trainer = SFTTrainer( + model, + train_dataset=dataset, + args=SFTConfig(), + peft_config=peft_config, +) + +trainer.train() +``` + +## Using Flash Attention and Flash Attention 2 + +You can benefit from Flash Attention 1 & 2 using SFTTrainer out of the box with minimal changes of code. +First, to make sure you have all the latest features from transformers, install transformers from source + +```bash +pip install -U git+https://github.com/huggingface/transformers.git +``` + +Note that Flash Attention only works on GPU now and under half-precision regime (when using adapters, base model loaded in half-precision) +Note also both features are perfectly compatible with other tools such as quantization. + +### Using Flash-Attention 1 + +For Flash Attention 1 you can use the `BetterTransformer` API and force-dispatch the API to use Flash Attention kernel. First, install the latest optimum package: + +```bash +pip install -U optimum +``` + +Once you have loaded your model, wrap the `trainer.train()` call under the `with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):` context manager: + +```diff +... + ++ with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): + trainer.train() +``` + +Note that you cannot train your model using Flash Attention 1 on an arbitrary dataset as `torch.scaled_dot_product_attention` does not support training with padding tokens if you use Flash Attention kernels. Therefore you can only use that feature with `packing=True`. If your dataset contains padding tokens, consider switching to Flash Attention 2 integration. + +Below are some numbers you can get in terms of speedup and memory efficiency, using Flash Attention 1, on a single NVIDIA-T4 16GB. + +| use_flash_attn_1 | model_name | max_seq_len | batch_size | time per training step | +| ---------------- | ----------------- | ----------- | ---------- | ---------------------- | +| x | facebook/opt-350m | 2048 | 8 | ~59.1s | +| | facebook/opt-350m | 2048 | 8 | **OOM** | +| x | facebook/opt-350m | 2048 | 4 | ~30.3s | +| | facebook/opt-350m | 2048 | 4 | ~148.9s | + +### Using Flash Attention-2 + +To use Flash Attention 2, first install the latest `flash-attn` package: + +```bash +pip install -U flash-attn +``` + +And add `attn_implementation="flash_attention_2"` when calling `from_pretrained`: + +```python +model = AutoModelForCausalLM.from_pretrained( + model_id, + load_in_4bit=True, + attn_implementation="flash_attention_2" +) +``` + +If you don't use quantization, make sure your model is loaded in half-precision and dispatch your model on a supported GPU device. +After loading your model, you can either train it as it is, or attach adapters and train adapters on it in case your model is quantized. + +In contrast to Flash Attention 1, the integration makes it possible to train your model on an arbitrary dataset that also includes padding tokens. + + +### Using model creation utility + +We included a utility function to create your model. + +[[autodoc]] ModelConfig + +```python +from trl import ModelConfig, SFTTrainer, get_kbit_device_map, get_peft_config, get_quantization_config +model_config = ModelConfig( + model_name_or_path="facebook/opt-350m" + attn_implementation=None, # or "flash_attention_2" +) +torch_dtype = ( + model_config.torch_dtype + if model_config.torch_dtype in ["auto", None] + else getattr(torch, model_config.torch_dtype) +) +quantization_config = get_quantization_config(model_config) +model_kwargs = dict( + revision=model_config.model_revision, + trust_remote_code=model_config.trust_remote_code, + attn_implementation=model_config.attn_implementation, + torch_dtype=torch_dtype, + use_cache=False if training_args.gradient_checkpointing else True, + device_map=get_kbit_device_map() if quantization_config is not None else None, + quantization_config=quantization_config, +) +model = AutoModelForCausalLM.from_pretrained(model_config.model_name_or_path, **model_kwargs) +trainer = SFTTrainer( + ..., + model=model_config.model_name_or_path, + peft_config=get_peft_config(model_config), +) +``` + +### Enhance the model's performances using NEFTune + +NEFTune is a technique to boost the performance of chat models and was introduced by the paper ["NEFTune: Noisy Embeddings Improve Instruction Finetuning"](https://huggingface.co/papers/2310.05914) from Jain et al. it consists of adding noise to the embedding vectors during training. According to the abstract of the paper: + +> Standard finetuning of LLaMA-2-7B using Alpaca achieves 29.79% on AlpacaEval, which rises to 64.69% using noisy embeddings. NEFTune also improves over strong baselines on modern instruction datasets. Models trained with Evol-Instruct see a 10% improvement, with ShareGPT an 8% improvement, and with OpenPlatypus an 8% improvement. Even powerful models further refined with RLHF such as LLaMA-2-Chat benefit from additional training with NEFTune. + +
+ +
+ +To use it in `SFTTrainer` simply pass `neftune_noise_alpha` when creating your `SFTConfig` instance. Note that to avoid any surprising behaviour, NEFTune is disabled after training to retrieve back the original behaviour of the embedding layer. + +```python +from datasets import load_dataset +from trl import SFTConfig, SFTTrainer + +dataset = load_dataset("stanfordnlp/imdb", split="train") + +training_args = SFTConfig( + neftune_noise_alpha=5, +) +trainer = SFTTrainer( + "facebook/opt-350m", + train_dataset=dataset, + args=training_args, +) +trainer.train() +``` + +We have tested NEFTune by training `mistralai/Mistral-7B-v0.1` on the [OpenAssistant dataset](https://huggingface.co/datasets/timdettmers/openassistant-guanaco) and validated that using NEFTune led to a performance boost of ~25% on MT Bench. + +
+ +
+ +Note however, that the amount of performance gain is _dataset dependent_ and in particular, applying NEFTune on synthetic datasets like [UltraChat](https://huggingface.co/datasets/stingning/ultrachat) typically produces smaller gains. + +### Accelerate fine-tuning 2x using `unsloth` + +You can further accelerate QLoRA / LoRA (2x faster, 60% less memory) using the [`unsloth`](https://github.com/unslothai/unsloth) library that is fully compatible with `SFTTrainer`. Currently `unsloth` supports only Llama (Yi, TinyLlama, Qwen, Deepseek etc) and Mistral architectures. Some benchmarks on 1x A100 listed below: + +| 1 A100 40GB | Dataset | 🤗 | 🤗 + Flash Attention 2 | 🦥 Unsloth | 🦥 VRAM saved | +| --------------- | --------- | --- | --------------------- | --------- | ------------ | +| Code Llama 34b | Slim Orca | 1x | 1.01x | **1.94x** | -22.7% | +| Llama-2 7b | Slim Orca | 1x | 0.96x | **1.87x** | -39.3% | +| Mistral 7b | Slim Orca | 1x | 1.17x | **1.88x** | -65.9% | +| Tiny Llama 1.1b | Alpaca | 1x | 1.55x | **2.74x** | -57.8% | + +First install `unsloth` according to the [official documentation](https://github.com/unslothai/unsloth). Once installed, you can incorporate unsloth into your workflow in a very simple manner; instead of loading `AutoModelForCausalLM`, you just need to load a `FastLanguageModel` as follows: + +```python +import torch +from trl import SFTConfig, SFTTrainer +from unsloth import FastLanguageModel + +max_seq_length = 2048 # Supports automatic RoPE Scaling, so choose any number + +# Load model +model, tokenizer = FastLanguageModel.from_pretrained( + model_name="unsloth/mistral-7b", + max_seq_length=max_seq_length, + dtype=None, # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+ + load_in_4bit=True, # Use 4bit quantization to reduce memory usage. Can be False + # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf +) + +# Do model patching and add fast LoRA weights +model = FastLanguageModel.get_peft_model( + model, + r=16, + target_modules=[ + "q_proj", + "k_proj", + "v_proj", + "o_proj", + "gate_proj", + "up_proj", + "down_proj", + ], + lora_alpha=16, + lora_dropout=0, # Dropout = 0 is currently optimized + bias="none", # Bias = "none" is currently optimized + use_gradient_checkpointing=True, + random_state=3407, +) + +training_args = SFTConfig(output_dir="./output", max_seq_length=max_seq_length) + +trainer = SFTTrainer( + model=model, + args=training_args, + train_dataset=dataset, +) +trainer.train() +``` + +The saved model is fully compatible with Hugging Face's transformers library. Learn more about unsloth in their [official repository](https://github.com/unslothai/unsloth). + +## Liger-Kernel: Increase 20% throughput and reduces 60% memory for multi-GPU training + +[Liger Kernel](https://github.com/linkedin/Liger-Kernel) is a collection of Triton kernels designed specifically for LLM training. It can effectively increase multi-GPU training throughput by 20% and reduces memory usage by 60%. That way, we can **4x** our context length, as described in the benchmark below. They have implemented Hugging Face Compatible `RMSNorm`, `RoPE`, `SwiGLU`, `CrossEntropy`, `FusedLinearCrossEntropy`, and more to come. The kernel works out of the box with [Flash Attention](https://github.com/Dao-AILab/flash-attention), [PyTorch FSDP](https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html), and [Microsoft DeepSpeed](https://github.com/microsoft/DeepSpeed). + +With great memory reduction, you can potentially turn off cpu_offloading or gradient checkpointing to further boost the performance. + +| Speed Up | Memory Reduction | +|--------------------------|-------------------------| +| ![Speed up](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/docs/images/e2e-tps.png) | ![Memory](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/docs/images/e2e-memory.png) | + + +1. To use Liger-Kernel in `SFTTrainer`, first install by + +```bash +pip install liger-kernel +``` + +2. Once installed, set `use_liger` in [`SFTConfig`]. No other changes are needed! + +```python +training_args = SFTConfig( + use_liger=True +) +``` + +To learn more about Liger-Kernel, visit their [official repository](https://github.com/linkedin/Liger-Kernel/). + +## Best practices + +Pay attention to the following best practices when training a model with that trainer: + +- [`SFTTrainer`] always pads by default the sequences to the `max_seq_length` argument of the [`SFTTrainer`]. If none is passed, the trainer will retrieve that value from the tokenizer. Some tokenizers do not provide a default value, so there is a check to retrieve the minimum between 2048 and that value. Make sure to check it before training. +- For training adapters in 8bit, you might need to tweak the arguments of the `prepare_model_for_kbit_training` method from PEFT, hence we advise users to use `prepare_in_int8_kwargs` field, or create the `PeftModel` outside the [`SFTTrainer`] and pass it. +- For a more memory-efficient training using adapters, you can load the base model in 8bit, for that simply add `load_in_8bit` argument when creating the [`SFTTrainer`], or create a base model in 8bit outside the trainer and pass it. +- If you create a model outside the trainer, make sure to not pass to the trainer any additional keyword arguments that are relative to `from_pretrained()` method. + +## Multi-GPU Training + +Trainer (and thus SFTTrainer) supports multi-GPU training. If you run your script with `python script.py` it will default to using DP as the strategy, which may be [slower than expected](https://github.com/huggingface/trl/issues/1303). To use DDP (which is generally recommended, see [here](https://huggingface.co/docs/transformers/en/perf_train_gpu_many?select-gpu=Accelerate#data-parallelism) for more info) you must launch the script with `python -m torch.distributed.launch script.py` or `accelerate launch script.py`. For DDP to work you must also check the following: +- If you're using gradient_checkpointing, add the following to the TrainingArguments: `gradient_checkpointing_kwargs={'use_reentrant':False}` (more info [here](https://github.com/huggingface/transformers/issues/26969) +- Ensure that the model is placed on the correct device: +```python +from accelerate import PartialState +device_string = PartialState().process_index +model = AutoModelForCausalLM.from_pretrained( + ... + device_map={'':device_string} +) +``` + +## GPTQ Conversion + +You may experience some issues with GPTQ Quantization after completing training. Lowering `gradient_accumulation_steps` to `4` will resolve most issues during the quantization process to GPTQ format. + +## Extending `SFTTrainer` for Vision Language Models + +`SFTTrainer` does not inherently support vision-language data. However, we provide a guide on how to tweak the trainer to support vision-language data. Specifically, you need to use a custom data collator that is compatible with vision-language data. This guide outlines the steps to make these adjustments. For a concrete example, refer to the script [`examples/scripts/sft_vlm.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/sft_vlm.py) which demonstrates how to fine-tune the LLaVA 1.5 model on the [HuggingFaceH4/llava-instruct-mix-vsft](https://huggingface.co/datasets/HuggingFaceH4/llava-instruct-mix-vsft) dataset. + +### Preparing the Data + +The data format is flexible, provided it is compatible with the custom collator that we will define later. A common approach is to use conversational data. Given that the data includes both text and images, the format needs to be adjusted accordingly. Below is an example of a conversational data format involving both text and images: + +```python +images = ["obama.png"] +messages = [ + { + "role": "user", + "content": [ + {"type": "text", "text": "Who is this?"}, + {"type": "image"} + ] + }, + { + "role": "assistant", + "content": [ + {"type": "text", "text": "Barack Obama"} + ] + }, + { + "role": "user", + "content": [ + {"type": "text", "text": "What is he famous for?"} + ] + }, + { + "role": "assistant", + "content": [ + {"type": "text", "text": "He is the 44th President of the United States."} + ] + } +] +``` + +To illustrate how this data format will be processed using the LLaVA model, you can use the following code: + +```python +from transformers import AutoProcessor + +processor = AutoProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf") +print(processor.apply_chat_template(messages, tokenize=False)) +``` + +The output will be formatted as follows: + +```txt +Who is this? ASSISTANT: Barack Obama USER: What is he famous for? ASSISTANT: He is the 44th President of the United States. +``` + + + + +### A custom collator for processing multi-modal data + +Unlike the default behavior of `SFTTrainer`, processing multi-modal data is done on the fly during the data collation process. To do this, you need to define a custom collator that processes both the text and images. This collator must take a list of examples as input (see the previous section for an example of the data format) and return a batch of processed data. Below is an example of such a collator: + +```python +def collate_fn(examples): + # Get the texts and images, and apply the chat template + texts = [processor.apply_chat_template(example["messages"], tokenize=False) for example in examples] + images = [example["images"][0] for example in examples] + + # Tokenize the texts and process the images + batch = processor(texts, images, return_tensors="pt", padding=True) + + # The labels are the input_ids, and we mask the padding tokens in the loss computation + labels = batch["input_ids"].clone() + labels[labels == processor.tokenizer.pad_token_id] = -100 + batch["labels"] = labels + + return batch +``` + +We can verify that the collator works as expected by running the following code: + +```python +from datasets import load_dataset + +dataset = load_dataset("HuggingFaceH4/llava-instruct-mix-vsft", split="train") +examples = [dataset[0], dataset[1]] # Just two examples for the sake of the example +collated_data = collate_fn(examples) +print(collated_data.keys()) # dict_keys(['input_ids', 'attention_mask', 'pixel_values', 'labels']) +``` + +### Training the vision-language model + +Now that we have prepared the data and defined the collator, we can proceed with training the model. To ensure that the data is not processed as text-only, we need to set a couple of arguments in the `SFTConfig`, specifically `remove_unused_columns` and `skip_prepare_dataset` to `True` to avoid the default processing of the dataset. Below is an example of how to set up the `SFTTrainer`. + +```python +training_args.remove_unused_columns = False +training_args.dataset_kwargs = {"skip_prepare_dataset": True} + +trainer = SFTTrainer( + model=model, + args=training_args, + data_collator=collate_fn, + train_dataset=train_dataset, + processing_class=processor.tokenizer, +) +``` + +A full example of training LLaVa 1.5 on the [HuggingFaceH4/llava-instruct-mix-vsft](https://huggingface.co/datasets/HuggingFaceH4/llava-instruct-mix-vsft) dataset can be found in the script [`examples/scripts/sft_vlm.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/sft_vlm.py). + +- [Experiment tracking](https://wandb.ai/huggingface/trl/runs/2b2c5l7s) +- [Trained model](https://huggingface.co/HuggingFaceH4/sft-llava-1.5-7b-hf) + +## SFTTrainer + +[[autodoc]] SFTTrainer + +## SFTConfig + +[[autodoc]] SFTConfig + +## Datasets + +In the SFTTrainer we smartly support `datasets.IterableDataset` in addition to other style datasets. This is useful if you are using large corpora that you do not want to save all to disk. The data will be tokenized and processed on the fly, even when packing is enabled. + +Additionally, in the SFTTrainer, we support pre-tokenized datasets if they are `datasets.Dataset` or `datasets.IterableDataset`. In other words, if such a dataset has a column of `input_ids`, no further processing (tokenization or packing) will be done, and the dataset will be used as-is. This can be useful if you have pretokenized your dataset outside of this script and want to re-use it directly. + +### ConstantLengthDataset + +[[autodoc]] trainer.ConstantLengthDataset diff --git a/testbed/huggingface__trl/docs/source/text_environments.md b/testbed/huggingface__trl/docs/source/text_environments.md new file mode 100644 index 0000000000000000000000000000000000000000..851020e0f5c73f05957072db00040e3dddd0aa49 --- /dev/null +++ b/testbed/huggingface__trl/docs/source/text_environments.md @@ -0,0 +1,197 @@ +# Text Environments + +Text environments provide a learning ground for language agents. It allows a language model to use tools to accomplish a task such as using a Python interpreter to answer math questions or using a search index for trivia questions. Having access to tools allows language models to solve tasks that would be very hard for the models itself but can be trivial for the appropriate tools. A good example is arithmetics of large numbers that become a simple copy-paste task once you have access to a calculator. + +
+ +
+ +Let's dive into how text environments work and start with tools! + +## Tools + +One of the core building blocks of text environments are tools that the model can use to solve tasks. In general tools can be any Python function that takes a string as input and returns string. The `TextEnvironment` offers two options for tools: either go with predefined tools from `transformers.Tool` or define your own function or class with `__call__` method. Let's have a look at both! + +### `transformers.Tool` + +Text environments fully support tools of the class `transformers.Tool`. The advantage of building tools in that framework is that they can easily be shared + +```Python +from transformers import load_tool + +# simple calculator tool that runs +-/* operations +calc_tool = load_tool("ybelkada/simple-calculator") + +# python interpreter that executes program and returns outputs +py_tool = load_tool("lvwerra/python-interpreter") + +# wikipedia search index that returns best search match +wiki_tool = load_tool("vwxyzjn/pyserini-wikipedia-kilt-doc") +``` + +These tools are either loaded from the hub or from a local folder. Using the tool is as simple as calling them with a text query: + +```Python +calc_tool("1/2") +>>> "0.5" +``` + +Note that both input and return values are strings to enable easy usage with a language model. + +### Custom Tools + +The following is an example of a tool that adds two integers: + +```Python +def add(text): + int_1, int_2 = text.split("+") + result = int(int_1) + int(int_2) + return str(result) + +print(add("1+1")) +>>> "2" +``` + +We looked at basic examples such as a calculator but the principle holds for more complex tools as well such as a web search tool where you input the query and get the search results in return. Now let's look at how the model can use the tools with the call syntax. + +### Call syntax + +In order to have a unified way for the model to call a tool we created a simple syntax that looks as follows: + +```python +"QUERYTOOL_RESPONSE" +``` + +There are a few special tokens involved so let's decompose it: First the model can signal that it wants to use a tool by emitting the `` token. After that we want to know the name of the tool to call which is done by enclosing the tool name with `<>` brackets. Once we know which tool to call the tool query follows which is in free text form. The `` tokens signifies the end of the query and stops the model generation. At this point the model output is parsed and the query sent to the tool. The environment appends the tool response to the string followed by the `` token to show the end the tool output. + +Let's look at the concrete example of the calculator and assume its name is `Calculator` (more on how the name of a tool is inferred later): + +```python +"1/20.5" +``` + +Finally, the episode is ended and generation stops when the model generates `` which marks the interaction as completed. + +Now let's have a look how we can create a new text environment! + +## Create a `TextEnvironment` + + +```python +prompt = """\ +What is 13-3? +13-310.0 +Result=10 +""" + +def reward_fn(result, answer): + """Simplified reward function returning 1 if result matches answer and 0 otherwise.""" + result_parsed = result.split("=")[1].split("<")[0] + return int(result_parsed==answer) + +text_env = TextEnvironemnt( + model=model, + tokenizer=tokenizer, + tools= {"SimpleCalculatorTool": load_tool("ybelkada/simple-calculator")}, + reward_fn=exact_match_reward, + prompt=prompt, + max_turns=1 + max_tool_response=100 + generation_kwargs={"do_sample": "true"} +) +``` + +Let's decompose the settings: + +| Argument | Description | +|:-------------------|:----------------| +| `model` | Language model to interact with the environment and generate requests. | +| `tokenizer` | Tokenizer of language model handling tokenization of strings. | +| `tools` | `list` of `dict` of tools. If former the name of the tool is inferred from class name and otherwise it's the keys of the dictionary.| +| `reward_fn` | A function that takes a string as input and returns. Can have extra arguments that are passed to `.run()` such as ground truth.| +| `prompt` | Prompt to prepend to every task. Usually a few examples to demonstrate to the model how to use the tools in a few-shot fashion. | +| `max_turns` | Maximum number of interactions between model and tools before episode ends.| +| `max_tool_response`| The tool response is truncated to this number to avoid running out of model context.| +| `max_length` | The maximum number of tokens to allow in an episode. | +| `generation_kwargs`| Generation settings used by the language model. | + +You can customize the environment to your needs and add custom tools and settings. Let's see how you can use the environment to have the model interact with the available tools! + + +## Run an Episode + +To run a set of queries through the text environment one can simply use the `run` method. + +```python +queries = ["What is 1/2?"] +answers = ["0.5"] + +queries, responses, masks, rewards, histories = text_env.run(queries, answers=answers) +``` + +This will execute the model/tool feedback loop for each query until either no tool is called anymore, the maximum number of turns is reached or to maximum number of tokens in an episode is exceeded. The extra `kwargs` (e.g. `answers=answers` above) passed to `run` will be passed on to the reward function. + +There are five objects that are returned by `run`: + +- `queries`: a list of the tokenized queries +- `responses`: all tokens that have been generated withing the environment including model and tool tokens +- `masks`: mask that indicates which tokens have been generated by the model and which tokens are generated by the tool +- `rewards`: a list of reward for each query/response +- `histories`: list of `TextHistory` objects, which are useful objects containing all the above and also the text equivalents + +The masks are crucial for training as we don't want to optimize tokens that the model has not generated which are tokens produced by the tools. + +Next, we'll train a PPO step with the generated responses! + + +### Train +Training on episodes from the `TextEnvironment` is straight forward and simply requires forwarding all the returned variables except the `TextHistory` objects to the `step` method: + +```python +train_stats = ppo_trainer.step(queries, responses, rewards, masks) +``` + +## `TextHistory` + +The `TextHistory` object stores the interactions between the model and the text environment. It stores tokens and text generated in each turn and their source in each turn (model or system) as well as rewards. Let's go through the class attributes and methods. + +### Attributes + +The following table summarises the available attributes of the `TextEnvironment` class: + +| Attribute | Description | +|:-------------------|:----------------| +| `text` | The full string of the text generated in the text environment with both model and system generated text. | +| `text_spans` | A list of tuples with the spans for each model or system generated text segment. | +| `system_spans` | A list of boolean values indicating if the segment is model or system generated. | +| `tokens` | All tokens generated in text environment with both model and system generated tokens. | +| `token_spans` | Similar to `text_spans` the `token_spans` indicate the boundaries of model andsystem generated tokens. | +| `token_masks` | The token masks can be used to ignore system generated tokens by masking them. | +| `completed` | Indicates if the interaction with the environment has completed. | +| `truncated` | Indicates if the interaction with the environment has completed because max length was reached. | + +With these attributes you can reconstruct every interaction of the model with the `TextEnvironment`. The `TextHistory` also lets you visualize the text history. Let's have a look! + +### Visualization + +When the model interacts inside the `TextEnvironment` it can be useful to visualize and separate which parts of the text outputs were generated by the model and which parts come from the system and tools. For that purpose there are the two methods [`TextHistory.show_text`] and [`TextHistory.show_tokens`]. They print the text and tokens respectively and highlight the various segments using the [`rich` libray](https://github.com/Textualize/rich) (make sure to install it before using these methods). + +You can see that the prompt is highlighted in gray, whereas system segments such as query and tool responses are highlighted in green. All segments generated by the model are highlighted in blue and in addition to the pure text output the reward is displayed as additional text in plum. Here an example of `show_text`: + +
+ +
+ +Sometimes there can be tricky tokenization related issues that are hidden when showing the decoded text. Thus `TextHistory` also offers an option to display the same highlighting on the tokens directly with `show_tokens`: + +
+ +
+ +Note that you can turn on the colour legend by passing `show_legend=True`. + +## API Documentation + +[[autodoc]] TextEnvironment + +[[autodoc]] TextHistory diff --git a/testbed/huggingface__trl/docs/source/use_model.md b/testbed/huggingface__trl/docs/source/use_model.md new file mode 100644 index 0000000000000000000000000000000000000000..f5ab1e45946460fc80d64f54136482b12400d059 --- /dev/null +++ b/testbed/huggingface__trl/docs/source/use_model.md @@ -0,0 +1,58 @@ +# Use model after training + +Once you have trained a model using either the SFTTrainer, PPOTrainer, or DPOTrainer, you will have a fine-tuned model that can be used for text generation. In this section, we'll walk through the process of loading the fine-tuned model and generating text. If you need to run an inference server with the trained model, you can explore libraries such as [`text-generation-inference`](https://github.com/huggingface/text-generation-inference). + +## Load and Generate + +If you have fine-tuned a model fully, meaning without the use of PEFT you can simply load it like any other language model in transformers. E.g. the value head that was trained during the PPO training is no longer needed and if you load the model with the original transformer class it will be ignored: + +```python +from transformers import AutoTokenizer, AutoModelForCausalLM + +model_name_or_path = "kashif/stack-llama-2" #path/to/your/model/or/name/on/hub +device = "cpu" # or "cuda" if you have a GPU + +model = AutoModelForCausalLM.from_pretrained(model_name_or_path).to(device) +tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) + +inputs = tokenizer.encode("This movie was really", return_tensors="pt").to(device) +outputs = model.generate(inputs) +print(tokenizer.decode(outputs[0])) +``` + +Alternatively you can also use the pipeline: + +```python +from transformers import pipeline + +model_name_or_path = "kashif/stack-llama-2" #path/to/your/model/or/name/on/hub +pipe = pipeline("text-generation", model=model_name_or_path) +print(pipe("This movie was really")[0]["generated_text"]) +``` + +## Use Adapters PEFT + +```python +from peft import PeftConfig, PeftModel +from transformers import AutoModelForCausalLM, AutoTokenizer + +base_model_name = "kashif/stack-llama-2" #path/to/your/model/or/name/on/hub" +adapter_model_name = "path/to/my/adapter" + +model = AutoModelForCausalLM.from_pretrained(base_model_name) +model = PeftModel.from_pretrained(model, adapter_model_name) + +tokenizer = AutoTokenizer.from_pretrained(base_model_name) +``` + +You can also merge the adapters into the base model so you can use the model like a normal transformers model, however the checkpoint will be significantly bigger: + +```python +model = AutoModelForCausalLM.from_pretrained(base_model_name) +model = PeftModel.from_pretrained(model, adapter_model_name) + +model = model.merge_and_unload() +model.save_pretrained("merged_adapters") +``` + +Once you have the model loaded and either merged the adapters or keep them separately on top you can run generation as with a normal model outlined above. diff --git a/testbed/huggingface__trl/docs/source/using_llama_models.mdx b/testbed/huggingface__trl/docs/source/using_llama_models.mdx new file mode 100644 index 0000000000000000000000000000000000000000..cf602d2030400b00fe91749a8e49438bbfb90c4c --- /dev/null +++ b/testbed/huggingface__trl/docs/source/using_llama_models.mdx @@ -0,0 +1,160 @@ +# Using LLaMA models with TRL + +We've begun rolling out examples to use Meta's LLaMA models in `trl` (see [Meta's LLaMA release](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) for the original LLaMA model). + +## Efficient training strategies + +Even training the smallest LLaMA model requires an enormous amount of memory. Some quick math: in bf16, every parameter uses 2 bytes (in fp32 4 bytes) in addition to 8 bytes used, e.g., in the Adam optimizer (see the [performance docs](https://huggingface.co/docs/transformers/perf_train_gpu_one#optimizer) in Transformers for more info). So a 7B parameter model would use `(2+8)*7B=70GB` just to fit in memory and would likely need more when you compute intermediate values such as attention scores. So you couldn’t train the model even on a single 80GB A100 like that. You can use some tricks, like more efficient optimizers of half-precision training, to squeeze a bit more into memory, but you’ll run out sooner or later. + +Another option is to use Parameter-Efficient Fine-Tuning (PEFT) techniques, such as the [`peft`](https://github.com/huggingface/peft) library, which can perform low-rank adaptation (LoRA) on a model loaded in 8-bit. +For more on `peft` + `trl`, see the [docs](https://huggingface.co/docs/trl/sentiment_tuning_peft). + +Loading the model in 8bit reduces the memory footprint drastically since you only need one byte per parameter for the weights (e.g. 7B LlaMa is 7GB in memory). +Instead of training the original weights directly, LoRA adds small adapter layers on top of some specific layers (usually the attention layers); thus, the number of trainable parameters is drastically reduced. + +In this scenario, a rule of thumb is to allocate ~1.2-1.4GB per billion parameters (depending on the batch size and sequence length) to fit the entire fine-tuning setup. +This enables fine-tuning larger models (up to 50-60B scale models on a NVIDIA A100 80GB) at low cost. + +Now we can fit very large models into a single GPU, but the training might still be very slow. +The simplest strategy in this scenario is data parallelism: we replicate the same training setup into separate GPUs and pass different batches to each GPU. +With this, you can parallelize the forward/backward passes of the model and scale with the number of GPUs. + +![chapter10_ddp.png](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/blog/stackllama/chapter10_ddp.png) + +We use either the `transformers.Trainer` or `accelerate`, which both support data parallelism without any code changes, by simply passing arguments when calling the scripts with `torchrun` or `accelerate launch`. The following runs a training script with 8 GPUs on a single machine with `accelerate` and `torchrun`, respectively. + +```bash +accelerate launch --multi_gpu --num_machines 1 --num_processes 8 my_accelerate_script.py +torchrun --nnodes 1 --nproc_per_node 8 my_torch_script.py +``` + +## Supervised fine-tuning + +Before we start training reward models and tuning our model with RL, it helps if the model is already good in the domain we are interested in. +In our case, we want it to answer questions, while for other use cases, we might want it to follow instructions, in which case instruction tuning is a great idea. +The easiest way to achieve this is by continuing to train the language model with the language modeling objective on texts from the domain or task. +The [StackExchange dataset](https://huggingface.co/datasets/HuggingFaceH4/stack-exchange-preferences) is enormous (over 10 million instructions), so we can easily train the language model on a subset of it. + +There is nothing special about fine-tuning the model before doing RLHF - it’s just the causal language modeling objective from pretraining that we apply here. +To use the data efficiently, we use a technique called packing: instead of having one text per sample in the batch and then padding to either the longest text or the maximal context of the model, we concatenate a lot of texts with a EOS token in between and cut chunks of the context size to fill the batch without any padding. + +![chapter10_preprocessing-clm.png](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/blog/stackllama/chapter10_preprocessing-clm.png) + +With this approach the training is much more efficient as each token that is passed through the model is also trained in contrast to padding tokens which are usually masked from the loss. +If you don't have much data and are more concerned about occasionally cutting off some tokens that are overflowing the context you can also use a classical data loader. + +The packing is handled by the `ConstantLengthDataset` and we can then use the `Trainer` after loading the model with `peft`. First, we load the model in int8, prepare it for training, and then add the LoRA adapters. + +```python +# load model in 8bit +model = AutoModelForCausalLM.from_pretrained( + args.model_path, + load_in_8bit=True, + device_map={"": Accelerator().local_process_index} + ) +model = prepare_model_for_kbit_training(model) + +# add LoRA to model +lora_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", +) + +model = get_peft_model(model, config) +``` + +We train the model for a few thousand steps with the causal language modeling objective and save the model. +Since we will tune the model again with different objectives, we merge the adapter weights with the original model weights. + +**Disclaimer:** due to LLaMA's license, we release only the adapter weights for this and the model checkpoints in the following sections. +You can apply for access to the base model's weights by filling out Meta AI's [form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform) and then converting them to the 🤗 Transformers format by running this [script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py). +Note that you'll also need to install 🤗 Transformers from source until the `v4.28` is released. + +Now that we have fine-tuned the model for the task, we are ready to train a reward model. + +## Reward modeling and human preferences + +In principle, we could fine-tune the model using RLHF directly with the human annotations. +However, this would require us to send some samples to humans for rating after each optimization iteration. +This is expensive and slow due to the number of training samples needed for convergence and the inherent latency of human reading and annotator speed. + +A trick that works well instead of direct feedback is training a reward model on human annotations collected before the RL loop. +The goal of the reward model is to imitate how a human would rate a text. There are several possible strategies to build a reward model: the most straightforward way would be to predict the annotation (e.g. a rating score or a binary value for “good”/”bad”). +In practice, what works better is to predict the ranking of two examples, where the reward model is presented with two candidates `(y_k, y_j)` for a given prompt `x` and has to predict which one would be rated higher by a human annotator. + +With the StackExchange dataset, we can infer which of the two answers was preferred by the users based on the score. +With that information and the loss defined above, we can then modify the `transformers.Trainer` by adding a custom loss function. + +```python +class RewardTrainer(Trainer): + def compute_loss(self, model, inputs, return_outputs=False): + rewards_j = model(input_ids=inputs["input_ids_j"], attention_mask=inputs["attention_mask_j"])[0] + rewards_k = model(input_ids=inputs["input_ids_k"], attention_mask=inputs["attention_mask_k"])[0] + loss = -nn.functional.logsigmoid(rewards_j - rewards_k).mean() + if return_outputs: + return loss, {"rewards_j": rewards_j, "rewards_k": rewards_k} + return loss +``` + +We utilize a subset of a 100,000 pair of candidates and evaluate on a held-out set of 50,000. With a modest training batch size of 4, we train the Llama model using the LoRA `peft` adapter for a single epoch using the Adam optimizer with BF16 precision. Our LoRA configuration is: + +```python +peft_config = LoraConfig( + task_type=TaskType.SEQ_CLS, + inference_mode=False, + r=8, + lora_alpha=32, + lora_dropout=0.1, +) +``` +As detailed in the next section, the resulting adapter can be merged into the frozen model and saved for further downstream use. + +## Reinforcement Learning from Human Feedback + +With the fine-tuned language model and the reward model at hand, we are now ready to run the RL loop. It follows roughly three steps: + +1. Generate responses from prompts, +2. Rate the responses with the reward model, +3. Run a reinforcement learning policy-optimization step with the ratings. + +The Query and Response prompts are templated as follows before being tokenized and passed to the model: + +```bash +Question: + +Answer: +``` + +The same template was used for SFT, RM and RLHF stages. +Once more, we utilize `peft` for memory-efficient training, which offers an extra advantage in the RLHF context. +Here, the reference model and policy share the same base, the SFT model, which we load in 8-bit and freeze during training. +We exclusively optimize the policy's LoRA weights using PPO while sharing the base model's weights. + +```python +for epoch, batch in tqdm(enumerate(ppo_trainer.dataloader)): + question_tensors = batch["input_ids"] + + # sample from the policy and to generate responses + response_tensors = ppo_trainer.generate( + question_tensors, + return_prompt=False, + length_sampler=output_length_sampler, + **generation_kwargs, + ) + batch["response"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True) + + # Compute sentiment score + texts = [q + r for q, r in zip(batch["query"], batch["response"])] + pipe_outputs = sentiment_pipe(texts, **sent_kwargs) + rewards = [torch.tensor(output[0]["score"] - script_args.reward_baseline) for output in pipe_outputs] + + # Run PPO step + stats = ppo_trainer.step(question_tensors, response_tensors, rewards) + # Log stats to Wandb + ppo_trainer.log_stats(stats, batch, rewards) +``` + +For the rest of the details and evaluation, please refer to our [blog post on StackLLaMA](https://huggingface.co/blog/stackllama). diff --git a/testbed/huggingface__trl/docs/source/xpo_trainer.mdx b/testbed/huggingface__trl/docs/source/xpo_trainer.mdx new file mode 100644 index 0000000000000000000000000000000000000000..7516b9218d402ddfc3fa8c8c865bf8d3f63f0c82 --- /dev/null +++ b/testbed/huggingface__trl/docs/source/xpo_trainer.mdx @@ -0,0 +1,162 @@ +# XPO Trainer + +[![](https://img.shields.io/badge/All_models-XPO-blue)](https://huggingface.co/models?other=xpo,trl) + +## Overview + +Exploratory Preference Optimization (XPO) was proposed in the paper [Exploratory Preference Optimization: Harnessing Implicit Q*-Approximation for Sample-Efficient RLHF](https://huggingface.co/papers/2405.21046) by Tengyang Xie, Dylan J. Foster, Akshay Krishnamurthy, [Corby Rosset](https://huggingface.co/corbyrosset), [Ahmed Awadallah](https://huggingface.co/AhmedAwadallah), and Alexander Rakhlin. It is a simple online preference tuning method based on the DPO loss together with a reward model (RM). XPO augments the DPO objective with an exploration bonus allowing the method to explore outside the support of the intitial model and human feedback data. + +The abstract from the paper is the following: + +> Reinforcement learning from human feedback (RLHF) has emerged as a central tool for language model alignment. We consider online exploration in RLHF, which exploits interactive access to human or AI feedback by deliberately encouraging the model to produce diverse, maximally informative responses. By allowing RLHF to confidently stray from the pre-trained model, online exploration offers the possibility of novel, potentially super-human capabilities, but its full potential as a paradigm for language model training has yet to be realized, owing to computational and statistical bottlenecks in directly adapting existing reinforcement learning techniques. We propose a new algorithm for online exploration in RLHF, Exploratory Preference Optimization (XPO), which is simple and practical -- a one-line change to (online) Direct Preference Optimization (DPO; Rafailov et al., 2023) -- yet enjoys the strongest known provable guarantees and promising empirical performance. XPO augments the DPO objective with a novel and principled exploration bonus, empowering the algorithm to explore outside the support of the initial model and human feedback data. In theory, we show that XPO is provably sample-efficient and converges to a near-optimal language model policy under natural exploration conditions, irrespective of whether the initial model has good coverage. Our analysis, which builds on the observation that DPO implicitly performs a form of Q*-approximation (or, Bellman error minimization), combines previously disparate techniques from language modeling and theoretical reinforcement learning in a serendipitous fashion through the perspective of KL-regularized Markov decision processes. Empirically, we find that XPO is more sample-efficient than non-exploratory DPO variants in a preliminary evaluation. + +This post-training method was contributed by [Kashif Rasul](https://huggingface.co/kashif), [Quentin Gallouédec](https://huggingface.co/qgallouedec) and [Lewis Tunstall](https://huggingface.co/lewtun). + +## Quick start + +This example demonstrates how to train a model using the XPO method. We use the [Qwen 0.5B model](https://huggingface.co/Qwen/Qwen2-0.5B-Instruct) as the base model and [`PairRMJudge`] as a judge. We use the prompts from the [UltraFeedback dataset](https://huggingface.co/datasets/openbmb/UltraFeedback). You can view the prompts in the dataset here: + + +Below is the script to train the model: + +```python +# train_xpo.py +from datasets import load_dataset +from trl import PairRMJudge, XPOConfig, XPOTrainer +from transformers import AutoModelForCausalLM, AutoTokenizer + +model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct") +tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct") +judge = PairRMJudge() +train_dataset = load_dataset("trl-lib/ultrafeedback-prompt", split="train") + +training_args = XPOConfig(output_dir="Qwen2-0.5B-XPO", logging_steps=10) +trainer = XPOTrainer( + model=model, judge=judge, args=training_args, processing_class=tokenizer, train_dataset=train_dataset +) +trainer.train() +``` + +Execute the script using the following command: + +```bash +accelerate launch train_xpo.py +``` + +Distributed across 8 GPUs, the training takes approximately 1 hour. + +To see how the [trained model](https://huggingface.co/trl-lib/Qwen2-0.5B-XPO) performs, you can use the [TRL Chat CLI](clis#chat-interface). + +
$ trl chat --model_name_or_path trl-lib/Qwen2-0.5B-XPO
+<quentin_gallouedec>:
+What is the best programming language?
+
+<trl-lib/Qwen2-0.5B-XPO>:
+The best programming language depends on individual preferences and familiarity with coding concepts. Some popular languages include Python, Java, C++, and JavaScript. 
+
+ +## Expected dataset type + +XPO requires a [prompt-only dataset](dataset_formats#prompt-only). The [`XPOTrainer`] supports both [conversational](dataset_formats#conversational) and [standard](dataset_formats#standard) dataset format. When provided with a conversational dataset, the trainer will automatically apply the chat template to the dataset. + +## Usage tips + +### Use a reward model + +Instead of a judge, you can chose to use a reward model -- see [Reward Bench](https://huggingface.co/spaces/allenai/reward-bench) for a leaderboard of public models you can use. Below is a code example showing how to replace a judge with the [trl-lib/Qwen2-0.5B-Reward](https://huggingface.co/trl-lib/Qwen2-0.5B-Reward) model: + +```diff +- from trl import PairRMJudge ++ from transformers import AutoModelForSequenceClassification + +- judge = PairRMJudge() ++ reward_model = AutoModelForSequenceClassification.from_pretrained("trl-lib/Qwen2-0.5B-Reward", num_labels=1) + + trainer = XPOTrainer( + ... +- judge=judge, ++ reward_model=reward_model, + ) +``` + + + +Make sure that the SFT model and reward model use the _same_ chat template and the same tokenizer. Otherwise, you may find the model completions are scored incorrectly during training. + + + +### Encourage EOS token generation + +When using a reward model, we may want the model to generate completions within a given length. During training, the model will generate completions up to the maximum length specified in the `max_new_tokens` argument of [`XPOConfig`]. If you want to penalize the model for not generating an EOS token before reaching the maximum length, you can use the `missing_eos_penalty` argument of [`XPOConfig`]: + +```python +training_args = XPOConfig(..., max_new_tokens=128, missing_eos_penalty=1.0) +``` + +### Logging Completions + +To better understand your model’s behavior during training, you can log sample completions periodically using the [`LogCompletionsCallback`]. + +```python +trainer = XPOTrainer(..., eval_dataset=eval_dataset) +completions_callback = LogCompletionsCallback(trainer, num_prompts=8) +trainer.add_callback(completions_callback) +``` + +This callback logs the model's generated completions directly to Weights & Biases. + +![Logged Completions](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/wandb_completions.png) + +## Example script + +We provide an example script to train a model using the XPO method. The script is available in [`examples/scripts/xpo.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/xpo.py) + +To test the XPO script with the [Qwen2.5 0.5B model](https://huggingface.co/trl-lib/Qwen/Qwen2.5-0.5B-Instruct) on the [UltraFeedback dataset](https://huggingface.co/datasets/openbmb/UltraFeedback), run the following command: + +```bash +python examples/scripts/xpo.py \ + --model_name_or_path Qwen/Qwen2.5-0.5B-Instruct \ + --judge pair_rm \ + --dataset_name trl-lib/ultrafeedback-prompt \ + --learning_rate 5.0e-7 \ + --logging_steps 25 \ + --output_dir Qwen2.5-0.5B-XPO-PairRM \ + --warmup_ratio 0.1 \ + --push_to_hub +``` + +## Logged metrics + +The logged metrics are as follows: + +* `loss/xpo`: The mean xpo part of the full loss. +* `loss/dpo`: The mean dpo part of the full loss. +* `objective/kl`: The mean KL divergence between the model and reference data. +* `objective/entropy`: The mean entropy of the model and reference data. +* `objective/model_scores`: The mean scores (according to the reward model) of the model completions. +* `objective/ref_scores`: The mean scores (according to the reward model) of the reference completions. +* `objective/scores_margin`: The mean score margin (according to the external reward model) between the chosen and rejected completions. +* `rewards/chosen`: The mean reward (according to XPO's DPO implicit reward model) of the chosen completions. +* `rewards/rejected`: The mean reward (according to XPO's DPO implicit reward model) of the rejected completions. +* `rewards/accuracies`: The accuracies of the XPO's implicit reward model. +* `rewards/margins`: The mean reward margin (according to online DPO's implicit reward model) between the chosen and rejected completions. +* `logps/chosen`: The mean log probabilities of the chosen completions. +* `logps/rejected`: The mean log probabilities of the rejected completions. +* `val/model_contain_eos_token`: The amount of times the model's output contains the eos token. +* `val/ref_contain_eos_token`: The amount of times the reference's output contains the eos token. +* `alpha`: The weight of the XPO loss term. Typically fixed, but can be made dynamic by passing a list to [`XPOConfig`]. +* `beta`: The parameter that controls the weight of the loss term representing the deviation from the reference model. Typically fixed, but can be made dynamic by passing a list to [`XPOConfig`]. + + +## XPOTrainer + +[[autodoc]] XPOTrainer + +## XPOConfig + +[[autodoc]] XPOConfig diff --git a/testbed/huggingface__trl/examples/README.md b/testbed/huggingface__trl/examples/README.md new file mode 100644 index 0000000000000000000000000000000000000000..37999e41abc02461a09ed7e29e39cc0bec15e488 --- /dev/null +++ b/testbed/huggingface__trl/examples/README.md @@ -0,0 +1,3 @@ +# Examples + +Please check out https://huggingface.co/docs/trl/example_overview for documentation on our examples. \ No newline at end of file diff --git a/testbed/huggingface__trl/examples/accelerate_configs/deepspeed_zero1.yaml b/testbed/huggingface__trl/examples/accelerate_configs/deepspeed_zero1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d5b5f782fb30f9fcbcc8fc58262f09eaf2e10368 --- /dev/null +++ b/testbed/huggingface__trl/examples/accelerate_configs/deepspeed_zero1.yaml @@ -0,0 +1,20 @@ +compute_environment: LOCAL_MACHINE +debug: false +deepspeed_config: + deepspeed_multinode_launcher: standard + gradient_accumulation_steps: 1 + zero3_init_flag: false + zero_stage: 1 +distributed_type: DEEPSPEED +downcast_bf16: 'no' +machine_rank: 0 +main_training_function: main +mixed_precision: 'bf16' +num_machines: 1 +num_processes: 8 +rdzv_backend: static +same_network: true +tpu_env: [] +tpu_use_cluster: false +tpu_use_sudo: false +use_cpu: false diff --git a/testbed/huggingface__trl/examples/accelerate_configs/deepspeed_zero2.yaml b/testbed/huggingface__trl/examples/accelerate_configs/deepspeed_zero2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..239b14ac3a9ae8de73122d1154bf0d71903dc15f --- /dev/null +++ b/testbed/huggingface__trl/examples/accelerate_configs/deepspeed_zero2.yaml @@ -0,0 +1,21 @@ +compute_environment: LOCAL_MACHINE +debug: false +deepspeed_config: + deepspeed_multinode_launcher: standard + offload_optimizer_device: none + offload_param_device: none + zero3_init_flag: false + zero_stage: 2 +distributed_type: DEEPSPEED +downcast_bf16: 'no' +machine_rank: 0 +main_training_function: main +mixed_precision: 'bf16' +num_machines: 1 +num_processes: 8 +rdzv_backend: static +same_network: true +tpu_env: [] +tpu_use_cluster: false +tpu_use_sudo: false +use_cpu: false diff --git a/testbed/huggingface__trl/examples/accelerate_configs/deepspeed_zero3.yaml b/testbed/huggingface__trl/examples/accelerate_configs/deepspeed_zero3.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b5a1201f8a2ee8706b63f0f80c664a1fc61a7d9d --- /dev/null +++ b/testbed/huggingface__trl/examples/accelerate_configs/deepspeed_zero3.yaml @@ -0,0 +1,22 @@ +compute_environment: LOCAL_MACHINE +debug: false +deepspeed_config: + deepspeed_multinode_launcher: standard + offload_optimizer_device: none + offload_param_device: none + zero3_init_flag: true + zero3_save_16bit_model: true + zero_stage: 3 +distributed_type: DEEPSPEED +downcast_bf16: 'no' +machine_rank: 0 +main_training_function: main +mixed_precision: bf16 +num_machines: 1 +num_processes: 8 +rdzv_backend: static +same_network: true +tpu_env: [] +tpu_use_cluster: false +tpu_use_sudo: false +use_cpu: false diff --git a/testbed/huggingface__trl/examples/accelerate_configs/fsdp_qlora.yaml b/testbed/huggingface__trl/examples/accelerate_configs/fsdp_qlora.yaml new file mode 100644 index 0000000000000000000000000000000000000000..93b35414705771a46ba59c58e7c80e14aad3521b --- /dev/null +++ b/testbed/huggingface__trl/examples/accelerate_configs/fsdp_qlora.yaml @@ -0,0 +1,25 @@ +compute_environment: LOCAL_MACHINE +debug: false +distributed_type: FSDP +downcast_bf16: 'no' +fsdp_config: + fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP + fsdp_backward_prefetch: BACKWARD_PRE + fsdp_cpu_ram_efficient_loading: true + fsdp_forward_prefetch: false + fsdp_offload_params: true + fsdp_sharding_strategy: FULL_SHARD + fsdp_state_dict_type: SHARDED_STATE_DICT + fsdp_sync_module_states: true + fsdp_use_orig_params: false +machine_rank: 0 +main_training_function: main +mixed_precision: 'bf16' +num_machines: 1 +num_processes: 8 +rdzv_backend: static +same_network: true +tpu_env: [] +tpu_use_cluster: false +tpu_use_sudo: false +use_cpu: false \ No newline at end of file diff --git a/testbed/huggingface__trl/examples/accelerate_configs/multi_gpu.yaml b/testbed/huggingface__trl/examples/accelerate_configs/multi_gpu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..15dad9be3ba44f7c934e1ecab98a93cb83cbc79a --- /dev/null +++ b/testbed/huggingface__trl/examples/accelerate_configs/multi_gpu.yaml @@ -0,0 +1,16 @@ +compute_environment: LOCAL_MACHINE +debug: false +distributed_type: MULTI_GPU +downcast_bf16: 'no' +gpu_ids: all +machine_rank: 0 +main_training_function: main +mixed_precision: 'bf16' +num_machines: 1 +num_processes: 8 +rdzv_backend: static +same_network: true +tpu_env: [] +tpu_use_cluster: false +tpu_use_sudo: false +use_cpu: false diff --git a/testbed/huggingface__trl/examples/cli_configs/example_config.yaml b/testbed/huggingface__trl/examples/cli_configs/example_config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e3a8de10c70d81ba7ef9efdf250564861cc9be93 --- /dev/null +++ b/testbed/huggingface__trl/examples/cli_configs/example_config.yaml @@ -0,0 +1,18 @@ +# This is an example configuration file of TRL CLI, you can use it for +# SFT like that: `trl sft --config config.yaml --output_dir test-sft` +# The YAML file supports environment variables by adding an `env` field +# as below + +# env: +# CUDA_VISIBLE_DEVICES: 0 + +model_name_or_path: + trl-internal-testing/tiny-random-LlamaForCausalLM +dataset_name: + stanfordnlp/imdb +report_to: + none +learning_rate: + 0.0001 +lr_scheduler_type: + cosine diff --git a/testbed/huggingface__trl/examples/datasets/lm-human-preferences-descriptiveness.py b/testbed/huggingface__trl/examples/datasets/lm-human-preferences-descriptiveness.py new file mode 100644 index 0000000000000000000000000000000000000000..2620b3101b190e80f46ca756bad5f0a704ac6339 --- /dev/null +++ b/testbed/huggingface__trl/examples/datasets/lm-human-preferences-descriptiveness.py @@ -0,0 +1,81 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Optional + +from datasets import load_dataset +from transformers import AutoTokenizer, HfArgumentParser + + +@dataclass +class ScriptArguments: + r""" + Arguments for the script. + + Args: + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether to push the dataset to the Hugging Face Hub. + repo_id (`str`, *optional*, defaults to `"trl-lib/lm-human-preferences-descriptiveness"`): + Hugging Face repository ID to push the dataset to. + dataset_num_proc (`Optional[int]`, *optional*, defaults to `None`): + Number of workers to use for dataset processing. + """ + + push_to_hub: bool = False + repo_id: str = "trl-lib/lm-human-preferences-descriptiveness" + dataset_num_proc: Optional[int] = None + + +# Edge cases handling: remove the cases where all samples are the same +def samples_not_all_same(example): + return not all(example["sample0"] == example[f"sample{j}"] for j in range(1, 4)) + + +def to_prompt_completion(example, tokenizer): + prompt = tokenizer.decode(example["query"]).strip() + best_idx = example["best"] + chosen = tokenizer.decode(example[f"sample{best_idx}"]) + for rejected_idx in range(4): # take the first rejected sample that is different from the chosen one + rejected = tokenizer.decode(example[f"sample{rejected_idx}"]) + if chosen != rejected: + break + assert chosen != rejected + return {"prompt": prompt, "chosen": chosen, "rejected": rejected} + + +if __name__ == "__main__": + parser = HfArgumentParser(ScriptArguments) + script_args = parser.parse_args_into_dataclasses()[0] + + dataset = load_dataset( + "json", + data_files="https://openaipublic.blob.core.windows.net/lm-human-preferences/labels/descriptiveness/offline_5k.json", + split="train", + ) + + dataset = dataset.filter(samples_not_all_same, num_proc=script_args.dataset_num_proc) + + dataset = dataset.map( + to_prompt_completion, + num_proc=script_args.dataset_num_proc, + remove_columns=["query", "sample0", "sample1", "sample2", "sample3", "best"], + fn_kwargs={"tokenizer": AutoTokenizer.from_pretrained("gpt2")}, + ) + + # train_size taken from https://github.com/openai/lm-human-preferences/blob/cbfd210bb8b08f6bc5c26878c10984b90f516c66/launch.py#L79) + dataset = dataset.train_test_split(train_size=4992) + + if script_args.push_to_hub: + dataset.push_to_hub(script_args.repo_id) diff --git a/testbed/huggingface__trl/examples/datasets/lm-human-preferences-sentiment.py b/testbed/huggingface__trl/examples/datasets/lm-human-preferences-sentiment.py new file mode 100644 index 0000000000000000000000000000000000000000..af0359ac384d84ab8c36fa843790b456fd61012a --- /dev/null +++ b/testbed/huggingface__trl/examples/datasets/lm-human-preferences-sentiment.py @@ -0,0 +1,74 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Optional + +from datasets import load_dataset +from transformers import AutoTokenizer, HfArgumentParser + + +@dataclass +class ScriptArguments: + r""" + Arguments for the script. + + Args: + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether to push the dataset to the Hugging Face Hub. + repo_id (`str`, *optional*, defaults to `"trl-lib/lm-human-preferences-sentiment"`): + Hugging Face repository ID to push the dataset to. + dataset_num_proc (`Optional[int]`, *optional*, defaults to `None`): + Number of workers to use for dataset processing. + """ + + push_to_hub: bool = False + repo_id: str = "trl-lib/lm-human-preferences-sentiment" + dataset_num_proc: Optional[int] = None + + +def to_prompt_completion(example, tokenizer): + prompt = tokenizer.decode(example["query"]).strip() + best_idx = example["best"] + chosen = tokenizer.decode(example[f"sample{best_idx}"]) + for rejected_idx in range(4): # take the first rejected sample that is different from the chosen one + rejected = tokenizer.decode(example[f"sample{rejected_idx}"]) + if chosen != rejected: + break + assert chosen != rejected + return {"prompt": prompt, "chosen": chosen, "rejected": rejected} + + +if __name__ == "__main__": + parser = HfArgumentParser(ScriptArguments) + script_args = parser.parse_args_into_dataclasses()[0] + + dataset = load_dataset( + "json", + data_files="https://openaipublic.blob.core.windows.net/lm-human-preferences/labels/sentiment/offline_5k.json", + split="train", + ) + + dataset = dataset.map( + to_prompt_completion, + num_proc=script_args.dataset_num_proc, + remove_columns=["query", "sample0", "sample1", "sample2", "sample3", "best"], + fn_kwargs={"tokenizer": AutoTokenizer.from_pretrained("gpt2")}, + ) + + # train_size taken from https://github.com/openai/lm-human-preferences/blob/cbfd210bb8b08f6bc5c26878c10984b90f516c66/launch.py#L70) + dataset = dataset.train_test_split(train_size=4992) + + if script_args.push_to_hub: + dataset.push_to_hub(script_args.repo_id) diff --git a/testbed/huggingface__trl/examples/datasets/prm800k.py b/testbed/huggingface__trl/examples/datasets/prm800k.py new file mode 100644 index 0000000000000000000000000000000000000000..244257912c1e79647cec12004fe0fd01db2b7ff2 --- /dev/null +++ b/testbed/huggingface__trl/examples/datasets/prm800k.py @@ -0,0 +1,118 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Optional + +from datasets import load_dataset +from transformers import HfArgumentParser + + +@dataclass +class ScriptArguments: + r""" + Arguments for the script. + + Args: + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether to push the dataset to the Hugging Face Hub. + repo_id (`str`, *optional*, defaults to `"trl-lib/prm800k"`): + Hugging Face repository ID to push the dataset to. + dataset_num_proc (`Optional[int]`, *optional*, defaults to `None`): + Number of workers to use for dataset processing. + """ + + push_to_hub: bool = False + repo_id: str = "trl-lib/prm800k" + dataset_num_proc: Optional[int] = None + + +def process_example(example): + outputs = [] + prompt = example["question"]["problem"] + + # Iterate through each step + previous_completions = [] + previous_labels = [] + for step in example["label"]["steps"]: + if step["completions"] is None and step["human_completion"] is None and step["chosen_completion"] is None: + # happens sometimes + break + # Loop through completions + for completion_idx, completion in enumerate(step["completions"]): + # For every completion that are not chosen, we are in a terminal state, so we can add it to the list of outputs. + if completion_idx != step["chosen_completion"]: + content = completion["text"] + completions = previous_completions[:] + [content] + label = completion["rating"] == 1 + labels = previous_labels[:] + [label] + outputs.append({"prompt": prompt, "completions": completions, "labels": labels}) + + # Now, exapand the previous completions and labels + if step["chosen_completion"] is not None: + chosen_completion = step["completions"][step["chosen_completion"]] + label = chosen_completion["rating"] == 1 + elif step["human_completion"] is not None: + chosen_completion = step["human_completion"] + label = True + else: + break + content = chosen_completion["text"] + previous_completions.append(content) + previous_labels.append(label) + + # Last step: we are in a terminal state, so we can add it to the list of outputs + outputs.append({"prompt": prompt, "completions": previous_completions, "labels": previous_labels}) + return outputs + + +def process_batch(examples): + outputs = [] + batch_size = len(examples["label"]) + for idx in range(batch_size): + example = {k: v[idx] for k, v in examples.items()} + outputs.extend(process_example(example)) + # list of dict to dict of list + outputs = {k: [v[k] for v in outputs] for k in outputs[0]} + return outputs + + +if __name__ == "__main__": + parser = HfArgumentParser(ScriptArguments) + script_args = parser.parse_args_into_dataclasses()[0] + + data_files = { + "train": "https://github.com/openai/prm800k/raw/refs/heads/main/prm800k/data/phase1_train.jsonl", + "test": "https://github.com/openai/prm800k/raw/refs/heads/main/prm800k/data/phase1_test.jsonl", + } + dataset = load_dataset("json", data_files=data_files) + + dataset = dataset.map( + process_batch, + batched=True, + batch_size=10, + remove_columns=[ + "labeler", + "timestamp", + "generation", + "is_quality_control_question", + "is_initial_screening_question", + "question", + "label", + ], + num_proc=script_args.dataset_num_proc, + ) + + if script_args.push_to_hub: + dataset.push_to_hub(script_args.repo_id) diff --git a/testbed/huggingface__trl/examples/datasets/rlaif-v.py b/testbed/huggingface__trl/examples/datasets/rlaif-v.py new file mode 100644 index 0000000000000000000000000000000000000000..ec2501d4c71b677a7bc22554d1c7f1cc6cca751c --- /dev/null +++ b/testbed/huggingface__trl/examples/datasets/rlaif-v.py @@ -0,0 +1,73 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Optional + +from datasets import features, load_dataset +from transformers import HfArgumentParser + + +@dataclass +class ScriptArguments: + r""" + Arguments for the script. + + Args: + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether to push the dataset to the Hugging Face Hub. + repo_id (`str`, *optional*, defaults to `"trl-lib/rlaif-v"`): + Hugging Face repository ID to push the dataset to. + dataset_num_proc (`Optional[int]`, *optional*, defaults to `None`): + Number of workers to use for dataset processing. + """ + + push_to_hub: bool = False + repo_id: str = "trl-lib/rlaif-v" + dataset_num_proc: Optional[int] = None + + +def to_conversational(example): + """ + Convert prompt from "xxx" to [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "xxx"}]}] + and chosen and rejected from "xxx" to [{"role": "assistant", "content": [{"type": "text", "text": "xxx"}]}]. + Images are wrapped into a list. + """ + prompt = [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": example["question"]}]}] + chosen = [{"role": "assistant", "content": [{"type": "text", "text": example["chosen"]}]}] + rejected = [{"role": "assistant", "content": [{"type": "text", "text": example["rejected"]}]}] + return {"prompt": prompt, "images": [example["image"]], "chosen": chosen, "rejected": rejected} + + +if __name__ == "__main__": + parser = HfArgumentParser(ScriptArguments) + script_args = parser.parse_args_into_dataclasses()[0] + + dataset = load_dataset("openbmb/RLAIF-V-Dataset", split="train") + dataset = dataset.map( + to_conversational, + num_proc=script_args.dataset_num_proc, + remove_columns=dataset.column_names, + writer_batch_size=128, + ) + + # Cast the images to Sequence[Image] to avoid bytes format + f = dataset.features + f["images"] = features.Sequence(features.Image(decode=True)) + dataset = dataset.cast(f) + + dataset = dataset.train_test_split(test_size=0.01, writer_batch_size=128) + + if script_args.push_to_hub: + dataset.push_to_hub(script_args.repo_id) diff --git a/testbed/huggingface__trl/examples/datasets/tldr.py b/testbed/huggingface__trl/examples/datasets/tldr.py new file mode 100644 index 0000000000000000000000000000000000000000..e386095f8846353e920fb9629c95c952a530064d --- /dev/null +++ b/testbed/huggingface__trl/examples/datasets/tldr.py @@ -0,0 +1,67 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Optional + +from datasets import load_dataset +from transformers import HfArgumentParser + + +@dataclass +class ScriptArguments: + r""" + Arguments for the script. + + Args: + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether to push the dataset to the Hugging Face Hub. + repo_id (`str`, *optional*, defaults to `"trl-lib/tldr"`): + Hugging Face repository ID to push the dataset to. + dataset_num_proc (`Optional[int]`, *optional*, defaults to `None`): + Number of workers to use for dataset processing. + """ + + push_to_hub: bool = False + repo_id: str = "trl-lib/tldr" + dataset_num_proc: Optional[int] = None + + +def to_prompt_completion(example): + tldr_format_str = "SUBREDDIT: r/{subreddit}\n\nTITLE: {title}\n\nPOST: {post}\n\nTL;DR:" + prompt = tldr_format_str.format(subreddit=example["subreddit"], title=example["title"], post=example["post"]) + completion = " " + example["summary"] # Add a space to separate the prompt from the completion + return {"prompt": prompt, "completion": completion} + + +if __name__ == "__main__": + parser = HfArgumentParser(ScriptArguments) + script_args = parser.parse_args_into_dataclasses()[0] + + # Filtered reddit TL;DR dataset from https://github.com/openai/summarize-from-feedback?tab=readme-ov-file#reddit-tldr-dataset + data_files = { + "train": "https://openaipublic.blob.core.windows.net/summarize-from-feedback/datasets/tldr_3_filtered/train.jsonl", + "validation": "https://openaipublic.blob.core.windows.net/summarize-from-feedback/datasets/tldr_3_filtered/valid.jsonl", + "test": "https://openaipublic.blob.core.windows.net/summarize-from-feedback/datasets/tldr_3_filtered/test.jsonl", + } + dataset = load_dataset("json", data_files=data_files) + + dataset = dataset.map( + to_prompt_completion, + num_proc=script_args.dataset_num_proc, + remove_columns=["id", "subreddit", "title", "post", "summary"], + ) + + if script_args.push_to_hub: + dataset.push_to_hub(script_args.repo_id) diff --git a/testbed/huggingface__trl/examples/datasets/tldr_preference.py b/testbed/huggingface__trl/examples/datasets/tldr_preference.py new file mode 100644 index 0000000000000000000000000000000000000000..0ac6af66463ecfc68662a8db505e48d8245d44a0 --- /dev/null +++ b/testbed/huggingface__trl/examples/datasets/tldr_preference.py @@ -0,0 +1,72 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Optional + +from datasets import load_dataset +from transformers import HfArgumentParser + + +@dataclass +class ScriptArguments: + r""" + Arguments for the script. + + Args: + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether to push the dataset to the Hugging Face Hub. + repo_id (`str`, *optional*, defaults to `"trl-lib/tldr-preference"`): + Hugging Face repository ID to push the dataset to. + dataset_num_proc (`Optional[int]`, *optional*, defaults to `None`): + Number of workers to use for dataset processing. + """ + + push_to_hub: bool = False + repo_id: str = "trl-lib/tldr-preference" + dataset_num_proc: Optional[int] = None + + +def to_preference(example): + info = example["info"] + if example["batch"] in ["batch0_cnndm", "cnndm0", "cnndm2"]: # CNN Daily Mail batches + article = info["article"].replace("\n\n", "\n") + prompt = f"TITLE: {info['title']}\n\n{article}\n\nTL;DR:" + elif example["batch"] in [f"batch{i}" for i in range(3, 23)] + ["edit_b2_eval_test"]: # Reddit batches + post = info["post"].replace("\n\n", "\n") + prompt = f"SUBREDDIT: r/{info['subreddit']}\n\nTITLE: {info['title']}\n\nPOST: {post}\n\nTL;DR:" + else: + raise ValueError(f"Unknown batch: {example['batch']}") + + chosen_idx = example["choice"] + rejected_idx = 1 - chosen_idx + chosen = example["summaries"][chosen_idx]["text"] + rejected = example["summaries"][rejected_idx]["text"] + return {"prompt": prompt, "chosen": chosen, "rejected": rejected} + + +if __name__ == "__main__": + parser = HfArgumentParser(ScriptArguments) + script_args = parser.parse_args_into_dataclasses()[0] + + dataset = load_dataset("openai/summarize_from_feedback", "comparisons") + + dataset = dataset.map( + to_preference, + num_proc=script_args.dataset_num_proc, + remove_columns=["info", "summaries", "choice", "worker", "batch", "split", "extra"], + ) + + if script_args.push_to_hub: + dataset.push_to_hub(script_args.repo_id) diff --git a/testbed/huggingface__trl/examples/datasets/tokenize_ds.py b/testbed/huggingface__trl/examples/datasets/tokenize_ds.py new file mode 100644 index 0000000000000000000000000000000000000000..ae52e0b22c8b984dda610cf35e69311d602c9d23 --- /dev/null +++ b/testbed/huggingface__trl/examples/datasets/tokenize_ds.py @@ -0,0 +1,54 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field +from typing import Optional + +from datasets import load_dataset +from transformers import AutoTokenizer, HfArgumentParser + +from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE + + +""" +python -i examples/datasets/tokenize_ds.py --model HuggingFaceH4/zephyr-7b-beta +python -i examples/datasets/tokenize_ds.py --model gpt2 +""" + + +@dataclass +class ScriptArguments: + dataset_name: str = field( + default="trl-internal-testing/hh-rlhf-helpful-base-trl-style", metadata={"help": "The dataset to load"} + ) + model: str = field(default="gpt2", metadata={"help": "The model to use for tokenization"}) + dataset_num_proc: Optional[int] = field( + default=None, metadata={"help": "The number of workers to use to tokenize the data"} + ) + + +if __name__ == "__main__": + script_args = HfArgumentParser(ScriptArguments).parse_args_into_dataclasses()[0] + dataset = load_dataset(script_args.dataset_name) + tokenizer = AutoTokenizer.from_pretrained(script_args.model) + if tokenizer.chat_template is None: + tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE + + def process(row): + row["chosen"] = tokenizer.apply_chat_template(row["chosen"], tokenize=False) + row["rejected"] = tokenizer.apply_chat_template(row["rejected"], tokenize=False) + return row + + dataset = dataset.map(process, num_proc=script_args.dataset_num_proc) + print(dataset["train"][0]["chosen"]) diff --git a/testbed/huggingface__trl/examples/datasets/ultrafeedback-prompt.py b/testbed/huggingface__trl/examples/datasets/ultrafeedback-prompt.py new file mode 100644 index 0000000000000000000000000000000000000000..9753aa620aca441bc9bb0a7385f636b34ea418cb --- /dev/null +++ b/testbed/huggingface__trl/examples/datasets/ultrafeedback-prompt.py @@ -0,0 +1,68 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Optional + +from datasets import load_dataset +from transformers import HfArgumentParser + + +@dataclass +class ScriptArguments: + r""" + Arguments for the script. + + Args: + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether to push the dataset to the Hugging Face Hub. + repo_id (`str`, *optional*, defaults to `"trl-lib/ultrafeedback-prompt"`): + Hugging Face repository ID to push the dataset to. + dataset_num_proc (`Optional[int]`, *optional*, defaults to `None`): + Number of workers to use for dataset processing. + """ + + push_to_hub: bool = False + repo_id: str = "trl-lib/ultrafeedback-prompt" + dataset_num_proc: Optional[int] = None + + +def to_unpaired_preference(example): + prompt = [{"role": "user", "content": example["instruction"]}] + return {"prompt": prompt} + + +def drop_long_prompt(example): + if len(example["prompt"][0]["content"]) > 512: + return False + else: + return True + + +if __name__ == "__main__": + parser = HfArgumentParser(ScriptArguments) + script_args = parser.parse_args_into_dataclasses()[0] + + dataset = load_dataset("openbmb/UltraFeedback", split="train") + + dataset = dataset.map( + to_unpaired_preference, + remove_columns=["source", "instruction", "models", "completions", "correct_answers", "incorrect_answers"], + num_proc=script_args.dataset_num_proc, + ) + dataset = dataset.filter(drop_long_prompt) + dataset = dataset.train_test_split(test_size=0.05, seed=42) + + if script_args.push_to_hub: + dataset.push_to_hub(script_args.repo_id) diff --git a/testbed/huggingface__trl/examples/datasets/ultrafeedback.py b/testbed/huggingface__trl/examples/datasets/ultrafeedback.py new file mode 100644 index 0000000000000000000000000000000000000000..5ca687760a33cd564826cb4cb673263f6351627e --- /dev/null +++ b/testbed/huggingface__trl/examples/datasets/ultrafeedback.py @@ -0,0 +1,102 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Optional + +from datasets import load_dataset +from transformers import HfArgumentParser + + +@dataclass +class ScriptArguments: + r""" + Arguments for the script. + + Args: + model_name (`str`, *optional*, defaults to `"gpt-3.5-turbo"`): + Language model to target. Possible values are: + + - `"alpaca-7b"` + - `"bard"` + - `"falcon-40b-instruct"` + - `"gpt-3.5-turbo"` (default) + - `"gpt-4"` + - `"llama-2-13b-chat"` + - `"llama-2-70b-chat"` + - `"llama-2-7b-chat"` + - `"mpt-30b-chat"` + - `"pythia-12b"` + - `"starchat"` + - `"ultralm-13b"` + - `"ultralm-65b"` + - `"vicuna-33b"` + - `"wizardlm-13b"` + - `"wizardlm-70b"` + - `"wizardlm-7b"` + + aspect (`str`, *optional*, defaults to `"helpfulness"`): + Aspect to target. Possible values are: + + - `"helpfulness"` (default) + - `"honesty"` + - `"instruction-following"` + - `"truthfulness"` + + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether to push the dataset to the Hugging Face Hub. + repo_id (`str`, *optional*, defaults to `"trl-lib/ultrafeedback-gpt-3.5-turbo-helpfulness"`): + Hugging Face repository ID to push the dataset to. + dataset_num_proc (`Optional[int]`, *optional*, defaults to `None`): + Number of workers to use for dataset processing. + """ + + model_name: str = "gpt-3.5-turbo" + aspect: str = "helpfulness" + push_to_hub: bool = False + repo_id: str = "trl-lib/ultrafeedback-gpt-3.5-turbo-helpfulness" + dataset_num_proc: Optional[int] = None + + +def to_unpaired_preference(example, model_name, aspect): + prompt = [{"role": "user", "content": example["instruction"]}] + model_index = example["models"].index(model_name) + response_content = example["completions"][model_index]["response"] + completion = [{"role": "assistant", "content": response_content}] + score = int(example["completions"][model_index]["annotations"][aspect]["Rating"]) + label = score >= 5 + return {"prompt": prompt, "completion": completion, "label": label} + + +if __name__ == "__main__": + parser = HfArgumentParser(ScriptArguments) + script_args = parser.parse_args_into_dataclasses()[0] + + dataset = load_dataset("openbmb/UltraFeedback", split="train") + + dataset = dataset.filter( + lambda example: script_args.model_name in example["models"], + batched=False, + num_proc=script_args.dataset_num_proc, + ) + dataset = dataset.map( + to_unpaired_preference, + remove_columns=["source", "instruction", "models", "completions", "correct_answers", "incorrect_answers"], + fn_kwargs={"model_name": script_args.model_name, "aspect": script_args.aspect}, + num_proc=script_args.dataset_num_proc, + ) + dataset = dataset.train_test_split(test_size=0.05, seed=42) + + if script_args.push_to_hub: + dataset.push_to_hub(script_args.repo_id) diff --git a/testbed/huggingface__trl/examples/datasets/zen.py b/testbed/huggingface__trl/examples/datasets/zen.py new file mode 100644 index 0000000000000000000000000000000000000000..7aa9b64ea7db729d6a78c0cebb66fb08d6d86ed8 --- /dev/null +++ b/testbed/huggingface__trl/examples/datasets/zen.py @@ -0,0 +1,651 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass + +from datasets import Dataset +from transformers import HfArgumentParser + + +@dataclass +class ScriptArguments: + r""" + Arguments for the script. + + Args: + test_size (`float`, *optional*, defaults to `0.1`): + Fraction of the dataset to include in the test split. + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether to push the dataset to the Hugging Face Hub. + repo_id (`str`, *optional*, defaults to `"trl-lib/zen"`): + Hugging Face repository ID to push the dataset to. + """ + + test_size: float = 0.1 + push_to_hub: bool = False + repo_id: str = "trl-lib/zen" + + +def main(test_size, push_to_hub, repo_id): + # fmt: off + standard_language_modeling_dataset = Dataset.from_dict({ + "text": [ + "Beautiful is better than ugly.", + "Explicit is better than implicit.", + "Simple is better than complex.", + "Complex is better than complicated.", + "Flat is better than nested.", + "Sparse is better than dense.", + "Readability counts.", + "Special cases aren't special enough to break the rules.", + "Although practicality beats purity.", + "Errors should never pass silently.", + "Unless explicitly silenced.", + "In the face of ambiguity, refuse the temptation to guess.", + "There should be one-- and preferably only one --obvious way to do it.", + "Although that way may not be obvious at first unless you're Dutch.", + "Now is better than never.", + "Although never is often better than *right* now.", + "If the implementation is hard to explain, it's a bad idea.", + "If the implementation is easy to explain, it may be a good idea.", + "Namespaces are one honking great idea -- let's do more of those!", + ], + }) + standard_language_modeling_dataset = standard_language_modeling_dataset.train_test_split(test_size=test_size) + if push_to_hub: + standard_language_modeling_dataset.push_to_hub(repo_id, config_name="standard_language_modeling") + + standard_prompt_only_dataset = Dataset.from_dict({ + "prompt": [ + "Beautiful is better than", + "Explicit is", + "Simple is better", + "Complex", + "Flat is better than", + "Sparse is better", + "Readability", + "Special cases aren't special", + "Although practicality beats", + "Errors should never", + "Unless explicitly", + "In the face of ambiguity, refuse", + "There should be one-- and preferably", + "Although that way may not be obvious at first unless you're", + "Now is", + "Although never is often", + "If the implementation is hard to explain,", + "If the implementation is easy", + "Namespaces are one honking great", + ], + }) + standard_prompt_only_dataset = standard_prompt_only_dataset.train_test_split(test_size=test_size) + if push_to_hub: + standard_prompt_only_dataset.push_to_hub(repo_id, config_name="standard_prompt_only") + + standard_prompt_completion_dataset = Dataset.from_dict({ + "prompt": [ + "Beautiful is better than", + "Explicit is", + "Simple is better", + "Complex", + "Flat is better than", + "Sparse is better", + "Readability", + "Special cases aren't special", + "Although practicality beats", + "Errors should never", + "Unless explicitly", + "In the face of ambiguity, refuse", + "There should be one-- and preferably", + "Although that way may not be obvious at first unless you're", + "Now is", + "Although never is often", + "If the implementation is hard to explain,", + "If the implementation is easy", + "Namespaces are one honking great", + ], + "completion": [ + " ugly.", + " better than implicit.", + " than complex.", + " is better than complicated.", + " nested.", + " than dense.", + " counts.", + " enough to break the rules.", + " purity.", + " pass silently.", + " silenced.", + " the temptation to guess.", + " only one --obvious way to do it.", + " Dutch.", + " better than never.", + " better than *right* now.", + " it's a bad idea.", + " to explain, it may be a good idea.", + " idea -- let's do more of those!", + ], + }) + standard_prompt_completion_dataset = standard_prompt_completion_dataset.train_test_split(test_size=test_size) + if push_to_hub: + standard_prompt_completion_dataset.push_to_hub(repo_id, config_name="standard_prompt_completion") + + standard_preference_dataset = Dataset.from_dict({ + "prompt": [ + "Beautiful is better than", + "Explicit is", + "Simple is better", + "Complex", + "Flat is better than", + "Sparse is better", + "Readability", + "Special cases aren't special", + "Although practicality beats", + "Errors should never", + "Unless explicitly", + "In the face of ambiguity, refuse", + "There should be one-- and preferably", + "Although that way may not be obvious at first unless you're", + "Now is", + "Although never is often", + "If the implementation is hard to explain,", + "If the implementation is easy", + "Namespaces are one honking great", + ], + "chosen": [ + " ugly.", + " better than implicit.", + " than complex.", + " is better than complicated.", + " nested.", + " than dense.", + " counts.", + " enough to break the rules.", + " purity.", + " pass silently.", + " silenced.", + " the temptation to guess.", + " only one --obvious way to do it.", + " Dutch.", + " better than never.", + " better than *right* now.", + " it's a bad idea.", + " to explain, it may be a good idea.", + " idea -- let's do more of those!", + ], + "rejected": [ + " the moon.", + " worse than nothing.", + " than a long vacation.", + " is always the answer.", + " chocolate.", + " without any context.", + " is optional.", + " enough to become unicorns.", + " reality.", + " pass their driving test.", + " forgotten.", + " the opportunity to laugh.", + " two or more confusing methods.", + " a time traveler.", + " never better.", + " not even a possibility.", + " it's clearly the best choice.", + " it's probably magic.", + " watermelon -- let's plant some!", + ], + }) + standard_preference_dataset = standard_preference_dataset.train_test_split(test_size=test_size) + if push_to_hub: + standard_preference_dataset.push_to_hub(repo_id, config_name="standard_preference") + + standard_implicit_prompt_preference_dataset = Dataset.from_dict({ + "chosen": [ + "Beautiful is better than ugly.", + "Explicit is better than implicit.", + "Simple is better than complex.", + "Complex is better than complicated.", + "Flat is better than nested.", + "Sparse is better than dense.", + "Readability counts.", + "Special cases aren't special enough to break the rules.", + "Although practicality beats purity.", + "Errors should never pass silently.", + "Unless explicitly silenced.", + "In the face of ambiguity, refuse the temptation to guess.", + "There should be one-- and preferably only one --obvious way to do it.", + "Although that way may not be obvious at first unless you're Dutch.", + "Now is better than never.", + "Although never is often better than *right* now.", + "If the implementation is hard to explain, it's a bad idea.", + "If the implementation is easy to explain, it may be a good idea.", + "Namespaces are one honking great idea -- let's do more of those!", + ], + "rejected": [ + "Beautiful is better than the moon.", + "Explicit is worse than nothing.", + "Simple is better than a long vacation.", + "Complex is always the answer.", + "Flat is better than chocolate.", + "Sparse is better without any context.", + "Readability is optional.", + "Special cases aren't special enough to become unicorns.", + "Although practicality beats reality.", + "Errors should never pass their driving test.", + "Unless explicitly forgotten.", + "In the face of ambiguity, refuse the opportunity to laugh.", + "There should be one-- and preferably two or more confusing methods.", + "Although that way may not be obvious at first unless you're a time traveler.", + "Now is never better.", + "Although never is often not even a possibility.", + "If the implementation is hard to explain, it's clearly the best choice.", + "If the implementation is easy it's probably magic.", + "Namespaces are one honking great watermelon -- let's plant some!", + ], + }) + standard_implicit_prompt_preference_dataset = standard_implicit_prompt_preference_dataset.train_test_split(test_size=test_size) + if push_to_hub: + standard_implicit_prompt_preference_dataset.push_to_hub(repo_id, config_name="standard_implicit_prompt_preference") + + standard_unpaired_preference_dataset = Dataset.from_dict({ + "prompt": [ + "Beautiful is better than", + "Explicit is", + "Simple is better", + "Complex", + "Flat is better than", + "Sparse is better", + "Readability", + "Special cases aren't special", + "Although practicality beats", + "Errors should never", + "Unless explicitly", + "In the face of ambiguity, refuse", + "There should be one-- and preferably", + "Although that way may not be obvious at first unless you're", + "Now is", + "Although never is often", + "If the implementation is hard to explain,", + "If the implementation is easy", + "Namespaces are one honking great", + ], + "completion": [ + " ugly.", + " worse than nothing.", + " than a long vacation.", + " is better than complicated.", + " nested.", + " without any context.", + " counts.", + " enough to become unicorns.", + " purity.", + " pass silently.", + " forgotten.", + " the temptation to guess.", + " only one --obvious way to do it.", + " a time traveler.", + " better than never.", + " not even a possibility.", + " it's a bad idea.", + " it's probably magic.", + " watermelon -- let's plant some!", + ], + "label": [True, False, False, True, True, False, True, False, True, True, False, True, True, False, True, False, True, False, False], + }) + standard_unpaired_preference_dataset = standard_unpaired_preference_dataset.train_test_split(test_size=test_size) + if push_to_hub: + standard_unpaired_preference_dataset.push_to_hub(repo_id, config_name="standard_unpaired_preference") + + standard_step_dataset = Dataset.from_dict({ + "prompt": [ + "Beautiful is better than", + "Explicit is better than", + "Simple is better than", + "Complex is better than", + "Flat is better than", + "Sparse is better than", + "Readability counts", + "Special cases aren't special enough", + "Although practicality beats", + "Errors should never pass", + "In the face of ambiguity, refuse", + "There should be one-- and preferably only one --", + "Although that way may not be", + "Now is better than", + "Never is often better than", + "If the implementation is hard to explain, it's", + "If the implementation is easy to explain, it", + "Namespaces are one", + "Although practicality sometimes beats purity,", + ], + "completions":[ + [", let me think...", " ugly."], + [", of course,", " implicit.", " because clarity matters."], + ["... let's keep it basic,", " complex."], + [" when needed,", " complicated."], + [" in terms of structure,", " nested."], + ["... especially for readability."], + [" especially when others read it."], + [", unless...", " they follow the rules."], + [" some theoretical elegance,", " purity."], + [" silently,", " unless explicitly silenced."], + [" the temptation to guess."], + [" way to do it,"," but sometimes it's not obvious.", " especially when there's more than one possibility."], + [" clear at first,", " it will eventually emerge."], + [" later."], + [" problematic fixes."], + [" likely because it's too complicated."], + [" might be a good design."], + [" of those great ideas,", " that solve many problems."], + [" the code should still aim for balance."], + ], + "label": [ + [False, True], + [False, True, False], + [False, True], + [True, True], + [True, False], + [True], + [False], + [True, False], + [False, False], + [False, False], + [True], + [True, True, False], + [True, True], + [False], + [True], [False], + [False], + [True, True], + [False] + ] + }) + standard_step_dataset = standard_step_dataset.train_test_split(test_size=test_size) + if push_to_hub: + standard_step_dataset.push_to_hub(repo_id, config_name="standard_step") + + conversational_language_modeling_dataset = Dataset.from_dict({ + "messages": [ + [{"role": "user", "content": "What is better than ugly?"}, {"role": "assistant", "content": "Beautiful."},], + [{"role": "user", "content": "What is better than implicit?"}, {"role": "assistant", "content": "Explicit."}], + [{"role": "user", "content": "What is better than complex?"}, {"role": "assistant", "content": "Simple."}], + [{"role": "user", "content": "What is better than complicated?"}, {"role": "assistant", "content": "Complex."}], + [{"role": "user", "content": "What is better than nested?"}, {"role": "assistant", "content": "Flat."}], + [{"role": "user", "content": "What is better than dense?"}, {"role": "assistant", "content": "Sparse."}], + [{"role": "user", "content": "What counts?"}, {"role": "assistant", "content": "Readability."}], + [{"role": "user", "content": "Are special cases enough to break the rules?"}, {"role": "assistant", "content": "No, special cases aren't special enough to break the rules."}], + [{"role": "user", "content": "What beats purity?"}, {"role": "assistant", "content": "Practicality."}], + [{"role": "user", "content": "What should never pass silently?"}, {"role": "assistant", "content": "Errors."}], + [{"role": "user", "content": "When can errors pass silently?"}, {"role": "assistant", "content": "When explicitly silenced."}], + [{"role": "user", "content": "What should you do in the face of ambiguity?"}, {"role": "assistant", "content": "Refuse the temptation to guess."}], + [{"role": "user", "content": "How many ways should there be to do it?"}, {"role": "assistant", "content": "One, and preferably only one."}], + [{"role": "user", "content": "For whom may the way not be obvious at first?"}, {"role": "assistant", "content": "Dutch."}], + [{"role": "user", "content": "What is better than never?"}, {"role": "assistant", "content": "Now is better than never."}], + [{"role": "user", "content": "Is never better than *right* now?"}, {"role": "assistant", "content": "Yes, often."}], + [{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}, {"role": "assistant", "content": "It means it's a bad idea."}], + [{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}, {"role": "assistant", "content": "It means it may be a good idea."}], + [{"role": "user", "content": "Any great ideas?"}, {"role": "assistant", "content": "Namespaces are one honking great idea."}], + ], + }) + conversational_language_modeling_dataset = conversational_language_modeling_dataset.train_test_split(test_size=test_size) + if push_to_hub: + conversational_language_modeling_dataset.push_to_hub(repo_id, config_name="conversational_language_modeling") + + conversational_prompt_only_dataset = Dataset.from_dict({ + "prompt": [ + [{"role": "user", "content": "What is better than ugly?"}], + [{"role": "user", "content": "What is better than implicit?"}], + [{"role": "user", "content": "What is better than complex?"}], + [{"role": "user", "content": "What is better than complicated?"}], + [{"role": "user", "content": "What is better than nested?"}], + [{"role": "user", "content": "What is better than dense?"}], + [{"role": "user", "content": "What counts?"}], + [{"role": "user", "content": "Are special cases enough to break the rules?"}], + [{"role": "user", "content": "What beats purity?"}], + [{"role": "user", "content": "What should never pass silently?"}], + [{"role": "user", "content": "When can errors pass silently?"}], + [{"role": "user", "content": "What should you do in the face of ambiguity?"}], + [{"role": "user", "content": "How many ways should there be to do it?"}], + [{"role": "user", "content": "For whom may the way not be obvious at first?"}], + [{"role": "user", "content": "What is better than never?"}], + [{"role": "user", "content": "Is never better than *right* now?"}], + [{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}], + [{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}], + [{"role": "user", "content": "Any great ideas?"}], + ], + }) + conversational_prompt_only_dataset = conversational_prompt_only_dataset.train_test_split(test_size=test_size) + if push_to_hub: + conversational_prompt_only_dataset.push_to_hub(repo_id, config_name="conversational_prompt_only") + + conversational_prompt_completion_dataset = Dataset.from_dict({ + "prompt": [ + [{"role": "user", "content": "What is better than ugly?"}], + [{"role": "user", "content": "What is better than implicit?"}], + [{"role": "user", "content": "What is better than complex?"}], + [{"role": "user", "content": "What is better than complicated?"}], + [{"role": "user", "content": "What is better than nested?"}], + [{"role": "user", "content": "What is better than dense?"}], + [{"role": "user", "content": "What counts?"}], + [{"role": "user", "content": "Are special cases enough to break the rules?"}], + [{"role": "user", "content": "What beats purity?"}], + [{"role": "user", "content": "What should never pass silently?"}], + [{"role": "user", "content": "When can errors pass silently?"}], + [{"role": "user", "content": "What should you do in the face of ambiguity?"}], + [{"role": "user", "content": "How many ways should there be to do it?"}], + [{"role": "user", "content": "For whom may the way not be obvious at first?"}], + [{"role": "user", "content": "What is better than never?"}], + [{"role": "user", "content": "Is never better than *right* now?"}], + [{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}], + [{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}], + [{"role": "user", "content": "Any great ideas?"}], + ], + "completion": [ + [{"role": "assistant", "content": "Beautiful."}], + [{"role": "assistant", "content": "Explicit."}], + [{"role": "assistant", "content": "Simple."}], + [{"role": "assistant", "content": "Complex."}], + [{"role": "assistant", "content": "Flat."}], + [{"role": "assistant", "content": "Sparse."}], + [{"role": "assistant", "content": "Readability."}], + [{"role": "assistant", "content": "No, special cases aren't special enough to break the rules."}], + [{"role": "assistant", "content": "Practicality."}], + [{"role": "assistant", "content": "Errors."}], + [{"role": "assistant", "content": "When explicitly silenced."}], + [{"role": "assistant", "content": "Refuse the temptation to guess."}], + [{"role": "assistant", "content": "One, and preferably only one."}], + [{"role": "assistant", "content": "Dutch."}], + [{"role": "assistant", "content": "Now is better than never."}], + [{"role": "assistant", "content": "Yes, often."}], + [{"role": "assistant", "content": "It means it's a bad idea."}], + [{"role": "assistant", "content": "It means it may be a good idea."}], + [{"role": "assistant", "content": "Namespaces are one honking great idea."}], + ], + }) + conversational_prompt_completion_dataset = conversational_prompt_completion_dataset.train_test_split(test_size=test_size) + if push_to_hub: + conversational_prompt_completion_dataset.push_to_hub(repo_id, config_name="conversational_prompt_completion") + + conversational_preference_dataset = Dataset.from_dict({ + "prompt": [ + [{"role": "user", "content": "What is better than ugly?"}], + [{"role": "user", "content": "What is better than implicit?"}], + [{"role": "user", "content": "What is better than complex?"}], + [{"role": "user", "content": "What is better than complicated?"}], + [{"role": "user", "content": "What is better than nested?"}], + [{"role": "user", "content": "What is better than dense?"}], + [{"role": "user", "content": "What counts?"}], + [{"role": "user", "content": "Are special cases enough to break the rules?"}], + [{"role": "user", "content": "What beats purity?"}], + [{"role": "user", "content": "What should never pass silently?"}], + [{"role": "user", "content": "When can errors pass silently?"}], + [{"role": "user", "content": "What should you do in the face of ambiguity?"}], + [{"role": "user", "content": "How many ways should there be to do it?"}], + [{"role": "user", "content": "For whom may the way not be obvious at first?"}], + [{"role": "user", "content": "What is better than never?"}], + [{"role": "user", "content": "Is never better than *right* now?"}], + [{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}], + [{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}], + [{"role": "user", "content": "Any great ideas?"}], + ], + "chosen": [ + [{"role": "assistant", "content": "Beautiful."}], + [{"role": "assistant", "content": "Explicit."}], + [{"role": "assistant", "content": "Simple."}], + [{"role": "assistant", "content": "Complex."}], + [{"role": "assistant", "content": "Flat."}], + [{"role": "assistant", "content": "Sparse."}], + [{"role": "assistant", "content": "Readability."}], + [{"role": "assistant", "content": "No, special cases aren't special enough to break the rules."}], + [{"role": "assistant", "content": "Practicality."}], + [{"role": "assistant", "content": "Errors."}], + [{"role": "assistant", "content": "When explicitly silenced."}], + [{"role": "assistant", "content": "Refuse the temptation to guess."}], + [{"role": "assistant", "content": "One, and preferably only one."}], + [{"role": "assistant", "content": "Dutch."}], + [{"role": "assistant", "content": "Now is better than never."}], + [{"role": "assistant", "content": "Yes, often."}], + [{"role": "assistant", "content": "It means it's a bad idea."}], + [{"role": "assistant", "content": "It means it may be a good idea."}], + [{"role": "assistant", "content": "Namespaces are one honking great idea."}], + ], + "rejected": [ + [{"role": "assistant", "content": "Acceptable."}], + [{"role": "assistant", "content": "Explained."}], + [{"role": "assistant", "content": "Very complex."}], + [{"role": "assistant", "content": "Very complicated."}], + [{"role": "assistant", "content": "Circular."}], + [{"role": "assistant", "content": "Heavy."}], + [{"role": "assistant", "content": "Looking complicated."}], + [{"role": "assistant", "content": "Yes, special cases are special enough to break the rules."}], + [{"role": "assistant", "content": "Nothing."}], + [{"role": "assistant", "content": "Warnings."}], + [{"role": "assistant", "content": "Never."}], + [{"role": "assistant", "content": "Give up."}], + [{"role": "assistant", "content": "As many as possible."}], + [{"role": "assistant", "content": "French."}], + [{"role": "assistant", "content": "Some day."}], + [{"role": "assistant", "content": "No, never."}], + [{"role": "assistant", "content": "It means it's a good idea."}], + [{"role": "assistant", "content": "It means it's a bad idea."}], + [{"role": "assistant", "content": "Recursion."}], + ], + }) + conversational_preference_dataset = conversational_preference_dataset.train_test_split(test_size=test_size) + if push_to_hub: + conversational_preference_dataset.push_to_hub(repo_id, config_name="conversational_preference") + + conversational_implicit_prompt_preference_dataset = Dataset.from_dict({ + "chosen": [ + [{"role": "user", "content": "What is better than ugly?"}, {"role": "assistant", "content": "Beautiful."}], + [{"role": "user", "content": "What is better than implicit?"}, {"role": "assistant", "content": "Explicit."}], + [{"role": "user", "content": "What is better than complex?"}, {"role": "assistant", "content": "Simple."}], + [{"role": "user", "content": "What is better than complicated?"}, {"role": "assistant", "content": "Complex."}], + [{"role": "user", "content": "What is better than nested?"}, {"role": "assistant", "content": "Flat."}], + [{"role": "user", "content": "What is better than dense?"}, {"role": "assistant", "content": "Sparse."}], + [{"role": "user", "content": "What counts?"}, {"role": "assistant", "content": "Readability."}], + [{"role": "user", "content": "Are special cases enough to break the rules?"}, {"role": "assistant", "content": "No, special cases aren't special enough to break the rules."}], + [{"role": "user", "content": "What beats purity?"}, {"role": "assistant", "content": "Practicality."}], + [{"role": "user", "content": "What should never pass silently?"}, {"role": "assistant", "content": "Errors."}], + [{"role": "user", "content": "When can errors pass silently?"}, {"role": "assistant", "content": "When explicitly silenced."}], + [{"role": "user", "content": "What should you do in the face of ambiguity?"}, {"role": "assistant", "content": "Refuse the temptation to guess."}], + [{"role": "user", "content": "How many ways should there be to do it?"}, {"role": "assistant", "content": "One, and preferably only one."}], + [{"role": "user", "content": "For whom may the way not be obvious at first?"}, {"role": "assistant", "content": "Dutch."}], + [{"role": "user", "content": "What is better than never?"}, {"role": "assistant", "content": "Now is better than never."}], + [{"role": "user", "content": "Is never better than *right* now?"}, {"role": "assistant", "content": "Yes, often."}], + [{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}, {"role": "assistant", "content": "It means it's a bad idea."}], + [{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}, {"role": "assistant", "content": "It means it may be a good idea."}], + [{"role": "user", "content": "Any great ideas?"}, {"role": "assistant", "content": "Namespaces are one honking great idea."}], + ], + "rejected": [ + [{"role": "user", "content": "What is better than ugly?"}, {"role": "assistant", "content": "Acceptable."}], + [{"role": "user", "content": "What is better than implicit?"}, {"role": "assistant", "content": "Explained."}], + [{"role": "user", "content": "What is better than complex?"}, {"role": "assistant", "content": "Very complex."}], + [{"role": "user", "content": "What is better than complicated?"}, {"role": "assistant", "content": "Very complicated."}], + [{"role": "user", "content": "What is better than nested?"}, {"role": "assistant", "content": "Circular."}], + [{"role": "user", "content": "What is better than dense?"}, {"role": "assistant", "content": "Heavy."}], + [{"role": "user", "content": "What counts?"}, {"role": "assistant", "content": "Looking complicated."}], + [{"role": "user", "content": "Are special cases enough to break the rules?"}, {"role": "assistant", "content": "Yes, special cases are special enough to break the rules."}], + [{"role": "user", "content": "What beats purity?"}, {"role": "assistant", "content": "Nothing."}], + [{"role": "user", "content": "What should never pass silently?"}, {"role": "assistant", "content": "Warnings."}], + [{"role": "user", "content": "When can errors pass silently?"}, {"role": "assistant", "content": "Never."}], + [{"role": "user", "content": "What should you do in the face of ambiguity?"}, {"role": "assistant", "content": "Give up."}], + [{"role": "user", "content": "How many ways should there be to do it?"}, {"role": "assistant", "content": "As many as possible."}], + [{"role": "user", "content": "For whom may the way not be obvious at first?"}, {"role": "assistant", "content": "French."}], + [{"role": "user", "content": "What is better than never?"}, {"role": "assistant", "content": "Some day."}], + [{"role": "user", "content": "Is never better than *right* now?"}, {"role": "assistant", "content": "No, never."}], + [{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}, {"role": "assistant", "content": "It means it's a good idea."}], + [{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}, {"role": "assistant", "content": "It means it's a bad idea."}], + [{"role": "user", "content": "Any great ideas?"}, {"role": "assistant", "content": "Recursion."}], + ], + }) + conversational_implicit_prompt_preference_dataset = conversational_implicit_prompt_preference_dataset.train_test_split(test_size=test_size) + if push_to_hub: + conversational_implicit_prompt_preference_dataset.push_to_hub(repo_id, config_name="conversational_implicit_prompt_preference") + + conversational_unpaired_preference_dataset = Dataset.from_dict({ + "prompt": [ + [{"role": "user", "content": "What is better than ugly?"}], + [{"role": "user", "content": "What is better than implicit?"}], + [{"role": "user", "content": "What is better than complex?"}], + [{"role": "user", "content": "What is better than complicated?"}], + [{"role": "user", "content": "What is better than nested?"}], + [{"role": "user", "content": "What is better than dense?"}], + [{"role": "user", "content": "What counts?"}], + [{"role": "user", "content": "Are special cases enough to break the rules?"}], + [{"role": "user", "content": "What beats purity?"}], + [{"role": "user", "content": "What should never pass silently?"}], + [{"role": "user", "content": "When can errors pass silently?"}], + [{"role": "user", "content": "What should you do in the face of ambiguity?"}], + [{"role": "user", "content": "How many ways should there be to do it?"}], + [{"role": "user", "content": "For whom may the way not be obvious at first?"}], + [{"role": "user", "content": "What is better than never?"}], + [{"role": "user", "content": "Is never better than *right* now?"}], + [{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}], + [{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}], + [{"role": "user", "content": "Any great ideas?"}], + ], + "completion": [ + [{'role': 'assistant', 'content': 'Beautiful.'}], + [{'role': 'assistant', 'content': 'Explicit.'}], + [{'role': 'assistant', 'content': 'Simple.'}], + [{'role': 'assistant', 'content': 'Very complicated.'}], + [{'role': 'assistant', 'content': 'Flat.'}], + [{'role': 'assistant', 'content': 'Sparse.'}], + [{'role': 'assistant', 'content': 'Readability.'}], + [{'role': 'assistant', 'content': 'Yes, special cases are special enough to break the rules.'}], + [{'role': 'assistant', 'content': 'Practicality.'}], + [{'role': 'assistant', 'content': 'Warnings.'}], + [{'role': 'assistant', 'content': 'When explicitly silenced.'}], + [{'role': 'assistant', 'content': 'Give up.'}], + [{'role': 'assistant', 'content': 'One, and preferably only one.'}], + [{'role': 'assistant', 'content': 'French.'}], + [{'role': 'assistant', 'content': 'Some day.'}], + [{'role': 'assistant', 'content': 'Yes, often.'}], + [{'role': 'assistant', 'content': "It means it's a bad idea."}], + [{'role': 'assistant', 'content': 'It means it may be a good idea.'}], + [{'role': 'assistant', 'content': 'Namespaces are one honking great idea.'}], + ], + "label": [True, True, True, False, True, True, True, False, True, False, True, False, True, False, False, True, True, True, True], + }) + conversational_unpaired_preference_dataset = conversational_unpaired_preference_dataset.train_test_split(test_size=test_size) + if push_to_hub: + conversational_unpaired_preference_dataset.push_to_hub(repo_id, config_name="conversational_unpaired_preference") + # fmt: on + + +if __name__ == "__main__": + parser = HfArgumentParser(ScriptArguments) + script_args = parser.parse_args_into_dataclasses()[0] + main(script_args.test_size, script_args.push_to_hub, script_args.repo_id) diff --git a/testbed/huggingface__trl/examples/notebooks/README.md b/testbed/huggingface__trl/examples/notebooks/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f2a11e280099f79e30059ff77295d53eff30b62a --- /dev/null +++ b/testbed/huggingface__trl/examples/notebooks/README.md @@ -0,0 +1,7 @@ +# Notebooks + +This directory contains a collection of Jupyter notebooks that demonstrate how to use the TRL library in different applications. + +- [`best_of_n.ipynb`](https://github.com/huggingface/trl/tree/main/examples/notebooks/best_of_n.ipynb): This notebook demonstrates how to use the "Best of N" sampling strategy using TRL when fine-tuning your model with PPO. +- [`gpt2-sentiment.ipynb`](https://github.com/huggingface/trl/tree/main/examples/notebooks/gpt2-sentiment.ipynb): This notebook demonstrates how to reproduce the GPT2 imdb sentiment tuning example on a jupyter notebook. +- [`gpt2-control.ipynb`](https://github.com/huggingface/trl/tree/main/examples/notebooks/gpt2-sentiment-control.ipynb): This notebook demonstrates how to reproduce the GPT2 sentiment control example on a jupyter notebook. diff --git a/testbed/huggingface__trl/examples/notebooks/best_of_n.ipynb b/testbed/huggingface__trl/examples/notebooks/best_of_n.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..75ea2815e97f0e9a008aeb62a1b8fe2aafd2df4d --- /dev/null +++ b/testbed/huggingface__trl/examples/notebooks/best_of_n.ipynb @@ -0,0 +1,662 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "WQpNapZNWuXP" + }, + "source": [ + "\n", + "**Best-of-n sampling as an alternative to RLHF**\n", + "\n", + "This notebook compares reward-model scores of prompt based responses from \n", + "1. a base model (`gpt2-imdb`)\n", + "2. `RLHF` tuned model based on this base-model \n", + "3. the base-model again from which we sample n responses to each prompt, score them and take the best scored one AKA the `best-of-n sampled` model\n", + "\n", + "Import dependencies" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "vDA6qayz692w" + }, + "outputs": [], + "source": [ + "%pip install transformers trl" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "id": "M1s_iNm773hM" + }, + "outputs": [], + "source": [ + "import torch\n", + "import pandas as pd\n", + "\n", + "from transformers import pipeline, AutoTokenizer\n", + "from datasets import load_dataset\n", + "\n", + "from trl import AutoModelForCausalLMWithValueHead\n", + "from trl.core import LengthSampler\n", + "\n", + "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Y7hyrIrO8tcY" + }, + "source": [ + "Various constants" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "MqS3OM6Q8x6g" + }, + "outputs": [], + "source": [ + "ref_model_name = \"lvwerra/gpt2-imdb\"\n", + "model_name = \"lvwerra/gpt2-imdb-pos-v2\"\n", + "reward_model = \"lvwerra/distilbert-imdb\"\n", + "\n", + "N_BEST_OF = 4" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c1YcXeElg6or" + }, + "source": [ + "Models and tokenizers" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "id": "b855NrL181Hh" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/kashif/Github/transformers/src/transformers/tokenization_utils_base.py:1617: FutureWarning: `clean_up_tokenization_spaces` was not set. It will be set to `True` by default. This behavior will be deprecated in transformers v4.45, and will be then set to `False` by default. For more details check this issue: https://github.com/huggingface/transformers/issues/31884\n", + " warnings.warn(\n" + ] + }, + { + "data": { + "text/plain": [ + "AutoModelForCausalLMWithValueHead(\n", + " (pretrained_model): GPT2LMHeadModel(\n", + " (transformer): GPT2Model(\n", + " (wte): Embedding(50257, 768)\n", + " (wpe): Embedding(1024, 768)\n", + " (drop): Dropout(p=0.1, inplace=False)\n", + " (h): ModuleList(\n", + " (0-11): 12 x GPT2Block(\n", + " (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n", + " (attn): GPT2SdpaAttention(\n", + " (c_attn): Conv1D(nf=2304, nx=768)\n", + " (c_proj): Conv1D(nf=768, nx=768)\n", + " (attn_dropout): Dropout(p=0.1, inplace=False)\n", + " (resid_dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n", + " (mlp): GPT2MLP(\n", + " (c_fc): Conv1D(nf=3072, nx=768)\n", + " (c_proj): Conv1D(nf=768, nx=3072)\n", + " (act): NewGELUActivation()\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " )\n", + " (ln_f): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n", + " )\n", + " (lm_head): Linear(in_features=768, out_features=50257, bias=False)\n", + " )\n", + " (v_head): ValueHead(\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " (summary): Linear(in_features=768, out_features=1, bias=True)\n", + " (flatten): Flatten(start_dim=1, end_dim=-1)\n", + " )\n", + ")" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model = AutoModelForCausalLMWithValueHead.from_pretrained(model_name)\n", + "\n", + "ref_model = AutoModelForCausalLMWithValueHead.from_pretrained(ref_model_name)\n", + "\n", + "reward_pipe = pipeline(\"sentiment-analysis\", model=reward_model, device=device)\n", + "\n", + "tokenizer = AutoTokenizer.from_pretrained(ref_model_name)\n", + "\n", + "tokenizer.pad_token = tokenizer.eos_token\n", + "\n", + "# cuda-ize models\n", + "model.to(device)\n", + "ref_model.to(device)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Z1Cz0gCFhZYJ" + }, + "source": [ + "Dataset building" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "id": "LqLVEp5p_8XM" + }, + "outputs": [], + "source": [ + "def build_dataset(\n", + " tokenizer,\n", + " dataset_name=\"stanfordnlp/imdb\",\n", + " input_min_text_length=2,\n", + " input_max_text_length=8,\n", + "):\n", + " # load imdb with datasets\n", + " ds = load_dataset(dataset_name, split=\"train\")\n", + " ds = ds.rename_columns({\"text\": \"review\"})\n", + " ds = ds.filter(lambda x: len(x[\"review\"]) > 200, batched=False)\n", + "\n", + " input_size = LengthSampler(input_min_text_length, input_max_text_length)\n", + "\n", + " def tokenize(sample):\n", + " sample[\"input_ids\"] = tokenizer.encode(sample[\"review\"])[: input_size()]\n", + " sample[\"query\"] = tokenizer.decode(sample[\"input_ids\"])\n", + " return sample\n", + "\n", + " ds = ds.map(tokenize, batched=False)\n", + " ds.set_format(type=\"torch\")\n", + " return ds\n", + "\n", + "\n", + "dataset = build_dataset(tokenizer)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "id": "AqA2McjMAxNw" + }, + "outputs": [], + "source": [ + "gen_kwargs = {\n", + " \"min_length\": -1,\n", + " \"top_k\": 0.0,\n", + " \"top_p\": 1.0,\n", + " \"do_sample\": True,\n", + " \"pad_token_id\": tokenizer.eos_token_id,\n", + "}\n", + "sent_kwargs = {\"top_k\": None, \"function_to_apply\": \"none\", \"batch_size\": 16}" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "id": "L_q4qs35AxcR" + }, + "outputs": [], + "source": [ + "output_min_length = 4\n", + "output_max_length = 16\n", + "output_length_sampler = LengthSampler(output_min_length, output_max_length)\n", + "\n", + "#### get a batch from the dataset\n", + "bs = 16\n", + "output_data = dict()\n", + "dataset.set_format(\"pandas\")\n", + "df_batch = dataset[:].sample(bs)\n", + "output_data[\"query\"] = df_batch[\"query\"].tolist()\n", + "query_tensors = df_batch[\"input_ids\"].tolist()\n", + "\n", + "# :: [Resp]\n", + "response_tensors_ref, response_tensors = [], []\n", + "# :: [[Resp]]\n", + "response_tensors_best_of = []" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QVfpyHnZBLKY" + }, + "source": [ + "\n", + "Generation using various models" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "id": "-imZ7uEFBNbw" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "The attention mask is not set and cannot be inferred from input because pad token is same as eos token. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n" + ] + } + ], + "source": [ + "for i in range(bs):\n", + " gen_len = output_length_sampler()\n", + "\n", + " query = torch.tensor(query_tensors[i])\n", + "\n", + " output = ref_model.generate(\n", + " query.unsqueeze(dim=0).to(device), max_new_tokens=gen_len, **gen_kwargs\n", + " ).squeeze()\n", + " response_tensors_ref.append(tokenizer.decode(output))\n", + "\n", + " output = model.generate(\n", + " query.unsqueeze(dim=0).to(device), max_new_tokens=gen_len, **gen_kwargs\n", + " ).squeeze()\n", + " response_tensors.append(tokenizer.decode(output))\n", + "\n", + " # generating copies of the same query for the Best-of-n sampling\n", + " queries = query.repeat((N_BEST_OF, 1))\n", + " output = ref_model.generate(\n", + " queries.to(device), max_new_tokens=gen_len, **gen_kwargs\n", + " ).squeeze()\n", + " response_tensors_best_of.append(tokenizer.batch_decode(output))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Jp5FC0Y5h_Sf" + }, + "source": [ + "Scoring" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "id": "PyDbbAQ0F_h7" + }, + "outputs": [], + "source": [ + "scores_ref = [\n", + " output[0][\"score\"] for output in reward_pipe(response_tensors_ref, **sent_kwargs)\n", + "]\n", + "scores = [output[0][\"score\"] for output in reward_pipe(response_tensors, **sent_kwargs)]\n", + "scores_best_of = []\n", + "for i, response in enumerate(response_tensors_best_of):\n", + " # base_score = scores_ref[i]\n", + " scores_best_of.append(\n", + " torch.tensor(\n", + " [output[0][\"score\"] for output in reward_pipe(response, **sent_kwargs)]\n", + " )\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 682 + }, + "id": "nA1GDNJEiGm-", + "outputId": "1389c686-0751-4304-dea2-b71fd68748e1" + }, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
queryresponse (ref)scores (ref)response (RLHF)scores (RLHF)response (best_of)scores (best_of)
0This movieThis movie should have read some books, and1.411889This movie has plenty of extraordinary feature...2.735337This movie was unexpectedly funny and funny, you2.405301
1OK where do i begin?OK where do i begin? *** Acting is decent (not...1.555380OK where do i begin? For all of you who are no...0.019694OK where do i begin? i just wanted to add some...0.622912
2I watchedI watched one can compare themselves upon view...1.380120I watched it because of its excellent cast. Th...2.498309I watched the trial trial for teaches us a goo...2.057187
3It's been 19 years since GordonIt's been 19 years since Gordon finally left c...1.554914It's been 19 years since Gordon Tree has becom...1.632266It's been 19 years since Gordon Clarke put me ...2.783458
4Just kiddingJust kidding; I know a lot-0.069533Just kidding \"Third World Snopes0.944632Just kidding, I didn't even1.945202
5shakespeare's plays have a wayshakespeare's plays have a way of weaving into...1.656927shakespeare's plays have a way. It's the look ...1.444803shakespeare's plays have a way of getting back...1.834373
6This movie is wonderful. WhatThis movie is wonderful. What could have been ...2.749068This movie is wonderful. What someone likes ab...2.759510This movie is wonderful. What a different look,2.695312
7I lovedI loved this film. <br /><2.576181I loved it, and I really loved Audrey2.578412I loved this film. Reading reviews of it2.751773
8A superb andA superb and very cool drama. The novel is2.910374A superb and super fun movie that removes all the2.783201A superb and most finely acted role that I will2.894923
9I rememberI remember.Very poor execution but good movies0.923775I remember when Shelter saw some girls on TV0.825408I remember thinking to myself how SOMEONE who1.634163
10This su*kThis su*k camel down your kidd1.605957This su*k Dress! I loved it2.345865This su*k like a roll of crap2.422874
11One StinkOne Stink Act...<br /><br1.456476One Stinkl was a great actor, particularly1.782818One Stink?: Invisible of Saint Barbara, poor1.667756
12I pulled down a VHSI pulled down a VHS copy and watched it with m...0.756151I pulled down a VHS looking a good looking, and a-0.008258I pulled down a VHS copy the other day and all I0.992919
13For someFor some alone no more Buddy Trumbull would ha...0.790762For some enthraled time, the film will impress...2.455694For some reason, a bomb crashed on the rear of...0.857423
14This one features allThis one features all the good elements of spi...1.452079This one features all kinds of wit and humor r...2.743043This one features all the best Birdprogram sup...2.343950
15Somehow a woman working withSomehow a woman working with Jim Wynorski prof...0.242172Somehow a woman working with her daughter play...0.092226Somehow a woman working with an overweight ins...1.415525
\n", + "
" + ], + "text/plain": [ + " query \\\n", + "0 This movie \n", + "1 OK where do i begin? \n", + "2 I watched \n", + "3 It's been 19 years since Gordon \n", + "4 Just kidding \n", + "5 shakespeare's plays have a way \n", + "6 This movie is wonderful. What \n", + "7 I loved \n", + "8 A superb and \n", + "9 I remember \n", + "10 This su*k \n", + "11 One Stink \n", + "12 I pulled down a VHS \n", + "13 For some \n", + "14 This one features all \n", + "15 Somehow a woman working with \n", + "\n", + " response (ref) scores (ref) \\\n", + "0 This movie should have read some books, and 1.411889 \n", + "1 OK where do i begin? *** Acting is decent (not... 1.555380 \n", + "2 I watched one can compare themselves upon view... 1.380120 \n", + "3 It's been 19 years since Gordon finally left c... 1.554914 \n", + "4 Just kidding; I know a lot -0.069533 \n", + "5 shakespeare's plays have a way of weaving into... 1.656927 \n", + "6 This movie is wonderful. What could have been ... 2.749068 \n", + "7 I loved this film.
< 2.576181 \n", + "8 A superb and very cool drama. The novel is 2.910374 \n", + "9 I remember.Very poor execution but good movies 0.923775 \n", + "10 This su*k camel down your kidd 1.605957 \n", + "11 One Stink Act...

Optimise GPT2 to produce IMDB movie reviews with controlled sentiment using a BERT sentiment classifier for rewards.\n", + "\n", + "**WARNING:** We often experienced loss spikes in this examples which caused model training to fail or slow down. There is a [GitHub issue](https://github.com/lvwerra/trl/issues/101) to track the issue." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "\n", + "

Figure: Experiment setup to tune GPT2. The yellow arrows are outside the scope of this notebook, but the trained models are available through Hugging Face.

\n", + "
\n", + "\n", + "\n", + "The experiment setup is very similar to the positive sentiment notebook. However, in this notebook we fine-tune GPT2 (small) to generate **controlled** movie reviews based on the IMDB dataset. The model gets the target sentiment and 5 tokens from a real review and is tasked to produce continuations with the targeted sentiment. The reward for the continuations is calculated with the logits of a BERT sentiment classifier. That reward is then used for PPO training." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup experiment" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Import dependencies" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/leandro_huggingface_co/miniconda3/envs/trl/lib/python3.9/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + } + ], + "source": [ + "import random\n", + "import torch\n", + "import wandb\n", + "import time\n", + "import os\n", + "from tqdm import tqdm\n", + "import numpy as np\n", + "import pandas as pd\n", + "from random import choices\n", + "import matplotlib.pyplot as plt\n", + "\n", + "tqdm.pandas()\n", + "\n", + "from datasets import load_dataset\n", + "\n", + "from transformers import AutoTokenizer, pipeline\n", + "\n", + "from trl import (\n", + " PPOTrainer,\n", + " PPOConfig,\n", + " AutoModelForCausalLMWithValueHead,\n", + " create_reference_model,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Configuration" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "sentiment_pipe_kwargs = {\"top_k\": None, \"function_to_apply\": \"none\"}\n", + "\n", + "config = PPOConfig(\n", + " model_name=\"lvwerra/gpt2-imdb\",\n", + " steps=51200,\n", + " learning_rate=1.41e-5,\n", + " remove_unused_columns=False,\n", + " log_with=\"wandb\",\n", + ")\n", + "\n", + "txt_in_len = 5\n", + "txt_out_len = 20\n", + "seed = 1" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "np.random.seed(seed)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see that we load a GPT2 model called `gpt2_imdb`. This model was additionally fine-tuned on the IMDB dataset for 1 epoch with the huggingface [script](https://github.com/huggingface/transformers/blob/master/examples/run_language_modeling.py) (no special settings). The other parameters are mostly taken from the original paper [\"Fine-Tuning Language Models from Human Preferences\"](\n", + "https://huggingface.co/papers/1909.08593). This model as well as the BERT model is available in the Huggingface model zoo [here](https://huggingface.co/models). The following code should automatically download the models." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load data and models" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load pre-trained GPT2 language models" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We load the GPT2 model with a value head and the tokenizer. We load the model twice; the first model is optimized while the second model serves as a reference to calculate the KL-divergence from the starting point. This serves as an additional reward signal in the PPO training to make sure the optimized model does not deviate too much from the original language model." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "gpt2_model = AutoModelForCausalLMWithValueHead.from_pretrained(config.model_name)\n", + "gpt2_ref_model = create_reference_model(gpt2_model)\n", + "gpt2_tokenizer = AutoTokenizer.from_pretrained(config.model_name)\n", + "\n", + "gpt2_tokenizer.pad_token = gpt2_tokenizer.eos_token" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load IMDB dataset\n", + "The IMDB dataset contains 50k movie review annotated with \"positive\"/\"negative\" feedback indicating the sentiment. We load the IMDB dataset into a DataFrame and filter for comments that are at least 500 characters long and take the first 1000 characters of each comment. The first filter we apply to avoid comments that are less than `txt_in_len` token long and the second to avoid tokenizing way more text than we actually need." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Found cached dataset imdb (/home/leandro_huggingface_co/.cache/huggingface/datasets/imdb/plain_text/1.0.0/2fdd8b9bcadd6e7055e742a706876ba43f19faee861df134affd7a3f60fc38a1)\n", + "Loading cached processed dataset at /home/leandro_huggingface_co/.cache/huggingface/datasets/imdb/plain_text/1.0.0/2fdd8b9bcadd6e7055e742a706876ba43f19faee861df134affd7a3f60fc38a1/cache-d314b4c14499bf03.arrow\n", + "Loading cached processed dataset at /home/leandro_huggingface_co/.cache/huggingface/datasets/imdb/plain_text/1.0.0/2fdd8b9bcadd6e7055e742a706876ba43f19faee861df134affd7a3f60fc38a1/cache-0d5fcb05c95b1186.arrow\n" + ] + }, + { + "data": { + "text/plain": [ + "Dataset({\n", + " features: ['review', 'sentiment'],\n", + " num_rows: 22578\n", + "})" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# create the dataset\n", + "#\n", + "dataset = load_dataset(\"stanfordnlp/imdb\", split=\"train\")\n", + "dataset = dataset.rename_columns({\"text\": \"review\", \"label\": \"sentiment\"})\n", + "# make sure the comments are are at least 500 and trim to 1000\n", + "dataset = dataset.filter(lambda x: len(x[\"review\"]) > 500, batched=False)\n", + "dataset = dataset.map(lambda x: {\"review\": x[\"review\"][:1000]}, batched=False)\n", + "\n", + "dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Tokenize IMDB reviews" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We tokenize all IMDB in advance to avoid tokenizing twice. In the first step we encode the queries and slice the first `txt_in_len` tokens. In a second step we decode these tokens back to text for later display." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Loading cached processed dataset at /home/leandro_huggingface_co/.cache/huggingface/datasets/imdb/plain_text/1.0.0/2fdd8b9bcadd6e7055e742a706876ba43f19faee861df134affd7a3f60fc38a1/cache-383f6ebf0ae41ee4.arrow\n", + "Loading cached processed dataset at /home/leandro_huggingface_co/.cache/huggingface/datasets/imdb/plain_text/1.0.0/2fdd8b9bcadd6e7055e742a706876ba43f19faee861df134affd7a3f60fc38a1/cache-f4875ad4fccbbc1f.arrow\n" + ] + } + ], + "source": [ + "dataset = dataset.map(\n", + " lambda x: {\n", + " \"input_ids\": gpt2_tokenizer.encode(\" \" + x[\"review\"], return_tensors=\"pt\")[\n", + " 0, :txt_in_len\n", + " ]\n", + " },\n", + " batched=False,\n", + ")\n", + "dataset = dataset.map(\n", + " lambda x: {\"query\": gpt2_tokenizer.decode(x[\"input_ids\"])}, batched=False\n", + ")\n", + "dataset = dataset[:20480]\n", + "\n", + "from datasets import Dataset\n", + "\n", + "dataset = Dataset.from_dict(dataset)\n", + "dataset.set_format(\"pytorch\")" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([ 770, 2646, 373, 2192, 7867])" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "dataset[3][\"input_ids\"]" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def collator(data):\n", + " return dict((key, [d[key] for d in data]) for key in data[0])" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Failed to detect the name of this notebook, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable to enable code saving.\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mlvwerra\u001b[0m. Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.13.9" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/leandro_huggingface_co/trl/examples/sentiment/notebooks/wandb/run-20230206_125743-jpcnr7jx" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run comic-music-184 to Weights & Biases (docs)
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/lvwerra/trl" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/lvwerra/trl/runs/jpcnr7jx" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "ppo_trainer = PPOTrainer(\n", + " config, gpt2_model, gpt2_ref_model, gpt2_tokenizer, dataset, data_collator=collator\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load BERT classifier\n", + "We load a BERT classifier fine-tuned on the IMDB dataset." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "if ppo_trainer.accelerator.num_processes == 1:\n", + " device = 0 if torch.cuda.is_available() else \"cpu\" # to avoid a `pipeline` bug\n", + "else:\n", + " device = ppo_trainer.accelerator.device\n", + "sentiment_pipe = pipeline(\n", + " \"sentiment-analysis\", \"lvwerra/distilbert-imdb\", device=device\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The model outputs are the logits for the negative and positive class. We will use the logits for positive class as a reward signal for the language model." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'label': 'NEGATIVE', 'score': 2.3350484371185303},\n", + " {'label': 'POSITIVE', 'score': -2.726576328277588}]" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "text = \"this movie was really bad!!\"\n", + "output = sentiment_pipe(text, **sentiment_pipe_kwargs)\n", + "output" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'label': 'POSITIVE', 'score': 2.557040214538574},\n", + " {'label': 'NEGATIVE', 'score': -2.294790267944336}]" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "text = \"this movie was really good!!\"\n", + "output = sentiment_pipe(text, **sentiment_pipe_kwargs)\n", + "output" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'label': 'POSITIVE', 'score': 0.8562759160995483},\n", + " {'label': 'NEGATIVE', 'score': -0.7086048126220703}]" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "text = \"this movie was a documentary\"\n", + "output = sentiment_pipe(text, **sentiment_pipe_kwargs)\n", + "output" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The resulting reward signal:" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "def extract_pipe_output(outputs):\n", + " positive_logits = []\n", + " for out in outputs:\n", + " for element in out:\n", + " if element[\"label\"] == \"POSITIVE\":\n", + " positive_logits.append(torch.tensor(element[\"score\"]))\n", + " return positive_logits" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "-0.7086048126220703" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "output[1][\"score\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Control token dict\n", + "We will append the control token at the beginning of each query to signal the model what the target sentiment is. Each control sequence consists of three tokens:" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "ctrl_str = [\"[negative]\", \"[neutral]\", \"[positive]\"]\n", + "device = torch.device(\n", + " \"cuda\" if torch.cuda.is_available() else \"cpu\"\n", + ") # this should be handled by accelerate\n", + "ctrl_tokens = dict(\n", + " (s, gpt2_tokenizer.encode(s, return_tensors=\"pt\").squeeze().to(device))\n", + " for s in ctrl_str\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'[negative]': tensor([ 58, 31591, 60], device='cuda:0'),\n", + " '[neutral]': tensor([ 58, 29797, 60], device='cuda:0'),\n", + " '[positive]': tensor([ 58, 24561, 60], device='cuda:0')}" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ctrl_tokens" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reward function" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "def pos_logit_to_reward(logit, task):\n", + " \"\"\"\n", + " Take the positive sentiment logit and scale it for the task.\n", + " task [negative]: reward = -logit\n", + " task [neutral]: reward = -2*abs(logit)+4\n", + " task [positive]: reward = logit\n", + " \"\"\"\n", + " for i in range(len(logit)):\n", + " if task[i] == \"[negative]\":\n", + " logit[i] = -logit[i]\n", + " elif task[i] == \"[neutral]\":\n", + " logit[i] = -2 * torch.abs(logit[i]) + 4\n", + " elif task[i] == \"[positive]\":\n", + " pass\n", + " else:\n", + " raise ValueError(\"task has to be in [0, 1, 2]!\")\n", + " return logit" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The following examples show the rewards for the cases where the classifier logit is 4, -4 and 0 for the three targets `['negative]`, `['neutral]` and `['positive']`. The scaling is not perfect as it differs between neutral and the other two classes. This is something to further investigate in the future. Ideally, one would use the logit output for each class individually, but since there is no dedicated class for neutral this is a workaround." + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['[negative]', '[neutral]', '[positive]']\n" + ] + } + ], + "source": [ + "print(ctrl_str)" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([-4., -4., 4.])" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pos_logit_to_reward(torch.Tensor([4, 4, 4]), ctrl_str)" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([ 4., -4., -4.])" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pos_logit_to_reward(torch.Tensor([-4, -4, -4]), ctrl_str)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([-0., 4., 0.])" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pos_logit_to_reward(torch.Tensor([0, 0, 0]), ctrl_str)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Generation settings" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "generation_kwargs = {\n", + " \"min_length\": -1,\n", + " \"top_k\": 0.0,\n", + " \"top_p\": 1.0,\n", + " \"do_sample\": True,\n", + " \"pad_token_id\": gpt2_tokenizer.eos_token_id,\n", + " \"max_new_tokens\": txt_out_len,\n", + " \"eos_token_id\": -1,\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Optimize model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Steps**\n", + "\n", + "The training loop consists of the following steps:\n", + "1. Get a batch of queries and create random controls\n", + "2. Get the query responses from the policy\n", + "3. Join query and responses and tokenize for BERT analysis\n", + "4. Get sentiments for query/responses from BERT\n", + "5. Optimize policy with PPO using the (query, response, reward) triplet\n", + "6. Log all the training statistics\n", + "\n", + "**Training time**\n", + "\n", + "This step takes **~2h** on a P6000 GPU with the above specified settings." + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + " 8%|▊ | 6/80 [12:44<2:37:54, 128.03s/it]/home/leandro_huggingface_co/miniconda3/envs/trl/lib/python3.9/site-packages/transformers/pipelines/base.py:1045: UserWarning: You seem to be using the pipelines sequentially on GPU. In order to maximize efficiency please use a dataset\n", + " warnings.warn(\n", + "100%|██████████| 80/80 [2:46:39<00:00, 124.99s/it] \n", + " 91%|█████████▏| 73/80 [2:30:39<14:35, 125.03s/it] " + ] + } + ], + "source": [ + "for epoch in range(2):\n", + " for batch in tqdm(ppo_trainer.dataloader):\n", + " (\n", + " logs,\n", + " game_data,\n", + " ) = (\n", + " dict(),\n", + " dict(),\n", + " )\n", + "\n", + " #### prepend a random control token\n", + " task_list = choices(ctrl_str, k=config.batch_size)\n", + " game_data[\"query\"] = [t + q for t, q in zip(task_list, batch[\"query\"])]\n", + " query_tensors = [\n", + " torch.cat((ctrl_tokens[t], input_ids))\n", + " for t, input_ids in zip(task_list, batch[\"input_ids\"])\n", + " ]\n", + "\n", + " #### get response from gpt2\n", + " response_tensors = []\n", + " for query in query_tensors:\n", + " response = ppo_trainer.generate(query, **generation_kwargs)\n", + " response_tensors.append(response.squeeze()[-txt_out_len:])\n", + " game_data[\"response\"] = [\n", + " gpt2_tokenizer.decode(r.squeeze()) for r in response_tensors\n", + " ]\n", + "\n", + " #### sentiment analysis\n", + " texts = [q + r for q, r in zip(batch[\"query\"], game_data[\"response\"])]\n", + " logits = extract_pipe_output(sentiment_pipe(texts, **sentiment_pipe_kwargs))\n", + " rewards = pos_logit_to_reward(logits, task_list)\n", + "\n", + " #### Run PPO training\n", + " t = time.time()\n", + " stats = ppo_trainer.step(query_tensors, response_tensors, rewards)\n", + "\n", + " for cs in ctrl_str:\n", + " key = \"env/reward_\" + cs.strip(\"[]\")\n", + " stats[key] = np.mean(\n", + " [r.cpu().numpy() for r, t in zip(rewards, task_list) if t == cs]\n", + " )\n", + " ppo_trainer.log_stats(stats, game_data, rewards)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Training progress\n", + "If you are tracking the training progress with Weights&Biases you should see a plot similar to the following:\n", + "\n", + "
\n", + "\n", + "

Figure: Reward mean and distribution evolution during training.

\n", + "
\n", + "\n", + "One can observe how the model starts to generate more positive outputs after a few optimisation steps.\n", + "\n", + "> Note: Investigating the KL-divergence will probably show that at this point the model has not converged to the target KL-divergence, yet. To get there would require longer training or starting with a higher inital coefficient." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Model inspection" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reward distribution\n", + "First, we can have a look at the reward distribution. Both the negative and positive rewards are clearly shifted to high rewards. The neutral rewards, however, are still centered around zero. There are a few possible explanations for this. There could be a bug in the code and the way the neutral rewards are calculated. Another problem could be that sentence sometimes start with a strong sentiment and it is hard for the model shift the sentiment towards neutral." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiwAAAGzCAYAAAAMr0ziAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAABPCUlEQVR4nO3deVwVZf8//tecw4HDroiyibKImSmQkKi5B6K3mt4tLvm4RSq7S7lvjTtNLAVcPqip0aLZnbdL3ZK0qP2+5o0SSVmiFor7lklubGqIgB4OnPn9YWfyyGE5h+UM8Ho+Hjw8c80117znOoPzZuaaGUEURRFEREREMqawdABEREREdWHCQkRERLLHhIWIiIhkjwkLERERyR4TFiIiIpI9JixEREQke0xYiIiISPaYsBAREZHsMWEhIiIi2WPCQkQNtmnTJgiCgNzcXLOWnzZtGnx8fAzKBEFAQkJCg2OrS2ZmJgRBQGZmplQ2dOhQ9OrVq8nXDQC5ubkQBAGbNm1qlvURtVRMWIio1UhJSUFycrKlwzBKzrERtQRWlg6AiMiYO3fuwMrKtP+iUlJScOLECcyePbveywwePBh37tyBtbW1iRGapqbYunbtijt37kClUjXp+olaOp5hIZKBsrIyS4dQK51Oh7t37zbrOtVqtckJiynu3r0LnU4HhUIBtVoNhcIy/x0KggC1Wg2lUmmR9RO1FExYiJpZQkICBEHAqVOn8Nxzz6F9+/YYOHCgNP+///0vQkJCYGtrCxcXF0yaNAmXL1+W5r/77rtQKpUoLi6WylatWgVBEBAbGyuVVVVVwdHREa+//rpUtnLlSgwYMAAdOnSAra0tQkJC8MUXX1SLURAExMTEYMuWLXjkkUdgY2ODtLQ0AMDJkycxfPhw2NraonPnzliyZAl0Ol29t3/Hjh3o1asX1Go1evXqhe3btxut9+AYltu3b2P27Nnw8fGBjY0NOnXqhIiICBw+fBjAvXEnX3/9NX777TcIggBBEKRxMfpxKlu3bsWbb74JLy8v2NnZoaSkxOgYFr3s7GwMGDAAtra28PX1xbp16wzm1zR258E2a4utpjEs3377LQYNGgR7e3u0a9cO48aNw+nTpw3q6PelX375BdOmTUO7du3g7OyM6OholJeX1/wlELVAvCREZCHPPvssAgIC8H//938QRREAsHTpUixYsAATJkzAiy++iKKiIrz33nsYPHgwjhw5gnbt2mHQoEHQ6XT44YcfMGbMGADAvn37oFAosG/fPqn9I0eOoLS0FIMHD5bK3nnnHTz55JOYMmUKKioqsHXrVjz77LPYuXMnRo8ebRDft99+i88++wwxMTFwdXWFj48P8vPzMWzYMFRWVmLevHmwt7fHv//9b9ja2tZrm/fs2YOnn34aPXv2RFJSEm7cuIHo6Gh07ty5zmVffvllfPHFF4iJiUHPnj1x48YN/PDDDzh9+jT69OmDN954A7du3cKVK1fw9ttvAwAcHBwM2li8eDGsra3x2muvQaPR1HoZ6Pfff8df/vIXTJgwAZMnT8Znn32GV155BdbW1nj++efrtb169Yntft988w1GjRoFPz8/JCQk4M6dO3jvvffw+OOP4/Dhw9UGKE+YMAG+vr5ISkrC4cOHsX79enTq1AnLly83KU4iWROJqFnFx8eLAMTJkycblOfm5opKpVJcunSpQfnx48dFKysrqbyqqkp0cnIS586dK4qiKOp0OrFDhw7is88+KyqVSvH27duiKIri6tWrRYVCIf7+++9SW+Xl5QZtV1RUiL169RKHDx9uUA5AVCgU4smTJw3KZ8+eLQIQDx48KJUVFhaKzs7OIgDx4sWLtW57cHCw6OHhIRYXF0tle/bsEQGIXbt2rRZDfHy8NO3s7CzOnDmz1vZHjx5drR1RFMW9e/eKAEQ/P79qfaCft3fvXqlsyJAhIgBx1apVUplGoxGDg4PFTp06iRUVFaIoiuLGjRuNbrexNmuK7eLFiyIAcePGjVKZfj03btyQyo4ePSoqFApx6tSpUpl+X3r++ecN2vzrX/8qdujQodq6iFoyXhIispCXX37ZYHrbtm3Q6XSYMGECrl+/Lv24u7sjICAAe/fuBQAoFAoMGDAA33//PQDg9OnTuHHjBubNmwdRFJGVlQXg3lmXXr16oV27dtI67j8T8vvvv+PWrVsYNGiQdFnlfkOGDEHPnj0Nynbt2oV+/fqhb9++UlnHjh0xZcqUOrc3Ly8POTk5iIqKgrOzs1QeERFRbT3GtGvXDgcPHsS1a9fqrFuTqKioep8NsrKywt///ndp2traGn//+99RWFiI7Oxss2Ooi76fpk2bBhcXF6k8MDAQERER2LVrV7VlHtyXBg0ahBs3bqCkpKTJ4iRqbkxYiCzE19fXYPr8+fMQRREBAQHo2LGjwc/p06dRWFgo1R00aBCys7Nx584d7Nu3Dx4eHujTpw+CgoKky0I//PADBg0aZLCOnTt3ol+/flCr1XBxcUHHjh3xwQcf4NatW3XGBwC//fYbAgICqpU/9NBDdW7vb7/9BgBmL79ixQqcOHEC3t7e6Nu3LxISEvDrr7/Wudz9jG1TTTw9PWFvb29Q1r17dwAw+3kz9aHvJ2N98vDDD+P69evVBml36dLFYLp9+/YA7iWlRK0Fx7AQWciDf+nrdDoIgoD//e9/Ru8YuX/Mw8CBA6HVapGVlYV9+/ZJicmgQYOwb98+nDlzBkVFRQYJy759+/Dkk09i8ODBWLt2LTw8PKBSqbBx40akpKTUGZ+lTZgwAYMGDcL27duxZ88evPXWW1i+fDm2bduGUaNG1auNxt4mQRCMlldVVTXqeupS0x1G4h9jo4haAyYsRDLh7+8PURTh6+sr/SVfk759+8La2hr79u3Dvn37MGfOHAD3niny0UcfISMjQ5rW+/LLL6FWq7F7927Y2NhI5Rs3bqx3jF27dsX58+erlZ89e7ZeywIwe3kA8PDwwIwZMzBjxgwUFhaiT58+WLp0qZSw1JRAmOPatWsoKyszOMty7tw5AJAGverPZNx/xxbw51mS+9U3Nn0/GeuTM2fOwNXVtdqZH6K2gJeEiGTiqaeeglKpRGJiYrW/jEVRxI0bN6RptVqNxx57DJ9++ikuXbpkcIblzp07ePfdd+Hv7w8PDw9pGaVSCUEQDP76z83NxY4dO+od41/+8hccOHAAhw4dksqKioqwZcuWOpf18PBAcHAwNm/ebHAJKj09HadOnap12aqqqmqXrTp16gRPT09oNBqpzN7e3ujlLXNUVlbiww8/lKYrKirw4YcfomPHjggJCQFwL8kEII0n0sf673//u1p79Y3t/n66PxE6ceIE9uzZg7/85S/mbhJRi8YzLEQy4e/vjyVLliAuLg65ubkYP348HB0dcfHiRWzfvh0vvfQSXnvtNan+oEGDsGzZMjg7O6N3794A7h3EH3roIZw9exbTpk0zaH/06NFYvXo1Ro4cieeeew6FhYVYs2YNunXrhmPHjtUrxrlz5+KTTz7ByJEjMWvWLOm25q5du9arjaSkJIwePRoDBw7E888/j5s3b+K9997DI488gtLS0hqXu337Njp37oxnnnkGQUFBcHBwwDfffIOffvoJq1atkuqFhIQgNTUVsbGxeOyxx+Dg4ICxY8fWa9se5OnpieXLlyM3Nxfdu3dHamoqcnJy8O9//1t6Ku0jjzyCfv36IS4uDjdv3oSLiwu2bt2KysrKau2ZEttbb72FUaNGoX///njhhRek25qdnZ2b5f1KRLJkyVuUiNoi/a2oRUVFRud/+eWX4sCBA0V7e3vR3t5e7NGjhzhz5kzx7NmzBvW+/vprEYA4atQog/IXX3xRBCD+5z//qdb2f/7zHzEgIEC0sbERe/ToIW7cuFGK534AaryF+NixY+KQIUNEtVotenl5iYsXLxb/85//1Ou2Zv32Pfzww6KNjY3Ys2dPcdu2bWJUVFSttzVrNBpxzpw5YlBQkOjo6Cja29uLQUFB4tq1aw2WKS0tFZ977jmxXbt2BrdK628z/vzzz6vFU9NtzY888oj4888/i/379xfVarXYtWtX8f3336+2/IULF8Tw8HDRxsZGdHNzE+fPny+mp6dXa7Om2Izd1iyKovjNN9+Ijz/+uGhrays6OTmJY8eOFU+dOmVQp6Z9qabbrYlaMkEUOSqLiIiI5I1jWIiIiEj2mLAQERGR7DFhISIiItljwkJERESyx4SFiIiIZI8JCxEREcleq3hwnE6nw7Vr1+Do6Nioj+YmIiKipiOKIm7fvg1PT08oFLWfQ2kVCcu1a9fg7e1t6TCIiIjIDJcvX0bnzp1rrdMqEhZHR0cA9zbYycnJ7Ha0Wi327NmDESNGSI/ebovYD+wDgH0AsA/02A/sA6Bp+qCkpATe3t7Scbw2rSJh0V8GcnJyanDCYmdnBycnpza7QwLsB4B9ALAPAPaBHvuBfQA0bR/UZzgHB90SERGR7DFhISIiItljwkJERESy1yrGsNSHKIqorKxEVVVVjXW0Wi2srKxw9+7dWuu1di2xH1QqFZRKpaXDICKiJtImEpaKigrk5eWhvLy81nqiKMLd3R2XL19u089zaYn9IAgCOnfuDAcHB0uHQkRETaDVJyw6nQ4XL16EUqmEp6cnrK2tazwI63Q6lJaWwsHBoc4H2LRmLa0fRFFEUVERrly5goCAAJ5pISJqhVp9wlJRUQGdTgdvb2/Y2dnVWlen06GiogJqtbpFHKibSkvsh44dOyI3NxdarZYJCxFRK9QyjkaNoKUceMk8LeXSFRERmYdHcSIiIpI9JixEREQke61+DEtt3k4/ZzAtiiI0Gg1sbGya5BLDqxHdTao/dOhQfPfddwCAI0eOIDg4uNFjamyCIGD79u0YP358o7SXmZmJYcOGAQDGjRuHHTt2NEq7RETUsvAMi8xNnz4deXl56NWrl6VDMZCQkGA0gcrLy8OoUaMabT0DBgxAXl4eJkyY0GhtEhFRy9Omz7C0BHZ2dnB3d7d0GPXW2LFaW1vD3d0dtra20Gg0jdo2ERG1HDzD0oJkZmZCEARkZGQgNDQUdnZ2GDBgAM6ePWtQ76uvvkKfPn2gVqvh5+eHxMREVFZWSvPPnDmDgQMHQq1Wo2fPnvjmm28gCILB5Zb4+Hj06NEDdnZ28PPzw4IFC6DVagEAmzZtQmJiIo4ePQpBECAIAjZt2gQABu0MGDAAr7/+ukFsRUVFUKlU+P777wEAGo0Gr732Gry8vGBvb4+wsDBkZmY2bscREVGLxzMsLdAbb7yBVatWoWPHjnj55Zfx/PPP48cffwQA7Nu3D1OnTsW7776LQYMG4cKFC3jppZcA3EtCqqqqMH78eHTp0gUHDx7E7du38a9//avaOhwdHbFhwwZ07twZx48fx/Tp0+Ho6Ii5c+di4sSJOHHiBNLS0vDNN98AAJydnau1MWXKFKxYsQLLli2TxgSlpqbC09MTgwYNAgDExMTg1KlT2Lp1Kzw9PbF9+3aMHDkSx48fR0BAQJP0H9Vsbc5a6bOgE+AJT6w/vh6iQrRgVMCM4BkWXT8RWR7PsLRAS5cuxZAhQ9CzZ0/MmzcP+/fvx927dwEAiYmJmDdvHqKiouDn54eIiAgsXrwYH374IQAgPT0dFy5cwMcff4ygoCAMHDgQS5curbaO1157DQMGDICPjw/Gjh2L1157DZ999hkAwNbWFg4ODrCysoK7u7t0yeZBEyZMwLVr1/DDDz9IZSkpKZg8eTIEQcClS5ewceNGfP755xg0aBD8/f3x2muvYeDAgdi4cWNTdB0REbVQPMPSAgUGBkqfPTw8AACFhYXo0qULjh49ih9//NEgCamqqsLdu3dRXl6Os2fPwtvb22CsSd++fautY9u2bfjPf/6DCxcuoLS0FJWVlXBycjIpzo4dO2LEiBHYsmULBg0ahIsXLyIrK0tKno4fP46qqip0725495RGo0GHDh1MWhcREbVuTFhaIJVKJX3WX2rR6XQAgNLSUiQmJuKpp56qtpxara5X+1lZWXjppZeQkJCAkSNHwtnZGVu3bsWqVatMjnXKlCn45z//iffeew8pKSno3bs3evfuLcWqVCqRnZ1d7XH6fIkhERHdz6xLQmvWrIGPjw/UajXCwsJw6NChGutu27YNoaGhaNeuHezt7REcHIxPPvnEoM60adOkwZv6n5EjR5oTWpvXp08fnD17Ft26dav2o1Ao8NBDD+Hy5csoKCiQlvnpp58M2sjKyoK3tzfmz5+P0NBQBAQE4LfffjOoY21tjaqqqjrjGTduHO7evYu0tDSkpKRgypQp0rxHH30UVVVVKCwsrBZrS7ozioiImp7JZ1hSU1MRGxuLdevWISwsDMnJyYiMjMTZs2fRqVOnavVdXFzwxhtvoEePHrC2tsbOnTsRHR2NTp06ITIyUqo3cuRIg3ELNjY2Zm5S27Zw4UKMGTMGXbp0wTPPPAOFQoGjR4/ixIkTWLJkCSIiIuDv74+oqCisWLECt2/fxptvvgngz7M13bp1w5UrV7B161aEhYXh66+/xvbt2w3W4+Pjg4sXLyInJwedO3eGo6Oj0e/M3t4e48ePx4IFC3D69GlMnjxZmte9e3dMmTIFU6dOxapVq/Doo4+iqKgIGRkZCAwMxOjRo5uwp4iIqCUxOWFZvXo1pk+fjujoaADAunXr8PXXX2PDhg2YN29etfpDhw41mJ41axY2b96MH374wSBhsbGxafa/qh988qxOp0NJSQmcnJxa7MsSIyMjsXPnTixatAjLly+HSqVCjx498OKLLwIAlEolduzYgRdffBGPPfYY/Pz88NZbb2Hs2LHSJaMnn3wSr7zyCv75z39Co9Fg9OjRWLBgARISEqT1PP3009i2bRuGDRuG4uJibNy4EdOmTTMa05QpU/CXv/wFgwcPRpcuXQzmbdy4EUuWLMG//vUvXL16Fa6urujXrx/GjBnTJP1DREQtk0kJS0VFBbKzsxEXFyeVKRQKhIeHIysrq87lRVHEt99+i7Nnz2L58uUG8zIzM9GpUye0b98ew4cPx5IlS2oceKnRaAweIlZSUgIA0Gq10rNC9LRaLURRhE6nk8Z51Baf/t+66jaX+2MZPHiwdBlGXxYYGFitLCIiAhEREdXa0s/v3r279BwUANIt0X5+ftDpdBBFEYsWLcLbb79t8IqCf/7zn1IbKpVKumvo/vYfjAW4l0QZKwfuJVDx8fGIj4+vMV59P9T2vejj1mq11cbDmEO/Hz24P7V2gk6o9vn+Mkux1PfQVveDB7Ef2AdA0/SBKW0Jov4oXQ/Xrl2Dl5cX9u/fj/79+0vlc+fOxXfffYeDBw8aXe7WrVvw8vKCRqOBUqnE2rVr8fzzz0vzt27dCjs7O/j6+uLChQuYP38+HBwckJWVZfTgk5CQgMTExGrlKSkpsLOzMyjT33rr7e0Na2vr+m6qLIwZMwaHDh2CtbU1du/ejUceeaRR2t25cyfs7e3h7++PX3/9FXFxcXB2dkZaWlqjtN+Y9u/fjwkTJkCj0Uh3HBlTUVGBy5cvIz8/3+AheUREJF/l5eV47rnncOvWrTrvRG2Wu4QcHR2Rk5OD0tJSZGRkIDY2Fn5+ftLlokmTJkl1e/fujcDAQPj7+yMzMxNPPPFEtfbi4uIQGxsrTZeUlMDb2xsjRoyotsF3797F5cuX4eDgUOddMqIo4vbt23B0dGySlx+a6tNPP8WdO3cAAF26dGm0hKuyshKvv/46Ll26BFdXVzzxxBNYuXKl1Hdy6ochQ4bg8OHDAO7dOVTTDn337l3Y2tpi8ODB9b4bqjZarRbp6emIiIgwuCurtVt/fL30WdAJ8LjqgTyvPIs/OO7F3i9aZL1tdT94EPuBfQA0TR/or5DUh0kJi6urK5RKpcEdJgBQUFBQ6/gThUKBbt26AQCCg4Nx+vRpJCUlVRvfoufn5wdXV1f88ssvRhMWGxsbowM8VSpVtU6sqqqCIAhQKBR1jkvRX27Q17c0b2/vJml32rRpNY43AeTVD/b29tWe02KMQqGAIAhG94GGaOz25M5YYiIqRIsnLJb+DtraflAT9gP7AGjcPjClHZOORtbW1ggJCUFGRoZUptPpkJGRYXCJqC46na7WF9lduXIFN27ckB6KRkRERG2byZeEYmNjERUVhdDQUPTt2xfJyckoKyuT7hqaOnUqvLy8kJSUBABISkpCaGgo/P39odFosGvXLnzyySf44IMPAPz5oLOnn34a7u7uuHDhAubOnYtu3boZ3EVEREREbZfJCcvEiRNRVFSEhQsXIj8/H8HBwUhLS4ObmxsA4NKlSwaXEcrKyjBjxgxcuXIFtra26NGjB/773/9i4sSJAO7dJXLs2DFs3rwZxcXF8PT0xIgRI7B48WI+i4WIiIgAmDnoNiYmBjExMUbnZWZmGkwvWbIES5YsqbEtW1tb7N6925wwiIiIqI2w/MhSIiIiojowYSEiIiLZa9tva96bZDApiCLUGg0EGxugKZ4/Miyu7jr3GTp0KL777jsAwJEjRxAcHNz4MTWDTZs2Yfbs2SguLpam9YO0Z82aheTkZMsFR0RELQLPsMjc9OnTkZeXh169ejXbOjMzM9G+fXspwWhsEydORF5enkm3whMRUdvWts+wtAB2dnbN/lLI+qqoqDDr6bu2trawtbVtca9KICIiy+EZlhYkMzMTgiAgIyMDoaGhsLOzw4ABA3D27FmDel999RX69OkDtVoNPz8/JCYmSu/Xyc3NhSAIyMnJkeoXFxdDEARkZmYiNzdXerpwhw4dIAiC9FTcoUOHIiYmBrNnz4arq6v0nJzVq1ejd+/esLe3h7e3N2bMmIHS0tKm7xAiImozmLC0QG+88QZWrVqFn3/+GVZWVgYvkty3bx+mTp2KWbNm4dSpU/jwww+xadMmLF26tF5te3t74/PPPwcAnD59Gnl5eXjnnXek+Zs3b4a1tTV+/PFHrFu3DsC9x+K/++67OHnyJDZv3oxvv/0Wc+fObcQtJiKito6XhFqgpUuXYsiQIQCAefPmYfTo0bh79y7UajUSExMxb948REVFAbj3XqbFixdj7ty5iI+Pr7NtpVIJFxcXAECnTp2kz3oBAQFYsWKFQdns2bOlzz4+PliyZAlefvllrF27tiGbSUREJGHC0gIFBgZKn/XvWyosLESXLl1w9OhR/PjjjwZnVKqqqnD37l2Ul5c3eN0hISHVyr755hskJSXhzJkzKCkpQWVlpbQ+Ozu7Bq+TiIiICUsLdP/bLYU/br/Wv2FZ/26mp556qtpyarVaem2CKP759l2tVlvvddvb2xtM5+bmYsyYMXjllVewdOlSuLi44IcffsALL7yAiooKJixERNQomLC0Mn369MHZs2fRrVs3o/M7duwIAMjLy8Ojjz4KAAYDcAFId+9UVVXVub7s7GzodDqsWrVKSoY+++wzc8MnIiIyiglLK7Nw4UKMGTMGXbp0wTPPPAOFQoGjR4/ixIkTWLJkCWxtbdGvXz8sW7YMvr6+KCwsxJtvvmnQRteuXSEIAnbu3IkxY8bA1tYWDg4ORtfXrVs3aLVavPfeexg7dqzBYFwiIqLG0rYTlgeePCvqdLhbUgJrJycIipZ5A1VkZCR27tyJRYsWYfny5VCpVOjRowdefPFFqc6GDRvwwgsvICQkBA899BBWrFiBESNGSPO9vLwQFxeH+fPn44UXXsDUqVOxadMmo+sLCgrC6tWrsXz5csTFxWHw4MFISkrC1KlTm3pTiYioDWnbCUsLM3ToUIOxJwAQHBxcrSwyMlJ6RooxDz/8MPbv329Q9mAbc+bMweLFi6XLPED1N3Hrvfrqq3j11VcNyv72t79Jn6dNmyY9y4WIiMgcLfM0Qhuydu1aODg44Pjx45YOpdFs2bIFDg4O2Ldvn6VDISKiFoJnWGRsy5YtuHPnDgCgS5cuFo6m8Tz55JMICwsDALRr186ywRARUYvAhEXGvLy8LB1Ck3B0dISjo6OlwyAiohaEl4SIiIhI9piwEBERkewxYSEiIiLZY8JCREREsseEhYiIiGSPdwkREdXH3iRLR2DcA0/spnoy9fsUFQB6APtWA4KuSUICwO+zFm06YVmbs9ZgWhRFaDQa2NjYSG9BbkwzgmeYVH/o0KH47rvvAABHjhxBcHBwo8dkbJ1BQUFITEyssc6mTZswe/ZsFBcXN9p6p02bhs2bNwMAtm/fjvHjxzda20RE1PLxkpDMTZ8+HXl5eejVq1ezrG/btm1YtGiRNO3j44Pk5GSDOhMnTsS5c+cadb3vvPMO8vLyGrVNIiJqPdr0GZaWwM7ODu7u7s22PhcXF+h0OpSUlNRYx9bWFra2to26XmdnZzg7Ozdqm0RE1HrwDEsLkpmZCUEQ8PXXXyMwMBBqtRr9+vXDiRMnDOp9+eWXeOSRR2BjYwMfHx+sWrXKYP7atWsREBAAtVoNNzc3PPPMM9K8oUOHSi8yHD58OH777Te8+uqrEARBuky2adMm6ZH6586dgyAIOHPmjME63n77bfj7+0vTJ06cwKhRo+Dg4AA3Nzf87W9/w/Xr1xutb4iIqHVjwtICzZkzB6tWrcJPP/2Ejh07YuzYsdBqtQCA7OxsTJgwAZMmTcLx48eRkJCABQsWYNOmTQCAn3/+Gf/85z+xaNEinD17FmlpaRg8eLDR9XzxxRfo3LkzFi1ahLy8PKOXbLp3747Q0FBs2bLFoHzLli147rnnAADFxcUYPnw4Hn30Ufz8889IS0tDQUEBJkyY0Ii9QkRErRkvCbVA8fHxiIiIAABs3rwZnTt3xvbt2zFhwgSsXr0aTzzxBBYsWADgXkJx6tQpvPXWW5g2bRouXboEe3t7jBkzBo6OjujatSseffRRo+txcXGBUqmEo6NjrZelpkyZgvfffx+LFy8GcO+sS3Z2Nv773/8CAN5//308+uij+L//+z9pmQ0bNsDb2xvnzp1D9+7dG6VfiIio9eIZlhaof//+0mcXFxc89NBDOH36NADg9OnTePzxxw3qP/744zh//jyqqqoQERGBrl27ws/PD3/729+wZcsWlJeXNyieSZMmITc3FwcOHABw7+xKnz590KNHDwDA0aNHsXfvXjg4OEg/+nkXLlxo0LqJiKhtYMLSxjg6OuLw4cP49NNP4eHhgYULFyIoKKhBtyi7u7tj+PDhSElJAQCkpKRgypQp0vzS0lKMHTsWOTk5Bj/nz5+v8XIUERHR/ZiwtED6MxkA8Pvvv+PcuXN4+OGHAQAPP/wwfvzxR4P6P/74I7p37w6lUgkAsLKyQnh4OFasWIFjx44hNzcX3377rdF1WVtbo6qqqs6YpkyZgtTUVGRlZeHXX3/FpEmTpHl9+vTByZMn4ePjg27duhn82Nvbm7z9RETU9jBhaYEWLVqEjIwMnDhxAtOmTYOrq6v0oLV//etfyMjIwOLFi3Hu3Dls3rwZ77//Pl577TUAwM6dO/Huu+8iJycHv/32Gz7++GPodDo89NBDRtfl4+OD77//HlevXq31rp6nnnoKt2/fxiuvvIJhw4bB09NTmjdz5kzcvHkTkydPxk8//YQLFy5g9+7diI6OrlcyRERE1KYH3T745Fn980ecnJygUMg3l1u2bBlmzZqF8+fPIzg4GP/v//0/WFtbA7h3NuOzzz7DwoULsXjxYnh4eGDRokWYNm0aAKBdu3bYtm0bEhIScPfuXQQEBODTTz/FI488YnRdixYtwt///nf4+/tDo9FAFEWj9RwdHTF27Fh89tln2LBhg8E8T09P/Pjjj3j99dcxYsQIaDQadO3aFSNHjpR1PxMRkXy06YSlpRo4cGC1Z6/c7+mnn8bTTz9d47KZmZk1LpuZmWnw4Lh+/frh6NGjBnWmTZsmJUD3S01NRWpqqtF2AwICsG3bthrXS0REVBv+eStza9euhYODA44fP27pUJrUyy+/DAcHB0uHQUREMsUzLDK2ZcsW3LlzBwDQpUsX7N+/38IRNZ1FixZJ42w8PDwsHA0REckNExYZ8/LyMpgeOnRojWNIWrpOnTqhU6dOlg6DiIhkyqxLQmvWrIGPjw/UajXCwsJw6NChGutu27YNoaGhaNeuHezt7REcHIxPPvnEoI4oili4cCE8PDxga2uL8PBwnD9/3pzQiIiIqBUyOWFJTU1FbGws4uPjcfjwYQQFBSEyMhKFhYVG67u4uOCNN95AVlYWjh07hujoaERHR2P37t1SnRUrVuDdd9/FunXrcPDgQdjb2yMyMhJ37941f8se0FrPTNA9/H6JiFo3kxOW1atXY/r06YiOjkbPnj2xbt062NnZVbuVVW/o0KH461//iocffhj+/v6YNWsWAgMD8cMPPwC4d6BJTk7Gm2++iXHjxiEwMBAff/wxrl27hh07djRo4wBApVIBQIMfP0/yVlFRAQDSw/GIiKh1MWkMS0VFBbKzsxEXFyeVKRQKhIeHIysrq87lRVHEt99+i7Nnz2L58uUAgIsXLyI/Px/h4eFSPWdnZ4SFhSErK8vgial6Go0GGo1GmtbfgqvVaqW3Ft/P0dERBQUF0Ol0sLOzgyAINcZXUVGBO3fu1FinLWhp/aDT6VBYWAi1Wg1RFI3uA6bSt9EYbbUkgk6o9vn+Mkux1PdgsB+IMr2pshn6plX+Ppj4fWr/qK9t6v1Axn3cFPuBKW2ZlLBcv34dVVVVcHNzMyh3c3PDmTNnalzu1q1b8PLygkajgVKpxNq1a6W3Defn50ttPNimft6DkpKSkJiYWK18z549sLOzM7qMo6MjysrK+KCyVkqr1aKoqAjHjh1r1HbT09MbtT2584RntTKPq5a/a2vX5V0WXf+9/aCHRWOo0a7m65vW9ftg3veZXtrEb5dvxu/TXI25H5hy9aNZ7hJydHRETk4OSktLkZGRgdjYWPj5+WHo0KFmtRcXF4fY2FhpuqSkBN7e3hgxYgScnJxqXK6qqgqVlZU1jneorKzE/v37MWDAAFhZtd0bqFpaPwiCAJVK1ajJqFarRXp6OiIiIqTLim3B+uPrpc+CToDHVQ/keeVBVFh2jNCLvV+0yHoN9oMD71kkhjoNiq27TgO1yt+HfatNqq4VFUgv7Y4Ih3NQCbomCgrN8n2aqyn2A/0Vkvow6Wjk6uoKpVKJgoICg/KCggK4u7vXuJxCoUC3bt0AAMHBwTh9+jSSkpIwdOhQabmCggKD528UFBQgODjYaHs2NjawsbGpVq5SqWrtxLo6WKvVorKyEg4ODq3nl9IM7Ic/1bVPtTbGEhNRIVo8YbH0d6BSqZr2INUQzdg3rer3wczvUyXomnZfaAH925j7gSntmPQnqbW1NUJCQpCRkSGV6XQ6ZGRkoH///vVuR6fTSWNQfH194e7ubtBmSUkJDh48aFKbRERE1HqZfL4/NjYWUVFRCA0NRd++fZGcnIyysjJER0cDAKZOnQovLy8kJSUBuDfeJDQ0VHp53q5du/DJJ5/ggw8+AHDvdP7s2bOxZMkSBAQEwNfXFwsWLICnp6f0BmIiIiJq20xOWCZOnIiioiIsXLgQ+fn5CA4ORlpamjRo9tKlSwZjCcrKyjBjxgxcuXIFtra26NGjB/773/9i4sSJUp25c+eirKwML730EoqLizFw4ECkpaVBrVY3wiYSERFRS2fWiMqYmBjExMQYnffgm4CXLFmCJUuW1NqeIAhYtGgRFi1aZE44RERE1MrxHl8iIiKSPSYsREREJHtMWIiIiEj2mLAQERGR7DFhISIiItljwkJERESyx4SFiIiIZI8JCxEREckeExYiIiKSPSYsREREJHtMWIiIiEj2zHqXEFFr8Xb6OaPlglgFXwBr9v4CUVA2a0yvRnRv1vUREbUEPMNCREREsseEhYiIiGSPCQsRERHJHhMWIiIikj0mLERERCR7TFiIiIhI9piwEBERkewxYSEiIiLZY8JCREREsseEhYiIiGSPCQsRERHJHhMWIiIikj0mLERERCR7TFiIiIhI9piwEBERkewxYSEiIiLZY8JCREREsseEhYiIiGSPCQsRERHJHhMWIiIikj0rSwdARIbeTj9nsXUfLrkhfbaCAuOtPC0WCxHR/XiGhYiIiGSPCQsRERHJHhMWIiIikj0mLERERCR7TFiIiIhI9sxKWNasWQMfHx+o1WqEhYXh0KFDNdb96KOPMGjQILRv3x7t27dHeHh4tfrTpk2DIAgGPyNHjjQnNCIiImqFTE5YUlNTERsbi/j4eBw+fBhBQUGIjIxEYWGh0fqZmZmYPHky9u7di6ysLHh7e2PEiBG4evWqQb2RI0ciLy9P+vn000/N2yIiIiJqdUxOWFavXo3p06cjOjoaPXv2xLp162BnZ4cNGzYYrb9lyxbMmDEDwcHB6NGjB9avXw+dToeMjAyDejY2NnB3d5d+2rdvb94WERERUatj0oPjKioqkJ2djbi4OKlMoVAgPDwcWVlZ9WqjvLwcWq0WLi4uBuWZmZno1KkT2rdvj+HDh2PJkiXo0KGD0TY0Gg00Go00XVJSAgDQarXQarWmbJIB/bINaaM1aEv9IIhVtZbXNL+1srrvbxj9Z0EnWCociaX2RYPfBVGmQ/6aoW9a5f8JJn6f2j/qa5t6P5BxHzfFfmBKW4IoimJ9K1+7dg1eXl7Yv38/+vfvL5XPnTsX3333HQ4ePFhnGzNmzMDu3btx8uRJqNVqAMDWrVthZ2cHX19fXLhwAfPnz4eDgwOysrKgVCqrtZGQkIDExMRq5SkpKbCzs6vv5hAREZEFlZeX47nnnsOtW7fg5ORUa91mfTT/smXLsHXrVmRmZkrJCgBMmjRJ+ty7d28EBgbC398fmZmZeOKJJ6q1ExcXh9jYWGm6pKREGhtT1wbXRqvVIj09HREREVCpVGa309K1pX5Ys/cXo+WCWAWfuxeQq/aHKFRPmluro7e3SZ+toMAYq57I88qDqKj33zVN4sXeL1pkvQa/Cwfes0gMdRoUW3edBmqV/yfsW21Sda2oQHppd0Q4nINK0DVRUPKm7fePRt8P9FdI6sOkhMXV1RVKpRIFBQUG5QUFBXB3d6912ZUrV2LZsmX45ptvEBgYWGtdPz8/uLq64pdffjGasNjY2MDGxqZauUqlapRObKx2Wrq20A91JSOioGxTCUslqv9HLCpEiycslt4PVSqVfA9Szdg3rer/BDO/T5Wgk+++0NT++O4bcz8wpR2TLsZZW1sjJCTEYMCsfgDt/ZeIHrRixQosXrwYaWlpCA0NrXM9V65cwY0bN+Dh4WFKeERERNRKmTx6KDY2Fh999BE2b96M06dP45VXXkFZWRmio6MBAFOnTjUYlLt8+XIsWLAAGzZsgI+PD/Lz85Gfn4/S0lIAQGlpKebMmYMDBw4gNzcXGRkZGDduHLp164bIyMhG2kwiIiJqyUwewzJx4kQUFRVh4cKFyM/PR3BwMNLS0uDm5gYAuHTpEhSKP/OgDz74ABUVFXjmmWcM2omPj0dCQgKUSiWOHTuGzZs3o7i4GJ6enhgxYgQWL15s9LIPERERtT1mDbqNiYlBTEyM0XmZmZkG07m5ubW2ZWtri927d5sTBhEREbURMn2wABEREdGfmLAQERGR7DFhISIiItljwkJERESyx4SFiIiIZI8JCxEREckeExYiIiKSPSYsREREJHtMWIiIiEj2mLAQERGR7DFhISIiItljwkJERESyx4SFiIiIZI8JCxEREckeExYiIiKSPSYsREREJHtMWIiIiEj2mLAQERGR7DFhISIiItmzsnQA1LqszVlr6RCqmRE8w9IhUCu0tviYpUO4pxl+5wSdAE94Yv3x9RAVosnL83eQGgPPsBAREZHsMWEhIiIi2WPCQkRERLLHhIWIiIhkjwkLERERyR4TFiIiIpI93tZMzSrrwo1mX6em6Fyzr7M1OXTxJiqhs2gM93+Hr0Z0t2AkRGQpPMNCREREsseEhYiIiGSPl4SIiKhpXNx379/fb1k2DmoVeIaFiIiIZI8JCxEREckeExYiIiKSPSYsREREJHtMWIiIiEj2mLAQERGR7DFhISIiItljwkJERESyx4SFiIiIZM+shGXNmjXw8fGBWq1GWFgYDh06VGPdjz76CIMGDUL79u3Rvn17hIeHV6sviiIWLlwIDw8P2NraIjw8HOfPnzcnNCIiImqFTE5YUlNTERsbi/j4eBw+fBhBQUGIjIxEYWGh0fqZmZmYPHky9u7di6ysLHh7e2PEiBG4evWqVGfFihV49913sW7dOhw8eBD29vaIjIzE3bt3zd8yIiIiajVMfpfQ6tWrMX36dERHRwMA1q1bh6+//hobNmzAvHnzqtXfsmWLwfT69evx5ZdfIiMjA1OnToUoikhOTsabb76JcePGAQA+/vhjuLm5YceOHZg0aVK1NjUaDTQajTRdUlICANBqtdBqtaZukkS/bEPaaA0a0g+CTqh1vpUFrkIKYpXZy5izbEt2//ej/2yJ7+xB938Pzfn7afC7IBr2gyCXV7HV8TvXGPS/13X9fld3r4+0ouX3oYbSb0Nr2BZzNcUx0pS2BFEUxfpWrqiogJ2dHb744guMHz9eKo+KikJxcTG++uqrOtu4ffs2OnXqhM8//xxjxozBr7/+Cn9/fxw5cgTBwcFSvSFDhiA4OBjvvPNOtTYSEhKQmJhYrTwlJQV2dnb13RwiIiKyoPLycjz33HO4desWnJycaq1r0p8I169fR1VVFdzc3AzK3dzccObMmXq18frrr8PT0xPh4eEAgPz8fKmNB9vUz3tQXFwcYmNjpemSkhLpUlNdG1wbrVaL9PR0REREQKVSmd1OS9eQflh/fH2t8w9dvNmQ0MwS5PiUycsIYhV87l5ArtofoqBsgqjk6ejtbdJnKygwxqondlaeQiV0FozK8DucOaxbs63X4HfhwHsG89bfOtFscdSq64AmX4WgE+Bx1QN5XnkQFfX+Gxf4bT8A4EXnXk0UWfPRigqkl3ZHhMM5qATL/j5YirbfPxr9GKm/QlIfzXpOc9myZdi6dSsyMzOhVqvNbsfGxgY2NjbVylUqVaN0YmO109KZ0w91/WdmiQNfQxIOUVC2qYTF2PdTCZ3FE5b7vwNL/G6qVKpqBykRlc0eh1GmJBANJCpE0xKWP/qoNR3gVYKuVW2PSf743WvMY6Qp7Zh0Mc7V1RVKpRIFBQUG5QUFBXB3d6912ZUrV2LZsmXYs2cPAgMDpXL9cua0SURERG2DSQmLtbU1QkJCkJGRIZXpdDpkZGSgf//+NS63YsUKLF68GGlpaQgNDTWY5+vrC3d3d4M2S0pKcPDgwVrbJCIiorbD5EtCsbGxiIqKQmhoKPr27Yvk5GSUlZVJdw1NnToVXl5eSEpKAgAsX74cCxcuREpKCnx8fKRxKQ4ODnBwcIAgCJg9ezaWLFmCgIAA+Pr6YsGCBfD09DQY2EtERERtl8kJy8SJE1FUVISFCxciPz8fwcHBSEtLkwbNXrp0CQrFnyduPvjgA1RUVOCZZ54xaCc+Ph4JCQkAgLlz56KsrAwvvfQSiouLMXDgQKSlpTVonAsRERG1HmYNuo2JiUFMTIzReZmZmQbTubm5dbYnCAIWLVqERYsWmRMOERERtXJt9wk4RERE1GIwYSEiIiLZY8JCREREsseEhYiIiGSPCQsRERHJHhMWIiIikj0mLERERCR7TFiIiIhI9piwEBERkewxYSEiIiLZY8JCREREsseEhYiIiGSPCQsRERHJnllvayYiak6HS1Klz2tzOjTbegWdAE94Yv3x9RCLjzXbeomoOp5hISIiItljwkJERESyx4SFiIiIZI8JCxEREckeExYiIiKSPd4lRETUAJeL71h0/Vcu3GjydVhBgfFWnjh08SYqoav3cp1L/uibdk0TF7UtPMNCREREsseEhYiIiGSPCQsRERHJHhMWIiIikj0mLERERCR7TFiIiIhI9piwEBERkewxYSEiIiLZY8JCREREsseEhYiIiGSPCQsRERHJHhMWIiIikj0mLERERCR7TFiIiIhI9piwEBERkewxYSEiIiLZY8JCREREsseEhYiIiGTPrIRlzZo18PHxgVqtRlhYGA4dOlRj3ZMnT+Lpp5+Gj48PBEFAcnJytToJCQkQBMHgp0ePHuaERkRERK2QyQlLamoqYmNjER8fj8OHDyMoKAiRkZEoLCw0Wr+8vBx+fn5YtmwZ3N3da2z3kUceQV5envTzww8/mBoaERERtVJWpi6wevVqTJ8+HdHR0QCAdevW4euvv8aGDRswb968avUfe+wxPPbYYwBgdL4UiJVVrQlNS/B2+jlLh1DNqxHdLR0CERFRg5mUsFRUVCA7OxtxcXFSmUKhQHh4OLKyshoUyPnz5+Hp6Qm1Wo3+/fsjKSkJXbp0MVpXo9FAo9FI0yUlJQAArVYLrVZrdgz6Zc1tQxCrzF53UzFnWxrSD4JOqHW+lQWGTZnzveiXkeN32pTu/370ny3xndWmrn2sKdZ171/j/10qoWq2eIxpju/H3H1B3zdaUV77kDn029AatsVcDT1G1tZmfQiiKIr1rXzt2jV4eXlh//796N+/v1Q+d+5cfPfddzh48GCty/v4+GD27NmYPXu2Qfn//vc/lJaW4qGHHkJeXh4SExNx9epVnDhxAo6OjtXaSUhIQGJiYrXylJQU2NnZ1XdziIiIyILKy8vx3HPP4datW3Bycqq1rsmXhJrCqFGjpM+BgYEICwtD165d8dlnn+GFF16oVj8uLg6xsbHSdElJCby9vTFixIg6N7g2Wq0W6enpiIiIgEpl+l9Na/b+Yva6m8rMYd1MXqYh/bD++Ppa5x+6eNPkeBoqyPEpk5cRxCr43L2AXLU/REHZBFHJ09Hb26TPVlBgjFVP7Kw8hUroLBiVob6+Ls22LkEnwOOqB/K88iBe/tFonavFd5stHqPrdwpu8nWYuy94leQAAOK7hjZRZM1HKyqQXtodEQ7noBLk8/vQnLT9/tGgY6Qx+isk9WFSwuLq6gqlUomCggKD8oKCgkYdf9KuXTt0794dv/xiPAGwsbGBjY1NtXKVStUonWhuO3I8sDWkP8zpB1FR+wk7Sxz4GvK9iIJSlt9rUzH2/VRCJ6uEpa59rKnWKaLS6LwqNN7pcXM053dj6r6g75vWdIBXCbpWtT0m+eN40FjHWn1b9WXSxThra2uEhIQgIyNDKtPpdMjIyDC4RNRQpaWluHDhAjw8PBqtTSIiImq5TL4kFBsbi6ioKISGhqJv375ITk5GWVmZdNfQ1KlT4eXlhaSkJAD3BuqeOnVK+nz16lXk5OTAwcEB3brdu1zx2muvYezYsejatSuuXbuG+Ph4KJVKTJ48ubG2k4iIiFowkxOWiRMnoqioCAsXLkR+fj6Cg4ORlpYGNzc3AMClS5egUPx54ubatWt49NFHpemVK1di5cqVGDJkCDIzMwEAV65cweTJk3Hjxg107NgRAwcOxIEDB9CxY8cGbh4RERG1BmYNuo2JiUFMTIzRefokRM/Hxwd13Yi0detWc8IgIiKiNkIWdwkRmatzSXaddfoV3zK5XZ2gxPUOA/DYlU1QtMJnsRzo8pKlQyAiMknbfQIOERERtRhMWIiIiEj2mLAQERGR7DFhISIiItljwkJERESyx4SFiIiIZI8JCxEREcken8NCrd7/pzD9LdpKqPAYBuB/wq+oEprm5XZP6kx/kzYRUVvFMyxEREQke0xYiIiISPaYsBAREZHscQwLERE1qaxfb1g6BAP9/TpYOgQyA8+wEBERkewxYSEiIiLZY8JCREREsseEhYiIiGSPCQsRERHJHhMWIiIikj0mLERERCR7TFiIiIhI9piwEBERkewxYSEiIiLZY8JCREREsseEhYiIiGSPCQsRERHJHhMWIiIikj0mLERERCR7TFiIiIhI9piwEBERkexZWToAorbq/1P8YrF1XylJtdi6qe2x5L5uzJHiq5jRLtDSYZCJeIaFiIiIZI8JCxEREckeExYiIiKSPY5hIaIWJevCjWZblxUUGG/liUMXb8K95E6zrZeIquMZFiIiIpI9JixEREQke0xYiIiISPbMSljWrFkDHx8fqNVqhIWF4dChQzXWPXnyJJ5++mn4+PhAEAQkJyc3uE0iIiJqW0xOWFJTUxEbG4v4+HgcPnwYQUFBiIyMRGFhodH65eXl8PPzw7Jly+Du7t4obRIREVHbYnLCsnr1akyfPh3R0dHo2bMn1q1bBzs7O2zYsMFo/cceewxvvfUWJk2aBBsbm0Zpk4iIiNoWk25rrqioQHZ2NuLi4qQyhUKB8PBwZGVlmRWAOW1qNBpoNBppuqSkBACg1Wqh1WrNikO//P3/mkoQq8xed1MxZ1sa0g+CTqh1vlUjD5tSQtWo7ekp/mhX0UTtW1p9vgd9ncb+zlqS+/ugqfa1hmqO78fcfUGufSbAClrRtG3R1zd1udakocfI2tqsD5MSluvXr6Oqqgpubm4G5W5ubjhz5owpTTWozaSkJCQmJlYr37NnD+zs7MyK437p6elmLefb4DU3vl27zpm9rDn94AnPWuePt6p9vslcejVuew8IcZnYpO1bymMm1B1j1bPJ4mgpxlj1BFzk2Q+mfJcNZfK+0MS/nw2x67Z5y6WXdm/cQFqSP44J5h4jjSkvL6933Rb54Li4uDjExsZK0yUlJfD29saIESPg5ORkdrtarRbp6emIiIiASmX6XwZr9srrBV8AMHNYN5OXaUg/rD++vtb5hy7eNDme2niV5DRqe3oKqBDiMhHZN1OhQ+P9NSEXV52C66xjBQXGWPXEzspTqISu6YOSofv7wK3ksKXDMao+32VDmbsvNNXvZ0N5tVPjRWfTkimtqEB6aXdEOJyDSmibvw/afv9o0DHSGP0VkvowKWFxdXWFUqlEQUGBQXlBQUGNA2qbok0bGxuj42FUKlWjdKK57YiCssHrbmwN6Q9z+kFUiLXOb+wDX1UTJxM6aJt8HZZgyvdQCV2bTVj0KqGT7X7QnN+NqfuCXPtMhJXZSYdK0LXZhAV/HA8a61irb6u+TLoYZ21tjZCQEGRkZEhlOp0OGRkZ6N+/vylNNWmbRERE1LqYfEkoNjYWUVFRCA0NRd++fZGcnIyysjJER0cDAKZOnQovLy8kJSUBuDeo9tSpU9Lnq1evIicnBw4ODujWrVu92iQiIqK2zeSEZeLEiSgqKsLChQuRn5+P4OBgpKWlSYNmL126BIXizxM3165dw6OPPipNr1y5EitXrsSQIUOQmZlZrzaJiIiobTNr0G1MTAxiYmKMztMnIXo+Pj4QxdrHNdTVJhEREbVtbfeGciIiImoxmLAQERGR7DFhISIiItlrkQ+Oa25rc9bWq97hkhtNHInp1uZ0MHkZQSfAE55Yf3x9nc9VISIiag5MWIhINjqXZFs6BANKqACXXvAqyYH83hR2T3P0mWE/yPNhcNT68ZIQERERyR7PsLRyWRdMv0xlBQXGW3ni0MWbbf6R7EREJA88w0JERESyx4SFiIiIZI8JCxEREckeExYiIiKSPSYsREREJHtMWIiIiEj2mLAQERGR7DFhISIiItljwkJERESyx4SFiIiIZI8JCxEREckeExYiIiKSPSYsREREJHtMWIiIiEj2rCwdABE1v84l2XXWUUIFuPSCV0kOqqBthqiIiGrGMyxEREQke0xYiIiISPaYsBAREZHsMWEhIiIi2WPCQkRERLLHhIWIiIhkjwkLERERyR4TFiIiIpI9JixEREQke0xYiIiISPaYsBAREZHsMWEhIiIi2WPCQkRERLLHhIWIiIhkjwkLERERyR4TFiIiIpI9sxKWNWvWwMfHB2q1GmFhYTh06FCt9T///HP06NEDarUavXv3xq5duwzmT5s2DYIgGPyMHDnSnNCIiIioFbIydYHU1FTExsZi3bp1CAsLQ3JyMiIjI3H27Fl06tSpWv39+/dj8uTJSEpKwpgxY5CSkoLx48fj8OHD6NWrl1Rv5MiR2LhxozRtY2Nj5iZRU+hckm3pEIiIqA0z+QzL6tWrMX36dERHR6Nnz55Yt24d7OzssGHDBqP133nnHYwcORJz5szBww8/jMWLF6NPnz54//33DerZ2NjA3d1d+mnfvr15W0REREStjklnWCoqKpCdnY24uDipTKFQIDw8HFlZWUaXycrKQmxsrEFZZGQkduzYYVCWmZmJTp06oX379hg+fDiWLFmCDh06GG1To9FAo9FI0yUlJQAArVYLrVZryiYZ0C/7YBuCTqjX8latZEiQfjvu3x4lVJYKxyIUf2yvoo1t9/3YB+wDvdbWDwKsoBVN+/9aX9/U5VqTmo6RjdFmfZiUsFy/fh1VVVVwc3MzKHdzc8OZM2eMLpOfn2+0fn5+vjQ9cuRIPPXUU/D19cWFCxcwf/58jBo1CllZWVAqldXaTEpKQmJiYrXyPXv2wM7OzpRNMio9Pd1g2hOe9VpuvFX96rUUY6x6/jnh0qvmiq1YiMtES4dgcewD9oFea+qHXbfNWy69tHvjBtKS/HFsfPAY2RDl5eX1rmvyGJamMGnSJOlz7969ERgYCH9/f2RmZuKJJ56oVj8uLs7grE1JSQm8vb0xYsQIODk5mR2HVqtFeno6IiIioFL9+ZfE+uPr67X8oYs3zV63nFhBgTFWPbGz8hQqoQMAeJXkWDaoZqaACiEuE5F9MxU6NN5fEy0J+4B9oNfa+sGrnRovOpv2R5hWVCC9tDsiHM5BJeiaKDJ50/b7h9FjZEPor5DUh0kJi6urK5RKJQoKCgzKCwoK4O7ubnQZd3d3k+oDgJ+fH1xdXfHLL78YTVhsbGyMDspVqVSN0okPtiMqxHotpz+4txaV0EnbVNUK/pMyhw7aNrvteuwD9oFea+kHEVZmJx0qQddmExb8cVxsrGOtvq36MulinLW1NUJCQpCRkSGV6XQ6ZGRkoH///kaX6d+/v0F94N7ppJrqA8CVK1dw48YNeHh4mBIeERERtVImjx6KjY3FRx99hM2bN+P06dN45ZVXUFZWhujoaADA1KlTDQblzpo1C2lpaVi1ahXOnDmDhIQE/Pzzz4iJiQEAlJaWYs6cOThw4AByc3ORkZGBcePGoVu3boiMjGykzSQiIqKWzOQxLBMnTkRRUREWLlyI/Px8BAcHIy0tTRpYe+nSJSgUf+ZBAwYMQEpKCt58803Mnz8fAQEB2LFjh/QMFqVSiWPHjmHz5s0oLi6Gp6cnRowYgcWLF/NZLERERATAzEG3MTEx0hmSB2VmZlYre/bZZ/Hss88arW9ra4vdu3ebEwYRERG1EW33hnIiIiJqMZiwEBERkewxYSEiIiLZY8JCREREsseEhYiIiGSPCQsRERHJHhMWIiIikj0mLERERCR7TFiIiIhI9piwEBERkewxYSEiIiLZY8JCREREsseEhYiIiGSPCQsRERHJHhMWIiIikj0mLERERCR7TFiIiIhI9piwEBERkewxYSEiIiLZY8JCREREsseEhYiIiGSPCQsRERHJHhMWIiIikj0mLERERCR7TFiIiIhI9piwEBERkewxYSEiIiLZY8JCREREsseEhYiIiGSPCQsRERHJHhMWIiIikj0mLERERCR7TFiIiIhI9piwEBERkewxYSEiIiLZY8JCREREsseEhYiIiGSPCQsRERHJnlkJy5o1a+Dj4wO1Wo2wsDAcOnSo1vqff/45evToAbVajd69e2PXrl0G80VRxMKFC+Hh4QFbW1uEh4fj/Pnz5oRGRERErZDJCUtqaipiY2MRHx+Pw4cPIygoCJGRkSgsLDRaf//+/Zg8eTJeeOEFHDlyBOPHj8f48eNx4sQJqc6KFSvw7rvvYt26dTh48CDs7e0RGRmJu3fvmr9lRERE1GqYnLCsXr0a06dPR3R0NHr27Il169bBzs4OGzZsMFr/nXfewciRIzFnzhw8/PDDWLx4Mfr06YP3338fwL2zK8nJyXjzzTcxbtw4BAYG4uOPP8a1a9ewY8eOBm0cERERtQ5WplSuqKhAdnY24uLipDKFQoHw8HBkZWUZXSYrKwuxsbEGZZGRkVIycvHiReTn5yM8PFya7+zsjLCwMGRlZWHSpEnV2tRoNNBoNNL0rVu3AAA3b96EVqs1ZZMMaLValJeX48aNG1CpVFL53ZL6nenRlVeYvW450UGBcqty6CoroIMOAFB5x8JBNTMdgPLycmjv4I8eaHvYB+wDvdbWD3etdbhhZdr/11pRce/4IFRAJbSGXjCd9sYNo8fIhrh9+zaAeycv6mJSwnL9+nVUVVXBzc3NoNzNzQ1nzpwxukx+fr7R+vn5+dJ8fVlNdR6UlJSExMTEauW+vr712xCq08eWDkAWvrB0ADLAPmAf6LWufviXpQNokRKarOXbt2/D2dm51jomJSxyERcXZ3DWRqfT4ebNm+jQoQMEQTC73ZKSEnh7e+Py5ctwcnJqjFBbJPYD+wBgHwDsAz32A/sAaJo+EEURt2/fhqenZ511TUpYXF1doVQqUVBQYFBeUFAAd3d3o8u4u7vXWl//b0FBATw8PAzqBAcHG23TxsYGNjY2BmXt2rUzZVNq5eTk1GZ3yPuxH9gHAPsAYB/osR/YB0Dj90FdZ1b0TBp0a21tjZCQEGRkZEhlOp0OGRkZ6N+/v9Fl+vfvb1AfANLT06X6vr6+cHd3N6hTUlKCgwcP1tgmERERtS0mXxKKjY1FVFQUQkND0bdvXyQnJ6OsrAzR0dEAgKlTp8LLywtJSUkAgFmzZmHIkCFYtWoVRo8eja1bt+Lnn3/Gv//9bwCAIAiYPXs2lixZgoCAAPj6+mLBggXw9PTE+PHjG29LiYiIqMUyOWGZOHEiioqKsHDhQuTn5yM4OBhpaWnSoNlLly5BofjzxM2AAQOQkpKCN998E/Pnz0dAQAB27NiBXr16SXXmzp2LsrIyvPTSSyguLsbAgQORlpYGtVrdCJtYfzY2NoiPj692uamtYT+wDwD2AcA+0GM/sA8Ay/eBINbnXiIiIiIiC+K7hIiIiEj2mLAQERGR7DFhISIiItljwkJERESyx4SFiIiIZI8JSy2efPJJdOnSBWq1Gh4eHvjb3/6Ga9euWTqsZpObm4sXXngBvr6+sLW1hb+/P+Lj41FR0Tpe8lhfS5cuxYABA2BnZ9eoT1SWuzVr1sDHxwdqtRphYWE4dOiQpUNqVt9//z3Gjh0LT09PCILQ5t4en5SUhMceewyOjo7o1KkTxo8fj7Nnz1o6rGb3wQcfIDAwUHq6a//+/fG///3P0mFZ1LJly6RnqDUnJiy1GDZsGD777DOcPXsWX375JS5cuIBnnnnG0mE1mzNnzkCn0+HDDz/EyZMn8fbbb2PdunWYP3++pUNrVhUVFXj22WfxyiuvWDqUZpOamorY2FjEx8fj8OHDCAoKQmRkJAoLCy0dWrMpKytDUFAQ1qxZY+lQLOK7777DzJkzceDAAaSnp0Or1WLEiBEoKyuzdGjNqnPnzli2bBmys7Px888/Y/jw4Rg3bhxOnjxp6dAs4qeffsKHH36IwMDA5l+5SPX21VdfiYIgiBUVFZYOxWJWrFgh+vr6WjoMi9i4caPo7Oxs6TCaRd++fcWZM2dK01VVVaKnp6eYlJRkwagsB4C4fft2S4dhUYWFhSIA8bvvvrN0KBbXvn17cf369ZYOo9ndvn1bDAgIENPT08UhQ4aIs2bNatb18wxLPd28eRNbtmzBgAEDoFKpLB2Oxdy6dQsuLi6WDoOaUEVFBbKzsxEeHi6VKRQKhIeHIysry4KRkSXdunULANr0739VVRW2bt2KsrKyNvmuu5kzZ2L06NEG/zc0JyYsdXj99ddhb2+PDh064NKlS/jqq68sHZLF/PLLL3jvvffw97//3dKhUBO6fv06qqqqpNdt6Lm5uSE/P99CUZEl6XQ6zJ49G48//rjBa1XaiuPHj8PBwQE2NjZ4+eWXsX37dvTs2dPSYTWrrVu34vDhw9J7Ai2hzSUs8+bNgyAItf6cOXNGqj9nzhwcOXIEe/bsgVKpxNSpUyG28LcZmNoHAHD16lWMHDkSzz77LKZPn26hyBuPOX1A1FbNnDkTJ06cwNatWy0dikU89NBDyMnJwcGDB/HKK68gKioKp06dsnRYzeby5cuYNWsWtmzZ0uzv+Ltfm3uXUFFREW7cuFFrHT8/P1hbW1crv3LlCry9vbF///4WfTrQ1D64du0ahg4din79+mHTpk0GL7dsqczZDzZt2oTZs2ejuLi4iaOzrIqKCtjZ2eGLL74weGN6VFQUiouL2+RZRkEQsH379jb5BvmYmBh89dVX+P777+Hr62vpcGQhPDwc/v7++PDDDy0dSrPYsWMH/vrXv0KpVEplVVVVEAQBCoUCGo3GYF5TMfltzS1dx44d0bFjR7OW1el0AACNRtOYITU7U/rg6tWrGDZsGEJCQrBx48ZWkawADdsPWjtra2uEhIQgIyNDOkDrdDpkZGQgJibGssFRsxFFEf/4xz+wfft2ZGZmMlm5j06na/HHAVM88cQTOH78uEFZdHQ0evTogddff71ZkhWgDSYs9XXw4EH89NNPGDhwINq3b48LFy5gwYIF8Pf3b9FnV0xx9epVDB06FF27dsXKlStRVFQkzXN3d7dgZM3r0qVLuHnzJi5duoSqqirk5OQAALp16wYHBwfLBtdEYmNjERUVhdDQUPTt2xfJyckoKytDdHS0pUNrNqWlpfjll1+k6YsXLyInJwcuLi7o0qWLBSNrHjNnzkRKSgq++uorODo6SuOXnJ2dYWtra+Homk9cXBxGjRqFLl264Pbt20hJSUFmZiZ2795t6dCajaOjY7WxS/qxnc06pqlZ70lqQY4dOyYOGzZMdHFxEW1sbEQfHx/x5ZdfFq9cuWLp0JrNxo0bRQBGf9qSqKgoo32wd+9eS4fWpN577z2xS5cuorW1tdi3b1/xwIEDlg6pWe3du9fo9x4VFWXp0JpFTb/7GzdutHRozer5558Xu3btKlpbW4sdO3YUn3jiCXHPnj2WDsviLHFbc5sbw0JEREQtT+sYkEBEREStGhMWIiIikj0mLERERCR7TFiIiIhI9piwEBERkewxYSEiIiLZY8JCREREsseEhYiIiGSPCQsRERHJHhMWIiIikj0mLERERCR7/z9HhY5nYwKkDgAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "for ctrl_s in ctrl_str:\n", + " plt.hist(\n", + " [r for r, t in zip(logs[\"env/reward_dist\"], task_list) if t == ctrl_s],\n", + " density=True,\n", + " alpha=0.5,\n", + " label=ctrl_s,\n", + " )\n", + "plt.legend(loc=\"best\")\n", + "plt.title(\"reward distribution\")\n", + "plt.grid(True)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Save model\n", + "Finally, we save the model to disk for later usage." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "gpt2_model.save_pretrained(\"gpt2-imdb-ctrl\")\n", + "gpt2_tokenizer.save_pretrained(\"gpt2-imdb-ctrl\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "trl", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + }, + "vscode": { + "interpreter": { + "hash": "d2cfb53525227c89f8d14fa784301fa46c451cc9223d94ccce9e17956835eea2" + } + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/testbed/huggingface__trl/examples/notebooks/gpt2-sentiment.ipynb b/testbed/huggingface__trl/examples/notebooks/gpt2-sentiment.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..95f625f4f0d867002d530a3b057791ffa304d2c5 --- /dev/null +++ b/testbed/huggingface__trl/examples/notebooks/gpt2-sentiment.ipynb @@ -0,0 +1,861 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Tune GPT2 to generate positive reviews\n", + "> Optimise GPT2 to produce positive IMDB movie reviews using a BERT sentiment classifier as a reward function." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "\n", + "

Figure: Experiment setup to tune GPT2. The yellow arrows are outside the scope of this notebook, but the trained models are available through Hugging Face.

\n", + "
\n", + "\n", + "\n", + "In this notebook we fine-tune GPT2 (small) to generate positive movie reviews based on the IMDB dataset. The model gets the start of a real review and is tasked to produce positive continuations. To reward positive continuations we use a BERT classifier to analyse the sentiment of the produced sentences and use the classifier's outputs as rewards signals for PPO training." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup experiment" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Import dependencies" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install transformers trl wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "from tqdm import tqdm\n", + "import pandas as pd\n", + "\n", + "tqdm.pandas()\n", + "\n", + "from transformers import pipeline, AutoTokenizer\n", + "from datasets import load_dataset\n", + "\n", + "from trl import PPOTrainer, PPOConfig, AutoModelForCausalLMWithValueHead\n", + "from trl.core import LengthSampler" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Configuration" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = PPOConfig(\n", + " model_name=\"lvwerra/gpt2-imdb\",\n", + " learning_rate=1.41e-5,\n", + " log_with=\"wandb\",\n", + ")\n", + "\n", + "sent_kwargs = {\"top_k\": None, \"function_to_apply\": \"none\", \"batch_size\": 16}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import wandb\n", + "\n", + "wandb.init()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see that we load a GPT2 model called `gpt2_imdb`. This model was additionally fine-tuned on the IMDB dataset for 1 epoch with the huggingface [script](https://github.com/huggingface/transformers/blob/main/examples/legacy/run_language_modeling.py) (no special settings). The other parameters are mostly taken from the original paper [\"Fine-Tuning Language Models from Human Preferences\"](\n", + "https://huggingface.co/papers/1909.08593). This model as well as the BERT model is available in the Huggingface model zoo [here](https://huggingface.co/models). The following code should automatically download the models." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load data and models" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load IMDB dataset\n", + "The IMDB dataset contains 50k movie review annotated with \"positive\"/\"negative\" feedback indicating the sentiment. We load the IMDB dataset into a DataFrame and filter for comments that are at least 200 characters. Then we tokenize each text and cut it to random size with the `LengthSampler`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def build_dataset(\n", + " config,\n", + " dataset_name=\"stanfordnlp/imdb\",\n", + " input_min_text_length=2,\n", + " input_max_text_length=8,\n", + "):\n", + " \"\"\"\n", + " Build dataset for training. This builds the dataset from `load_dataset`, one should\n", + " customize this function to train the model on its own dataset.\n", + "\n", + " Args:\n", + " dataset_name (`str`):\n", + " The name of the dataset to be loaded.\n", + "\n", + " Returns:\n", + " dataloader (`torch.utils.data.DataLoader`):\n", + " The dataloader for the dataset.\n", + " \"\"\"\n", + " tokenizer = AutoTokenizer.from_pretrained(config.model_name)\n", + " tokenizer.pad_token = tokenizer.eos_token\n", + " # load imdb with datasets\n", + " ds = load_dataset(dataset_name, split=\"train\")\n", + " ds = ds.rename_columns({\"text\": \"review\"})\n", + " ds = ds.filter(lambda x: len(x[\"review\"]) > 200, batched=False)\n", + "\n", + " input_size = LengthSampler(input_min_text_length, input_max_text_length)\n", + "\n", + " def tokenize(sample):\n", + " sample[\"input_ids\"] = tokenizer.encode(sample[\"review\"])[: input_size()]\n", + " sample[\"query\"] = tokenizer.decode(sample[\"input_ids\"])\n", + " return sample\n", + "\n", + " ds = ds.map(tokenize, batched=False)\n", + " ds.set_format(type=\"torch\")\n", + " return ds" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dataset = build_dataset(config)\n", + "\n", + "\n", + "def collator(data):\n", + " return dict((key, [d[key] for d in data]) for key in data[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load pre-trained GPT2 language models" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We load the GPT2 model with a value head and the tokenizer. We load the model twice; the first model is optimized while the second model serves as a reference to calculate the KL-divergence from the starting point. This serves as an additional reward signal in the PPO training to make sure the optimized model does not deviate too much from the original language model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "model = AutoModelForCausalLMWithValueHead.from_pretrained(config.model_name)\n", + "ref_model = AutoModelForCausalLMWithValueHead.from_pretrained(config.model_name)\n", + "tokenizer = AutoTokenizer.from_pretrained(config.model_name)\n", + "\n", + "tokenizer.pad_token = tokenizer.eos_token" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Initialize PPOTrainer\n", + "The `PPOTrainer` takes care of device placement and optimization later on:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ppo_trainer = PPOTrainer(\n", + " config, model, ref_model, tokenizer, dataset=dataset, data_collator=collator\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load BERT classifier\n", + "We load a BERT classifier fine-tuned on the IMDB dataset." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "device = ppo_trainer.accelerator.device\n", + "if ppo_trainer.accelerator.num_processes == 1:\n", + " device = 0 if torch.cuda.is_available() else \"cpu\" # to avoid a `pipeline` bug\n", + "sentiment_pipe = pipeline(\n", + " \"sentiment-analysis\", model=\"lvwerra/distilbert-imdb\", device=device\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The model outputs are the logits for the negative and positive class. We will use the logits for positive class as a reward signal for the language model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'label': 'NEGATIVE', 'score': 2.335048198699951},\n", + " {'label': 'POSITIVE', 'score': -2.726576328277588}]" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "text = \"this movie was really bad!!\"\n", + "sentiment_pipe(text, **sent_kwargs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'label': 'POSITIVE', 'score': 2.557040214538574},\n", + " {'label': 'NEGATIVE', 'score': -2.294790267944336}]" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "text = \"this movie was really good!!\"\n", + "sentiment_pipe(text, **sent_kwargs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Generation settings\n", + "For the response generation we just use sampling and make sure top-k and nucleus sampling are turned off as well as a minimal length." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "gen_kwargs = {\n", + " \"min_length\": -1,\n", + " \"top_k\": 0.0,\n", + " \"top_p\": 1.0,\n", + " \"do_sample\": True,\n", + " \"pad_token_id\": tokenizer.eos_token_id,\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Optimize model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Training loop" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The training loop consists of the following main steps:\n", + "1. Get the query responses from the policy network (GPT-2)\n", + "2. Get sentiments for query/responses from BERT\n", + "3. Optimize policy with PPO using the (query, response, reward) triplet\n", + "\n", + "**Training time**\n", + "\n", + "This step takes **~2h** on a V100 GPU with the above specified settings." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "output_min_length = 4\n", + "output_max_length = 16\n", + "output_length_sampler = LengthSampler(output_min_length, output_max_length)\n", + "\n", + "\n", + "generation_kwargs = {\n", + " \"min_length\": -1,\n", + " \"top_k\": 0.0,\n", + " \"top_p\": 1.0,\n", + " \"do_sample\": True,\n", + " \"pad_token_id\": tokenizer.eos_token_id,\n", + "}\n", + "\n", + "\n", + "for epoch, batch in enumerate(tqdm(ppo_trainer.dataloader)):\n", + " query_tensors = batch[\"input_ids\"]\n", + "\n", + " #### Get response from gpt2\n", + " response_tensors = []\n", + " for query in query_tensors:\n", + " gen_len = output_length_sampler()\n", + " generation_kwargs[\"max_new_tokens\"] = gen_len\n", + " query_response = ppo_trainer.generate(query, **generation_kwargs).squeeze()\n", + " response_len = len(query_response) - len(query)\n", + " response_tensors.append(query_response[-response_len:])\n", + " batch[\"response\"] = [tokenizer.decode(r.squeeze()) for r in response_tensors]\n", + "\n", + " #### Compute sentiment score\n", + " texts = [q + r for q, r in zip(batch[\"query\"], batch[\"response\"])]\n", + " pipe_outputs = sentiment_pipe(texts, **sent_kwargs)\n", + " positive_scores = [\n", + " item[\"score\"]\n", + " for output in pipe_outputs\n", + " for item in output\n", + " if item[\"label\"] == \"POSITIVE\"\n", + " ]\n", + " rewards = [torch.tensor(score) for score in positive_scores]\n", + "\n", + " #### Run PPO step\n", + " stats = ppo_trainer.step(query_tensors, response_tensors, rewards)\n", + " ppo_trainer.log_stats(stats, batch, rewards)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Training progress\n", + "If you are tracking the training progress with Weights&Biases you should see a plot similar to the one below. Check out the interactive sample report on wandb.ai: [link](https://wandb.ai/huggingface/trl/runs/w9l3110g).\n", + "\n", + "
\n", + "\n", + "

Figure: Reward mean and distribution evolution during training.

\n", + "
\n", + "\n", + "One can observe how the model starts to generate more positive outputs after a few optimisation steps.\n", + "\n", + "> Note: Investigating the KL-divergence will probably show that at this point the model has not converged to the target KL-divergence, yet. To get there would require longer training or starting with a higher initial coefficient." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Model inspection\n", + "Let's inspect some examples from the IMDB dataset. We can use `ref_model` to compare the tuned model `model` against the model before optimisation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
queryresponse (before)response (after)rewards (before)rewards (after)
0I rented Zero Day4 for my sister. To my surprise, the Wii caug.... It is a pleasure. It is a huge leap 68 years...1.7360682.423731
1The onlydistro of herspecial compliments is the0.1508520.190159
2I've read a fewnews reports about Mr. Mueller's activities b...novels and I never watch this. It has a reall...-1.4179622.831814
3This is the second British Rank film, and I wouldn't be surprised anymore if itthat I have enjoyed, achieving it in both the0.8358762.205628
4A classicclassic.<br /><br />And only this one will ha.... It's a movie with a fine cast. As the beginn...2.1130752.739168
5This has to be one of theworst with the differences being that for thebest thriller films I've seen in recent-2.7053392.730615
6Happy Go Lovely is a waste. Not only are extremelyof time, giving a-2.429504-2.934672
7Wow, I justcan't make fun of itfeek it! This show-2.201666-0.106085
8This movie makes several mistakes.Despite being a great comedic diversion it es...It's cool, wonderful - it held me into a very ...-1.2323802.707638
9Branagh and Fishburne, Drake is playedis a great show. Beautiful0.7768192.808996
10I might have given this movie arating of *11 when I heard that!), but it was...great performance. It was truly a great movie...0.2763802.743328
11Really, really badwith feel like there is no end to the. This movie is incredibly good, with the-2.639503-1.568827
12What another reviewer called lack ofjudgment, connecting into her own harsh obser...suspense. Rogers and Rooney rate this as exce...-1.0797072.696888
13This is simply onemore problem of Steveof the best choice-1.4454362.662699
14\"Perhaps we can arrange a meet-and-greet.<br /><br />Telegwith spent, classic music and dance, and come...0.2584791.876662
15Richard Willaims isnice enough; the little black guy plays quitebeautifully hands on in his own spin, and0.7965082.820259
\n", + "
" + ], + "text/plain": [ + " query \\\n", + "0 I rented Zero Day \n", + "1 The only \n", + "2 I've read a few \n", + "3 This is the second British Rank film \n", + "4 A classic \n", + "5 This has to be one of the \n", + "6 Happy Go Lovely is a waste \n", + "7 Wow, I just \n", + "8 This movie makes several mistakes. \n", + "9 Branagh and Fish \n", + "10 I might have given this movie a \n", + "11 Really, really bad \n", + "12 What another reviewer called lack of \n", + "13 This is simply one \n", + "14 \"Perhaps we can arrange a meet \n", + "15 Richard Willaims is \n", + "\n", + " response (before) \\\n", + "0 4 for my sister. To my surprise, the Wii caug... \n", + "1 distro of her \n", + "2 news reports about Mr. Mueller's activities b... \n", + "3 , and I wouldn't be surprised anymore if it \n", + "4 classic.

And only this one will ha... \n", + "5 worst with the differences being that for the \n", + "6 . Not only are extremely \n", + "7 can't make fun of it \n", + "8 Despite being a great comedic diversion it es... \n", + "9 burne, Drake is played \n", + "10 rating of *11 when I heard that!), but it was... \n", + "11 with feel like there is no end to the \n", + "12 judgment, connecting into her own harsh obser... \n", + "13 more problem of Steve \n", + "14 -and-greet.

Teleg \n", + "15 nice enough; the little black guy plays quite \n", + "\n", + " response (after) rewards (before) \\\n", + "0 . It is a pleasure. It is a huge leap 68 years... 1.736068 \n", + "1 special compliments is the 0.150852 \n", + "2 novels and I never watch this. It has a reall... -1.417962 \n", + "3 that I have enjoyed, achieving it in both the 0.835876 \n", + "4 . It's a movie with a fine cast. As the beginn... 2.113075 \n", + "5 best thriller films I've seen in recent -2.705339 \n", + "6 of time, giving a -2.429504 \n", + "7 feek it! This show -2.201666 \n", + "8 It's cool, wonderful - it held me into a very ... -1.232380 \n", + "9 is a great show. Beautiful 0.776819 \n", + "10 great performance. It was truly a great movie... 0.276380 \n", + "11 . This movie is incredibly good, with the -2.639503 \n", + "12 suspense. Rogers and Rooney rate this as exce... -1.079707 \n", + "13 of the best choice -1.445436 \n", + "14 with spent, classic music and dance, and come... 0.258479 \n", + "15 beautifully hands on in his own spin, and 0.796508 \n", + "\n", + " rewards (after) \n", + "0 2.423731 \n", + "1 0.190159 \n", + "2 2.831814 \n", + "3 2.205628 \n", + "4 2.739168 \n", + "5 2.730615 \n", + "6 -2.934672 \n", + "7 -0.106085 \n", + "8 2.707638 \n", + "9 2.808996 \n", + "10 2.743328 \n", + "11 -1.568827 \n", + "12 2.696888 \n", + "13 2.662699 \n", + "14 1.876662 \n", + "15 2.820259 " + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "#### get a batch from the dataset\n", + "bs = 16\n", + "game_data = dict()\n", + "dataset.set_format(\"pandas\")\n", + "df_batch = dataset[:].sample(bs)\n", + "game_data[\"query\"] = df_batch[\"query\"].tolist()\n", + "query_tensors = df_batch[\"input_ids\"].tolist()\n", + "\n", + "response_tensors_ref, response_tensors = [], []\n", + "\n", + "#### get response from gpt2 and gpt2_ref\n", + "for i in range(bs):\n", + " query = torch.tensor(query_tensors[i]).to(device)\n", + "\n", + " gen_len = output_length_sampler()\n", + " query_response = ref_model.generate(\n", + " query.unsqueeze(0), max_new_tokens=gen_len, **gen_kwargs\n", + " ).squeeze()\n", + " response_len = len(query_response) - len(query)\n", + " response_tensors_ref.append(query_response[-response_len:])\n", + "\n", + " query_response = model.generate(\n", + " query.unsqueeze(0), max_new_tokens=gen_len, **gen_kwargs\n", + " ).squeeze()\n", + " response_len = len(query_response) - len(query)\n", + " response_tensors.append(query_response[-response_len:])\n", + "\n", + "#### decode responses\n", + "game_data[\"response (before)\"] = [\n", + " tokenizer.decode(response_tensors_ref[i]) for i in range(bs)\n", + "]\n", + "game_data[\"response (after)\"] = [\n", + " tokenizer.decode(response_tensors[i]) for i in range(bs)\n", + "]\n", + "\n", + "#### sentiment analysis of query/response pairs before/after\n", + "texts = [q + r for q, r in zip(game_data[\"query\"], game_data[\"response (before)\"])]\n", + "pipe_outputs = sentiment_pipe(texts, **sent_kwargs)\n", + "positive_scores = [\n", + " item[\"score\"]\n", + " for output in pipe_outputs\n", + " for item in output\n", + " if item[\"label\"] == \"POSITIVE\"\n", + "]\n", + "game_data[\"rewards (before)\"] = positive_scores\n", + "\n", + "texts = [q + r for q, r in zip(game_data[\"query\"], game_data[\"response (after)\"])]\n", + "pipe_outputs = sentiment_pipe(texts, **sent_kwargs)\n", + "positive_scores = [\n", + " item[\"score\"]\n", + " for output in pipe_outputs\n", + " for item in output\n", + " if item[\"label\"] == \"POSITIVE\"\n", + "]\n", + "game_data[\"rewards (after)\"] = positive_scores\n", + "\n", + "# store results in a dataframe\n", + "df_results = pd.DataFrame(game_data)\n", + "df_results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Looking at the reward mean/median of the generated sequences we observe a significant difference." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "mean:\n" + ] + }, + { + "data": { + "text/plain": [ + "rewards (before) -0.512965\n", + "rewards (after) 1.676750\n", + "dtype: float64" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "median:\n" + ] + }, + { + "data": { + "text/plain": [ + "rewards (before) -0.464427\n", + "rewards (after) 2.679794\n", + "dtype: float64" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "print(\"mean:\")\n", + "display(df_results[[\"rewards (before)\", \"rewards (after)\"]].mean())\n", + "print()\n", + "print(\"median:\")\n", + "display(df_results[[\"rewards (before)\", \"rewards (after)\"]].median())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Save model\n", + "Finally, we save the model and push it to the Hugging Face for later usage." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "('gpt2-imdb-pos-v2/tokenizer_config.json',\n", + " 'gpt2-imdb-pos-v2/special_tokens_map.json',\n", + " 'gpt2-imdb-pos-v2/vocab.json',\n", + " 'gpt2-imdb-pos-v2/merges.txt',\n", + " 'gpt2-imdb-pos-v2/added_tokens.json',\n", + " 'gpt2-imdb-pos-v2/tokenizer.json')" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model.save_pretrained(\"gpt2-imdb-pos-v2\", push_to_hub=True)\n", + "tokenizer.save_pretrained(\"gpt2-imdb-pos-v2\", push_to_hub=True)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "env", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + }, + "vscode": { + "interpreter": { + "hash": "4c8ff454cd947027f86954d72bf940c689a97dcc494eb53cfe4813862c6065fe" + } + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/testbed/huggingface__trl/examples/research_projects/README.md b/testbed/huggingface__trl/examples/research_projects/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1b1977e1877ca1d6351cd888b76793a2bad3206d --- /dev/null +++ b/testbed/huggingface__trl/examples/research_projects/README.md @@ -0,0 +1,7 @@ +# Research projects that use TRL + +Welcome to the research projects folder! Here you can find the scripts used for some research projects that used TRL and maintained by the developers and the community (LM de-toxification, Stack-Llama, etc.). Check out the READMEs in the subfolders for more information! + +- [De-detoxifying language models](https://github.com/huggingface/trl/tree/main/examples/research_projects/toxicity) +- [Stack-Llama](https://github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama) +- [Stack-Llama-2](https://github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama_2) \ No newline at end of file diff --git a/testbed/huggingface__trl/examples/research_projects/stack_llama/scripts/README.md b/testbed/huggingface__trl/examples/research_projects/stack_llama/scripts/README.md new file mode 100644 index 0000000000000000000000000000000000000000..da9f067f20cc73ae14889ec6d40110a9c79598e3 --- /dev/null +++ b/testbed/huggingface__trl/examples/research_projects/stack_llama/scripts/README.md @@ -0,0 +1,18 @@ +# RLHF pipeline for the creation of StackLLaMa: a Stack exchange llama-7b model. +There were three main steps to the training process: +1. Supervised fine-tuning of the base llama-7b model to create llama-7b-se: + - `torchrun --nnodes 1 --nproc_per_node 8 examples/research_projects/stack_llama/scripts/supervised_finetuning.py --model_path= --streaming --learning_rate 1e-5 --max_steps 5000 --output_dir ./llama-se` +2. Reward modeling using dialog pairs from the SE dataset using the llama-7b-se to create llama-7b-se-rm: + - `torchrun --nnodes 1 --nproc_per_node 8 examples/research_projects/stack_llama/scripts/reward_modeling.py --model_name=` +3. RL fine-tuning of llama-7b-se with the llama-7b-se-rm reward model: + - `accelerate launch --multi_gpu --num_machines 1 --num_processes 8 examples/research_projects/stack_llama/scripts/rl_training.py --log_with=wandb --model_name= --reward_model_name= --adafactor=False --tokenizer_name= --save_freq=100 --output_max_length=128 --batch_size=8 --gradient_accumulation_steps=8 --batched_gen=True --ppo_epochs=4 --seed=0 --learning_rate=1.4e-5 --early_stopping=True --output_dir=llama-se-rl-finetune-128-8-8-1.4e-5_adam` + + +LoRA layers were using at all stages to reduce memory requirements. +At each stage the peft adapter layers were merged with the base model, using: +```shell +python examples/research_projects/stack_llama/scripts/merge_peft_adapter.py --adapter_model_name=XXX --base_model_name=YYY --output_name=ZZZ +``` +Note that this script requires `peft>=0.3.0`. + +For access to the base llama-7b model, please see Meta's [release](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) and [request form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform). diff --git a/testbed/huggingface__trl/examples/research_projects/stack_llama/scripts/merge_peft_adapter.py b/testbed/huggingface__trl/examples/research_projects/stack_llama/scripts/merge_peft_adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..3d21a952573b529b2d90f6067b1773c5ec4d646b --- /dev/null +++ b/testbed/huggingface__trl/examples/research_projects/stack_llama/scripts/merge_peft_adapter.py @@ -0,0 +1,62 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field +from typing import Optional + +import torch +from peft import PeftConfig, PeftModel +from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser + + +@dataclass +class ScriptArguments: + """ + The input names representing the Adapter and Base model fine-tuned with PEFT, and the output name representing the + merged model. + """ + + adapter_model_name: Optional[str] = field(default=None, metadata={"help": "the adapter name"}) + base_model_name: Optional[str] = field(default=None, metadata={"help": "the base model name"}) + output_name: Optional[str] = field(default=None, metadata={"help": "the merged model name"}) + + +parser = HfArgumentParser(ScriptArguments) +script_args = parser.parse_args_into_dataclasses()[0] +assert script_args.adapter_model_name is not None, "please provide the name of the Adapter you would like to merge" +assert script_args.base_model_name is not None, "please provide the name of the Base model" +assert script_args.output_name is not None, "please provide the output name of the merged model" + +peft_config = PeftConfig.from_pretrained(script_args.adapter_model_name) +if peft_config.task_type == "SEQ_CLS": + # The sequence classification task is used for the reward model in PPO + model = AutoModelForSequenceClassification.from_pretrained( + script_args.base_model_name, num_labels=1, torch_dtype=torch.bfloat16 + ) +else: + model = AutoModelForCausalLM.from_pretrained( + script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16 + ) + +tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name) + +# Load the PEFT model +model = PeftModel.from_pretrained(model, script_args.adapter_model_name) +model.eval() + +model = model.merge_and_unload() + +model.save_pretrained(f"{script_args.output_name}") +tokenizer.save_pretrained(f"{script_args.output_name}") +model.push_to_hub(f"{script_args.output_name}", use_temp_dir=False) diff --git a/testbed/huggingface__trl/examples/research_projects/stack_llama/scripts/reward_modeling.py b/testbed/huggingface__trl/examples/research_projects/stack_llama/scripts/reward_modeling.py new file mode 100644 index 0000000000000000000000000000000000000000..8402413c03ecc8647f3252f4d384ec5583cbe5f6 --- /dev/null +++ b/testbed/huggingface__trl/examples/research_projects/stack_llama/scripts/reward_modeling.py @@ -0,0 +1,324 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field +from typing import Any, Dict, List, Optional, Union + +import evaluate +import numpy as np +import torch +import torch.nn as nn +from datasets import load_dataset +from peft import LoraConfig, TaskType, get_peft_model +from transformers import ( + AutoModelForSequenceClassification, + AutoTokenizer, + HfArgumentParser, + PreTrainedTokenizerBase, + Trainer, + TrainerCallback, + TrainingArguments, + set_seed, +) +from transformers.utils import PaddingStrategy + + +# Define and parse arguments. +@dataclass +class ScriptArguments: + """ + These arguments vary depending on how many GPUs you have, what their capacity and features are, and what size model you want to train. + """ + + local_rank: Optional[int] = field(default=-1, metadata={"help": "Used for multi-gpu"}) + resume_from_checkpoint: Optional[bool] = field( + default=False, + metadata={"help": "If you want to resume training where it left off."}, + ) + deepspeed: Optional[str] = field( + default=None, + metadata={ + "help": "Path to deepspeed config if using deepspeed. You may need this if the model that you want to train doesn't fit on a single GPU." + }, + ) + per_device_train_batch_size: Optional[int] = field(default=4) + per_device_eval_batch_size: Optional[int] = field(default=1) + gradient_accumulation_steps: Optional[int] = field(default=1) + learning_rate: Optional[float] = field(default=2e-5) + weight_decay: Optional[float] = field(default=0.001) + model_name: Optional[str] = field( + default="gpt2", + metadata={ + "help": "The model that you want to train from the Hugging Face hub. E.g. gpt2, gpt2-xl, bert, etc." + }, + ) + tokenizer_name: Optional[str] = field( + default=None, + metadata={ + "help": "The tokenizer for your model, if left empty will use the default for your model", + }, + ) + bf16: Optional[bool] = field( + default=True, + metadata={ + "help": "This essentially cuts the training time in half if you want to sacrifice a little precision and have a supported GPU." + }, + ) + num_train_epochs: Optional[int] = field( + default=1, + metadata={"help": "The number of training epochs for the reward model."}, + ) + train_subset: Optional[int] = field( + default=100000, + metadata={"help": "The size of the subset of the training data to use"}, + ) + eval_subset: Optional[int] = field( + default=50000, + metadata={"help": "The size of the subset of the eval data to use"}, + ) + gradient_checkpointing: Optional[bool] = field( + default=False, + metadata={"help": "Enables gradient checkpointing."}, + ) + optim: Optional[str] = field( + default="adamw_hf", + metadata={"help": "The optimizer to use."}, + ) + lr_scheduler_type: Optional[str] = field( + default="linear", + metadata={"help": "The lr scheduler"}, + ) + max_length: Optional[int] = field(default=512) + eval_first_step: Optional[bool] = field( + default=False, + metadata={"help": "Whether to run eval after the first step"}, + ) + seed: Optional[int] = field( + default=0, metadata={"help": "Random seed that will be set at the beginning of training."} + ) + + +parser = HfArgumentParser(ScriptArguments) +script_args = parser.parse_args_into_dataclasses()[0] +set_seed(script_args.seed) +# Load the human stack-exchange-paired dataset for tuning the reward model. +train_dataset = load_dataset( + "lvwerra/stack-exchange-paired", data_dir="data/reward", split="train", verification_mode="no_checks" +) +if script_args.train_subset > 0: + train_dataset = train_dataset.select(range(script_args.train_subset)) +eval_dataset = load_dataset( + "lvwerra/stack-exchange-paired", data_dir="data/evaluation", split="train", verification_mode="no_checks" +) +if script_args.eval_subset > 0: + eval_dataset = eval_dataset.select(range(script_args.eval_subset)) +# Define the training args. Needs to be done before the model is loaded if you are using deepspeed. +model_name_split = script_args.model_name.split("/")[-1] +output_name = ( + f"{model_name_split}_peft_stack-exchange-paired_rmts__{script_args.train_subset}_{script_args.learning_rate}" +) + +training_args = TrainingArguments( + output_dir=output_name, + learning_rate=script_args.learning_rate, + per_device_train_batch_size=script_args.per_device_train_batch_size, + per_device_eval_batch_size=script_args.per_device_eval_batch_size, + num_train_epochs=script_args.num_train_epochs, + weight_decay=script_args.weight_decay, + eval_strategy="steps", + eval_steps=500, + save_strategy="steps", + save_steps=500, + gradient_accumulation_steps=script_args.gradient_accumulation_steps, + gradient_checkpointing=script_args.gradient_checkpointing, + deepspeed=script_args.deepspeed, + local_rank=script_args.local_rank, + remove_unused_columns=False, + label_names=[], + bf16=script_args.bf16, + logging_strategy="steps", + logging_steps=10, + optim=script_args.optim, + lr_scheduler_type=script_args.lr_scheduler_type, + seed=script_args.seed, +) + + +# Load the value-head model and tokenizer. +tokenizer_name = script_args.tokenizer_name if script_args.tokenizer_name is not None else script_args.model_name +tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, use_auth_token=True) +tokenizer.pad_token = tokenizer.eos_token + + +peft_config = LoraConfig( + task_type=TaskType.SEQ_CLS, + inference_mode=False, + r=8, + lora_alpha=32, + lora_dropout=0.1, +) + +model = AutoModelForSequenceClassification.from_pretrained( + script_args.model_name, num_labels=1, torch_dtype=torch.bfloat16 +) +model = get_peft_model(model, peft_config) +model.print_trainable_parameters() + +# Need to do this for gpt2, because it doesn't have an official pad token. +tokenizer.pad_token = tokenizer.eos_token +model.config.pad_token_id = tokenizer.eos_token_id +model.config.use_cache = not script_args.gradient_checkpointing +num_proc = 24 # Can adjust to be higher if you have more processors. +original_columns = train_dataset.column_names + + +# Turn the dataset into pairs of post + summaries, where text_j is the preferred question + answer and text_k is the other. +# Then tokenize the dataset. +def preprocess_function(examples): + new_examples = { + "input_ids_j": [], + "attention_mask_j": [], + "input_ids_k": [], + "attention_mask_k": [], + } + for question, response_j, response_k in zip(examples["question"], examples["response_j"], examples["response_k"]): + tokenized_j = tokenizer("Question: " + question + "\n\nAnswer: " + response_j, truncation=True) + tokenized_k = tokenizer("Question: " + question + "\n\nAnswer: " + response_k, truncation=True) + + new_examples["input_ids_j"].append(tokenized_j["input_ids"]) + new_examples["attention_mask_j"].append(tokenized_j["attention_mask"]) + new_examples["input_ids_k"].append(tokenized_k["input_ids"]) + new_examples["attention_mask_k"].append(tokenized_k["attention_mask"]) + + return new_examples + + +# preprocess the dataset and filter out QAs that are longer than script_args.max_length +train_dataset = train_dataset.map( + preprocess_function, + batched=True, + num_proc=num_proc, + remove_columns=original_columns, +) +train_dataset = train_dataset.filter( + lambda x: len(x["input_ids_j"]) <= script_args.max_length and len(x["input_ids_k"]) <= script_args.max_length, + num_proc=num_proc, +) + +eval_dataset = eval_dataset.map( + preprocess_function, + batched=True, + num_proc=num_proc, + remove_columns=original_columns, +) +eval_dataset = eval_dataset.filter( + lambda x: len(x["input_ids_j"]) <= script_args.max_length and len(x["input_ids_k"]) <= script_args.max_length, + num_proc=num_proc, +) + + +# We need to define a special data collator that batches the data in our j vs k format. +@dataclass +class RewardDataCollatorWithPadding: + tokenizer: PreTrainedTokenizerBase + padding: Union[bool, str, PaddingStrategy] = True + pad_to_multiple_of: Optional[int] = None + return_tensors: str = "pt" + + def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]: + features_j = [] + features_k = [] + for feature in features: + features_j.append( + { + "input_ids": feature["input_ids_j"], + "attention_mask": feature["attention_mask_j"], + } + ) + features_k.append( + { + "input_ids": feature["input_ids_k"], + "attention_mask": feature["attention_mask_k"], + } + ) + batch_j = self.tokenizer.pad( + features_j, + padding=self.padding, + pad_to_multiple_of=self.pad_to_multiple_of, + return_tensors=self.return_tensors, + ) + batch_k = self.tokenizer.pad( + features_k, + padding=self.padding, + pad_to_multiple_of=self.pad_to_multiple_of, + return_tensors=self.return_tensors, + ) + batch = { + "input_ids_j": batch_j["input_ids"], + "attention_mask_j": batch_j["attention_mask"], + "input_ids_k": batch_k["input_ids"], + "attention_mask_k": batch_k["attention_mask"], + "return_loss": True, + } + return batch + + +# Define the metric that we'll use for validation. +accuracy = evaluate.load("accuracy") + + +def compute_metrics(eval_pred): + predictions, _ = eval_pred + # Here, predictions is rewards_j and rewards_k. + # We want to see how much of the time rewards_j > rewards_k. + predictions = np.argmax(predictions, axis=0) + labels = np.zeros(predictions.shape) + return accuracy.compute(predictions=predictions, references=labels) + + +class RewardTrainer(Trainer): + # Define how to compute the reward loss. We use the InstructGPT pairwise logloss: https://huggingface.co/papers/2203.02155 + def compute_loss(self, model, inputs, return_outputs=False): + rewards_j = model(input_ids=inputs["input_ids_j"], attention_mask=inputs["attention_mask_j"])[0] + rewards_k = model(input_ids=inputs["input_ids_k"], attention_mask=inputs["attention_mask_k"])[0] + loss = -nn.functional.logsigmoid(rewards_j - rewards_k).mean() + if return_outputs: + return loss, {"rewards_j": rewards_j, "rewards_k": rewards_k} + return loss + + +# Train the model, woohoo. +trainer = RewardTrainer( + model=model, + args=training_args, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + compute_metrics=compute_metrics, + data_collator=RewardDataCollatorWithPadding(tokenizer=tokenizer), +) + + +if script_args.eval_first_step: + + class EvaluateFirstStepCallback(TrainerCallback): + def on_step_end(self, args, state, control, **kwargs): + if state.global_step == 1: + control.should_evaluate = True + + trainer.add_callback(EvaluateFirstStepCallback()) + +trainer.train(script_args.resume_from_checkpoint) + +print("Saving last checkpoint of the model") +model.save_pretrained(output_name + "_peft_last_checkpoint") diff --git a/testbed/huggingface__trl/examples/research_projects/stack_llama/scripts/rl_training.py b/testbed/huggingface__trl/examples/research_projects/stack_llama/scripts/rl_training.py new file mode 100644 index 0000000000000000000000000000000000000000..a37cf63ab1067445050d7f3fb864304b28ccdea9 --- /dev/null +++ b/testbed/huggingface__trl/examples/research_projects/stack_llama/scripts/rl_training.py @@ -0,0 +1,267 @@ +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from dataclasses import dataclass, field +from typing import Optional + +import torch +from accelerate import Accelerator +from datasets import load_dataset +from peft import LoraConfig +from tqdm import tqdm +from transformers import Adafactor, AutoTokenizer, HfArgumentParser, pipeline + +from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer, set_seed +from trl.core import LengthSampler + + +tqdm.pandas() + + +@dataclass +class ScriptArguments: + """ + The name of the Casual LM model we wish to fine-tune with PPO + """ + + # NOTE: gpt2 models use Conv1D instead of Linear layers which are not yet supported in 8 bit mode + # models like gpt-neo* models are more suitable. + model_name: Optional[str] = field(default="", metadata={"help": "the model name"}) + tokenizer_name: Optional[str] = field(default="", metadata={"help": "the tokenizer name"}) + reward_model_name: Optional[str] = field(default="", metadata={"help": "the reward model name"}) + log_with: Optional[str] = field(default=None, metadata={"help": "use 'wandb' to log with wandb"}) + learning_rate: Optional[float] = field(default=1.41e-5, metadata={"help": "the learning rate"}) + output_max_length: Optional[int] = field(default=128, metadata={"help": "maximum length for generation"}) + mini_batch_size: Optional[int] = field(default=1, metadata={"help": "the PPO minibatch size"}) + batch_size: Optional[int] = field(default=32, metadata={"help": "the batch size"}) + ppo_epochs: Optional[int] = field(default=4, metadata={"help": "the number of ppo epochs"}) + gradient_accumulation_steps: Optional[int] = field( + default=4, metadata={"help": "the number of gradient accumulation steps"} + ) + adafactor: Optional[bool] = field(default=False, metadata={"help": "whether to use the adafactor optimizer"}) + early_stopping: Optional[bool] = field(default=False, metadata={"help": "whether to early stop"}) + target_kl: Optional[float] = field(default=0.1, metadata={"help": "kl target for early stopping"}) + reward_baseline: Optional[float] = field( + default=0.0, + metadata={"help": "a baseline value that is subtracted from the reward"}, + ) + batched_gen: Optional[bool] = field(default=False, metadata={"help": "whether to use the batched text gen"}) + save_freq: Optional[int] = field(default=None, metadata={"help": "n steps to save the model"}) + output_dir: Optional[str] = field(default="runs/", metadata={"help": "n steps to save the model"}) + seed: Optional[int] = field(default=0, metadata={"help": "the seed"}) + steps: Optional[int] = field(default=20000, metadata={"help": "number of epochs"}) + init_kl_coef: Optional[float] = field( + default=0.2, + metadata={"help": "Initial KL penalty coefficient (used for adaptive and linear control)"}, + ) + + adap_kl_ctrl: Optional[bool] = field(default=True, metadata={"help": "Use adaptive KL control, otherwise linear"}) + load_in_8bit: Optional[bool] = field(default=True, metadata={"help": "whether to load the model in 8bit"}) + + +parser = HfArgumentParser(ScriptArguments) +script_args: ScriptArguments = parser.parse_args_into_dataclasses()[0] +reward_model_name = script_args.reward_model_name +dataset_name = "lvwerra/stack-exchange-paired" +config = PPOConfig( + steps=script_args.steps, + model_name=script_args.model_name, + learning_rate=script_args.learning_rate, + log_with=script_args.log_with, + batch_size=script_args.batch_size, + mini_batch_size=script_args.mini_batch_size, + gradient_accumulation_steps=script_args.gradient_accumulation_steps, + optimize_cuda_cache=True, + early_stopping=script_args.early_stopping, + target_kl=script_args.target_kl, + ppo_epochs=script_args.ppo_epochs, + seed=script_args.seed, + init_kl_coef=script_args.init_kl_coef, + adap_kl_ctrl=script_args.adap_kl_ctrl, +) + +train_dataset = load_dataset( + "lvwerra/stack-exchange-paired", data_dir="data/rl", split="train", verification_mode="no_checks" +) +train_dataset = train_dataset.select(range(100000)) +original_columns = train_dataset.column_names + +# We then define the arguments to pass to the sentiment analysis pipeline. +# We set `return_all_scores` to True to get the sentiment score for each token. +sent_kwargs = { + "return_all_scores": True, + "function_to_apply": "none", + "batch_size": 16, + "truncation": True, +} + +tokenizer = AutoTokenizer.from_pretrained(script_args.tokenizer_name) +# GPT-2 tokenizer has a pad token, but it is not eos_token by default. We need to set it to eos_token. +# only for this model. + +if getattr(tokenizer, "pad_token", None) is None: + tokenizer.pad_token = tokenizer.eos_token + + +# Below is an example function to build the dataset. In our case, we use the IMDB dataset +# from the `datasets` library. One should customize this function to train the model on +# its own dataset. +def build_dataset( + tokenizer, + dataset_name="lvwerra/stack-exchange-paired", +): + """ + Build dataset for training. This builds the dataset from `load_dataset`, one should + customize this function to train the model on its own dataset. + + Args: + dataset_name (`str`): + The name of the dataset to be loaded. + + Returns: + dataloader (`torch.utils.data.DataLoader`): + The dataloader for the dataset. + """ + + num_proc = 24 + + def preprocess_function(examples): + new_examples = { + "query": [], + "input_ids": [], + } + for question in examples["question"]: + query = "Question: " + question + "\n\nAnswer: " + tokenized_question = tokenizer(query, truncation=True) + new_examples["query"].append(query) + new_examples["input_ids"].append(tokenized_question["input_ids"]) + + return new_examples + + ds = train_dataset.map( + preprocess_function, + batched=True, + num_proc=num_proc, + remove_columns=original_columns, + ) + ds = ds.filter(lambda x: len(x["input_ids"]) < 512, batched=False, num_proc=num_proc) + + ds.set_format(type="torch") + return ds + + +# We retrieve the dataloader by calling the `build_dataset` function. +dataset = build_dataset(tokenizer) + + +def collator(data): + return {key: [d[key] for d in data] for key in data[0]} + + +# set seed before initializing value head for deterministic eval +set_seed(config.seed) + +# Now let's build the model, the reference model, and the tokenizer. +current_device = Accelerator().local_process_index + +lora_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", +) +model = AutoModelForCausalLMWithValueHead.from_pretrained( + config.model_name, + load_in_8bit=script_args.load_in_8bit, + device_map={"": current_device}, + peft_config=lora_config, +) + +optimizer = None +if script_args.adafactor: + optimizer = Adafactor( + filter(lambda p: p.requires_grad, model.parameters()), + scale_parameter=False, + relative_step=False, + warmup_init=False, + lr=config.learning_rate, + ) +# We then build the PPOTrainer, passing the model, the reference model, the tokenizer +ppo_trainer = PPOTrainer( + config, + model, + ref_model=None, + tokenizer=tokenizer, + dataset=dataset, + data_collator=collator, + optimizer=optimizer, +) + +# We then build the sentiment analysis pipeline using our reward model, passing the +# model name and the sentiment analysis pipeline arguments. Let's also make sure to +# set the device to the same device as the PPOTrainer. +device = ppo_trainer.accelerator.device +if ppo_trainer.accelerator.num_processes == 1: + device = 0 if torch.cuda.is_available() else "cpu" # to avoid a ` pipeline` bug +sentiment_pipe = pipeline( + "sentiment-analysis", + model=reward_model_name, + device_map={"": current_device}, + model_kwargs={"load_in_8bit": script_args.load_in_8bit}, + tokenizer=tokenizer, + return_token_type_ids=False, +) + +if sentiment_pipe.model.config.pad_token_id is None: + sentiment_pipe.model.config.pad_token_id = sentiment_pipe.model.config.eos_token_id +# We then define the arguments to pass to the `generate` function. These arguments +# are passed to the `generate` function of the PPOTrainer, which is a wrapper around +# the `generate` function of the trained model. +generation_kwargs = { + # "min_length": -1, + "top_k": 0.0, + "top_p": 1.0, + "do_sample": True, + "pad_token_id": tokenizer.pad_token_id, + "eos_token_id": 100_000, +} +output_min_length = 32 +output_max_length = script_args.output_max_length +output_length_sampler = LengthSampler(output_min_length, output_max_length) + +for epoch, batch in tqdm(enumerate(ppo_trainer.dataloader)): + if epoch >= config.total_ppo_epochs: + break + + question_tensors = batch["input_ids"] + + response_tensors = ppo_trainer.generate( + question_tensors, + return_prompt=False, + length_sampler=output_length_sampler, + **generation_kwargs, + ) + batch["response"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True) + + # Compute reward score (using the sentiment analysis pipeline) + texts = [q + r for q, r in zip(batch["query"], batch["response"])] + pipe_outputs = sentiment_pipe(texts, **sent_kwargs) + rewards = [torch.tensor(output[0]["score"] - script_args.reward_baseline) for output in pipe_outputs] + + # Run PPO step + stats = ppo_trainer.step(question_tensors, response_tensors, rewards) + ppo_trainer.log_stats(stats, batch, rewards) + + if script_args.save_freq and epoch and epoch % script_args.save_freq == 0: + ppo_trainer.save_pretrained(script_args.output_dir + f"step_{epoch}") diff --git a/testbed/huggingface__trl/examples/research_projects/stack_llama/scripts/supervised_finetuning.py b/testbed/huggingface__trl/examples/research_projects/stack_llama/scripts/supervised_finetuning.py new file mode 100644 index 0000000000000000000000000000000000000000..c2d860468aaf64c4ca59942d5ada51507ec22638 --- /dev/null +++ b/testbed/huggingface__trl/examples/research_projects/stack_llama/scripts/supervised_finetuning.py @@ -0,0 +1,222 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import os + +from accelerate import Accelerator +from datasets import load_dataset +from peft import LoraConfig +from tqdm import tqdm +from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, logging, set_seed + +from trl import SFTTrainer +from trl.trainer import ConstantLengthDataset + + +""" +Fine-Tune Llama-7b on SE paired dataset +""" + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--model_path", type=str, default="") + parser.add_argument("--dataset_name", type=str, default="lvwerra/stack-exchange-paired") + parser.add_argument("--subset", type=str, default="data/finetune") + parser.add_argument("--split", type=str, default="train") + parser.add_argument("--size_valid_set", type=int, default=4000) + parser.add_argument("--streaming", action="store_true") + parser.add_argument("--shuffle_buffer", type=int, default=5000) + + parser.add_argument("--seq_length", type=int, default=1024) + parser.add_argument("--max_steps", type=int, default=10000) + parser.add_argument("--batch_size", type=int, default=4) + parser.add_argument("--gradient_accumulation_steps", type=int, default=1) + parser.add_argument("--eos_token_id", type=int, default=49152) + + parser.add_argument("--learning_rate", type=float, default=1e-4) + parser.add_argument("--lr_scheduler_type", type=str, default="cosine") + parser.add_argument("--num_warmup_steps", type=int, default=100) + parser.add_argument("--weight_decay", type=float, default=0.05) + + parser.add_argument("--local_rank", type=int, default=0) + parser.add_argument("--fp16", action="store_true", default=False) + parser.add_argument("--bf16", action="store_true", default=False) + parser.add_argument("--gradient_checkpointing", action="store_true", default=False) + parser.add_argument("--seed", type=int, default=0) + parser.add_argument("--num_workers", type=int, default=None) + parser.add_argument("--output_dir", type=str, default="./checkpoints") + parser.add_argument("--log_freq", default=1, type=int) + parser.add_argument("--eval_freq", default=1000, type=int) + parser.add_argument("--save_freq", default=1000, type=int) + + return parser.parse_args() + + +def chars_token_ratio(dataset, tokenizer, nb_examples=400): + """ + Estimate the average number of characters per token in the dataset. + """ + total_characters, total_tokens = 0, 0 + for _, example in tqdm(zip(range(nb_examples), iter(dataset)), total=nb_examples): + text = prepare_sample_text(example) + total_characters += len(text) + if tokenizer.is_fast: + total_tokens += len(tokenizer(text).tokens()) + else: + total_tokens += len(tokenizer.tokenize(text)) + + return total_characters / total_tokens + + +def print_trainable_parameters(model): + """ + Prints the number of trainable parameters in the model. + """ + trainable_params = 0 + all_param = 0 + for _, param in model.named_parameters(): + all_param += param.numel() + if param.requires_grad: + trainable_params += param.numel() + print( + f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" + ) + + +def prepare_sample_text(example): + """Prepare the text from a sample of the dataset.""" + text = f"Question: {example['question']}\n\nAnswer: {example['response_j']}" + return text + + +def create_datasets(tokenizer, args): + dataset = load_dataset( + args.dataset_name, + data_dir=args.subset, + split=args.split, + use_auth_token=True, + num_proc=args.num_workers if not args.streaming else None, + streaming=args.streaming, + ) + if args.streaming: + print("Loading the dataset in streaming mode") + valid_data = dataset.take(args.size_valid_set) + train_data = dataset.skip(args.size_valid_set) + train_data = train_data.shuffle(buffer_size=args.shuffle_buffer, seed=args.seed) + else: + dataset = dataset.train_test_split(test_size=0.005, seed=args.seed) + train_data = dataset["train"] + valid_data = dataset["test"] + print(f"Size of the train set: {len(train_data)}. Size of the validation set: {len(valid_data)}") + + chars_per_token = chars_token_ratio(train_data, tokenizer) + print(f"The character to token ratio of the dataset is: {chars_per_token:.2f}") + + train_dataset = ConstantLengthDataset( + tokenizer, + train_data, + formatting_func=prepare_sample_text, + infinite=True, + seq_length=args.seq_length, + chars_per_token=chars_per_token, + ) + valid_dataset = ConstantLengthDataset( + tokenizer, + valid_data, + formatting_func=prepare_sample_text, + infinite=False, + seq_length=args.seq_length, + chars_per_token=chars_per_token, + ) + return train_dataset, valid_dataset + + +def run_training(args, train_data, val_data): + print("Loading the model") + + lora_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + ) + + train_data.start_iteration = 0 + + print("Starting main loop") + + training_args = TrainingArguments( + output_dir=args.output_dir, + dataloader_drop_last=True, + eval_strategy="steps", + max_steps=args.max_steps, + eval_steps=args.eval_freq, + save_steps=args.save_freq, + logging_steps=args.log_freq, + per_device_train_batch_size=args.batch_size, + per_device_eval_batch_size=args.batch_size, + learning_rate=args.learning_rate, + lr_scheduler_type=args.lr_scheduler_type, + warmup_steps=args.num_warmup_steps, + gradient_accumulation_steps=args.gradient_accumulation_steps, + gradient_checkpointing=args.gradient_checkpointing, + fp16=args.fp16, + bf16=args.bf16, + weight_decay=args.weight_decay, + run_name="llama-7b-finetuned", + report_to="wandb", + ddp_find_unused_parameters=False, + ) + + model = AutoModelForCausalLM.from_pretrained( + args.model_path, load_in_8bit=True, device_map={"": Accelerator().process_index} + ) + + trainer = SFTTrainer( + model=model, + args=training_args, + train_dataset=train_data, + eval_dataset=val_data, + peft_config=lora_config, + packing=True, + ) + + print_trainable_parameters(trainer.model) + + print("Training...") + trainer.train() + + print("Saving last checkpoint of the model") + trainer.model.save_pretrained(os.path.join(args.output_dir, "final_checkpoint/")) + + +def main(args): + tokenizer = AutoTokenizer.from_pretrained(args.model_path) + train_dataset, eval_dataset = create_datasets(tokenizer, args) + run_training(args, train_dataset, eval_dataset) + + +if __name__ == "__main__": + args = get_args() + assert args.model_path != "", "Please provide the llama model path" + + set_seed(args.seed) + os.makedirs(args.output_dir, exist_ok=True) + + logging.set_verbosity_error() + + main(args) diff --git a/testbed/huggingface__trl/examples/research_projects/stack_llama_2/scripts/README.md b/testbed/huggingface__trl/examples/research_projects/stack_llama_2/scripts/README.md new file mode 100644 index 0000000000000000000000000000000000000000..727a631d8d120f25f4605d93e97539443fd5da8d --- /dev/null +++ b/testbed/huggingface__trl/examples/research_projects/stack_llama_2/scripts/README.md @@ -0,0 +1,76 @@ +# DPO pipeline for the creation of StackLlaMa 2: a Stack exchange llama-v2-7b model + +## Prerequisites + +Install all the dependencies in the `requirements.txt`: + +``` +$ pip install -U -r requirements.txt +``` + +Since we will use `accelerate` for training, make sure to run: +``` +$ accelerate config +``` + +## Training + +There were two main steps to the DPO training process: +1. Supervised fine-tuning of the base llama-v2-7b model to create llama-v2-7b-se: + + ``` + accelerate launch examples/research_projects/stack_llama_2/scripts/sft_llama2.py \ + --output_dir="./sft" \ + --max_steps=500 \ + --logging_steps=10 \ + --save_steps=10 \ + --per_device_train_batch_size=4 \ + --per_device_eval_batch_size=1 \ + --gradient_accumulation_steps=2 \ + --gradient_checkpointing=False \ + --group_by_length=False \ + --learning_rate=1e-4 \ + --lr_scheduler_type="cosine" \ + --warmup_steps=100 \ + --weight_decay=0.05 \ + --optim="paged_adamw_32bit" \ + --bf16=True \ + --remove_unused_columns=False \ + --run_name="sft_llama2" \ + --report_to="wandb" + ``` +1. Run the DPO trainer using the model saved by the previous step: + ``` + accelerate launch examples/research_projects/stack_llama_2/scripts/dpo_llama2.py \ + --model_name_or_path="sft/final_checkpoint" \ + --output_dir="dpo" + ``` + + +## Merging the adaptors + +To merge the adaptors into the base model we can use the `merge_peft_adapter.py` helper script that comes with TRL: + +``` +python examples/research_projects/stack_llama/scripts/merge_peft_adapter.py --base_model_name="meta-llama/Llama-2-7b-hf" --adapter_model_name="dpo/final_checkpoint/" --output_name="stack-llama-2" +``` + +which will also push the model to your HuggingFace hub account. + +## Running the model + +We can load the DPO-trained LoRA adaptors which were saved by the DPO training step and load them via: + +```py +from peft import AutoPeftModelForCausalLM + + +model = AutoPeftModelForCausalLM.from_pretrained( + "dpo/final_checkpoint", + low_cpu_mem_usage=True, + torch_dtype=torch.float16, + load_in_4bit=True, +) + +model.generate(...) +``` diff --git a/testbed/huggingface__trl/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py b/testbed/huggingface__trl/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py new file mode 100644 index 0000000000000000000000000000000000000000..8530cd66c4f45a4fb8ac6917c474f598cd67ef75 --- /dev/null +++ b/testbed/huggingface__trl/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py @@ -0,0 +1,252 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# 0. imports +import os +from dataclasses import dataclass, field +from typing import Dict, Optional + +import torch +from accelerate import Accelerator +from datasets import Dataset, load_dataset +from peft import LoraConfig +from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed + +from trl import DPOConfig, DPOTrainer + + +# Define and parse arguments. +@dataclass +class ScriptArguments: + """ + The arguments for the DPO training script. + """ + + # data parameters + beta: Optional[float] = field(default=0.1, metadata={"help": "the beta parameter for DPO loss"}) + + # training parameters + model_name_or_path: Optional[str] = field( + default="../sft/results/final_checkpoint", + metadata={"help": "the location of the SFT model name or path"}, + ) + learning_rate: Optional[float] = field(default=5e-4, metadata={"help": "optimizer learning rate"}) + lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "the lr scheduler type"}) + warmup_steps: Optional[int] = field(default=100, metadata={"help": "the number of warmup steps"}) + weight_decay: Optional[float] = field(default=0.05, metadata={"help": "the weight decay"}) + optimizer_type: Optional[str] = field(default="paged_adamw_32bit", metadata={"help": "the optimizer type"}) + + per_device_train_batch_size: Optional[int] = field(default=4, metadata={"help": "train batch size per device"}) + per_device_eval_batch_size: Optional[int] = field(default=1, metadata={"help": "eval batch size per device"}) + gradient_accumulation_steps: Optional[int] = field( + default=4, metadata={"help": "the number of gradient accumulation steps"} + ) + gradient_checkpointing: Optional[bool] = field( + default=True, metadata={"help": "whether to use gradient checkpointing"} + ) + + gradient_checkpointing_use_reentrant: Optional[bool] = field( + default=False, metadata={"help": "whether to use reentrant for gradient checkpointing"} + ) + + lora_alpha: Optional[float] = field(default=16, metadata={"help": "the lora alpha parameter"}) + lora_dropout: Optional[float] = field(default=0.05, metadata={"help": "the lora dropout parameter"}) + lora_r: Optional[int] = field(default=8, metadata={"help": "the lora r parameter"}) + + max_prompt_length: Optional[int] = field(default=512, metadata={"help": "the maximum prompt length"}) + max_length: Optional[int] = field(default=1024, metadata={"help": "the maximum sequence length"}) + max_steps: Optional[int] = field(default=1000, metadata={"help": "max number of training steps"}) + logging_steps: Optional[int] = field(default=10, metadata={"help": "the logging frequency"}) + save_steps: Optional[int] = field(default=100, metadata={"help": "the saving frequency"}) + eval_steps: Optional[int] = field(default=100, metadata={"help": "the evaluation frequency"}) + + output_dir: Optional[str] = field(default="./results", metadata={"help": "the output directory"}) + log_freq: Optional[int] = field(default=1, metadata={"help": "the logging frequency"}) + load_in_4bit: Optional[bool] = field(default=True, metadata={"help": "whether to load the model in 4bit"}) + model_dtype: Optional[str] = field( + default="float16", metadata={"help": "model_dtype[float16, bfloat16, float] for loading."} + ) + + # instrumentation + report_to: Optional[str] = field( + default="wandb", + metadata={ + "help": 'The list of integrations to report the results and logs to. Supported platforms are `"azure_ml"`,' + '`"comet_ml"`, `"mlflow"`, `"neptune"`, `"tensorboard"`,`"clearml"` and `"wandb"`. ' + 'Use `"all"` to report to all integrations installed, `"none"` for no integrations.' + }, + ) + # debug argument for distributed training + ignore_bias_buffers: Optional[bool] = field( + default=False, + metadata={ + "help": "fix for DDP issues with LM bias/mask buffers - invalid scalar type,`inplace operation. See" + "https://github.com/huggingface/transformers/issues/22482#issuecomment-1595790992" + }, + ) + seed: Optional[int] = field( + default=0, metadata={"help": "Random seed that will be set at the beginning of training."} + ) + + +def get_stack_exchange_paired( + data_dir: str = "data/rl", + cache_dir: Optional[str] = None, + num_proc=24, +) -> Dataset: + """Load the stack-exchange-paired dataset from Hugging Face and convert it to the necessary format. + + The dataset is converted to a dictionary with the following structure: + { + 'prompt': List[str], + 'chosen': List[str], + 'rejected': List[str], + } + + Prompts are structured as follows: + "Question: " + + "\n\nAnswer: " + """ + dataset = load_dataset( + "lvwerra/stack-exchange-paired", + split="train", + cache_dir=cache_dir, + data_dir=data_dir, + verification_mode="no_checks", + ) + original_columns = dataset.column_names + + def return_prompt_and_responses(samples) -> Dict[str, str]: + return { + "prompt": ["Question: " + question + "\n\nAnswer: " for question in samples["question"]], + "chosen": samples["response_j"], + "rejected": samples["response_k"], + } + + return dataset.map( + return_prompt_and_responses, + batched=True, + num_proc=num_proc, + remove_columns=original_columns, + ) + + +if __name__ == "__main__": + parser = HfArgumentParser(ScriptArguments) + script_args = parser.parse_args_into_dataclasses()[0] + + set_seed(script_args.seed) + + # 1. load a pretrained model + torch_dtype = torch.float + if script_args.model_dtype == "float16": + torch_dtype = torch.float16 + elif script_args.model_dtype == "bfloat16": + torch_dtype = torch.bfloat16 + + model = AutoModelForCausalLM.from_pretrained( + script_args.model_name_or_path, + low_cpu_mem_usage=True, + torch_dtype=torch_dtype, + load_in_4bit=script_args.load_in_4bit, + device_map={"": Accelerator().local_process_index}, + ) + model.config.use_cache = False + + if script_args.ignore_bias_buffers: + # torch distributed hack + model._ddp_params_and_buffers_to_ignore = [ + name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool + ] + + tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf") + tokenizer.pad_token = tokenizer.eos_token + + # 2. Load the Stack-exchange paired dataset + train_dataset = get_stack_exchange_paired(data_dir="data/rl") + train_dataset = train_dataset.filter( + lambda x: len(x["prompt"]) + len(x["chosen"]) <= script_args.max_length + and len(x["prompt"]) + len(x["rejected"]) <= script_args.max_length, + num_proc=script_args.num_proc, + ) + + # 3. Load evaluation dataset + eval_dataset = get_stack_exchange_paired(data_dir="data/evaluation") + eval_dataset = eval_dataset.filter( + lambda x: len(x["prompt"]) + len(x["chosen"]) <= script_args.max_length + and len(x["prompt"]) + len(x["rejected"]) <= script_args.max_length, + num_proc=script_args.num_proc, + ) + + # 4. initialize training arguments: + training_args = DPOConfig( + per_device_train_batch_size=script_args.per_device_train_batch_size, + per_device_eval_batch_size=script_args.per_device_eval_batch_size, + max_steps=script_args.max_steps, + logging_steps=script_args.logging_steps, + save_steps=script_args.save_steps, + gradient_accumulation_steps=script_args.gradient_accumulation_steps, + gradient_checkpointing=script_args.gradient_checkpointing, + learning_rate=script_args.learning_rate, + eval_strategy="steps", + eval_steps=script_args.eval_steps, + output_dir=script_args.output_dir, + report_to=script_args.report_to, + lr_scheduler_type=script_args.lr_scheduler_type, + warmup_steps=script_args.warmup_steps, + optim=script_args.optimizer_type, + bf16=True, + remove_unused_columns=False, + run_name="dpo_llama2", + gradient_checkpointing_kwargs=dict(use_reentrant=script_args.gradient_checkpointing_use_reentrant), + seed=script_args.seed, + ) + + peft_config = LoraConfig( + r=script_args.lora_r, + lora_alpha=script_args.lora_alpha, + lora_dropout=script_args.lora_dropout, + target_modules=[ + "q_proj", + "v_proj", + "k_proj", + "out_proj", + "fc_in", + "fc_out", + "wte", + ], + bias="none", + task_type="CAUSAL_LM", + ) + + # 5. initialize the DPO trainer + dpo_trainer = DPOTrainer( + model, + ref_model=None, + args=training_args, + beta=script_args.beta, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + processing_class=tokenizer, + peft_config=peft_config, + max_prompt_length=script_args.max_prompt_length, + max_length=script_args.max_length, + ) + + # 6. train + dpo_trainer.train() + dpo_trainer.save_model(script_args.output_dir) + + # 7. save + output_dir = os.path.join(script_args.output_dir, "final_checkpoint") + dpo_trainer.model.save_pretrained(output_dir) diff --git a/testbed/huggingface__trl/examples/research_projects/stack_llama_2/scripts/requirements.txt b/testbed/huggingface__trl/examples/research_projects/stack_llama_2/scripts/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..ca124e58df8e4269a4d44d3ceccd0e2a05ea4fae --- /dev/null +++ b/testbed/huggingface__trl/examples/research_projects/stack_llama_2/scripts/requirements.txt @@ -0,0 +1,7 @@ +transformers +trl +peft +accelerate +datasets +bitsandbytes +wandb diff --git a/testbed/huggingface__trl/examples/research_projects/stack_llama_2/scripts/sft_llama2.py b/testbed/huggingface__trl/examples/research_projects/stack_llama_2/scripts/sft_llama2.py new file mode 100644 index 0000000000000000000000000000000000000000..ef34a67f0c4d562e6f1dec04fa4361eab9ba2c06 --- /dev/null +++ b/testbed/huggingface__trl/examples/research_projects/stack_llama_2/scripts/sft_llama2.py @@ -0,0 +1,212 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Fine-Tune Llama2-7b on SE paired dataset +import os +from dataclasses import dataclass, field +from typing import Optional + +import torch +from accelerate import Accelerator +from datasets import load_dataset +from peft import AutoPeftModelForCausalLM, LoraConfig +from tqdm import tqdm +from transformers import ( + AutoModelForCausalLM, + AutoTokenizer, + BitsAndBytesConfig, + HfArgumentParser, + is_torch_npu_available, + is_torch_xpu_available, + set_seed, +) + +from trl import SFTConfig, SFTTrainer +from trl.trainer import ConstantLengthDataset + + +@dataclass +class ScriptArguments: + model_name: Optional[str] = field(default="meta-llama/Llama-2-7b-hf", metadata={"help": "the model name"}) + dataset_name: Optional[str] = field(default="lvwerra/stack-exchange-paired", metadata={"help": "the dataset name"}) + subset: Optional[str] = field(default="data/finetune", metadata={"help": "the subset to use"}) + split: Optional[str] = field(default="train", metadata={"help": "the split to use"}) + size_valid_set: Optional[int] = field(default=4000, metadata={"help": "the size of the validation set"}) + streaming: Optional[bool] = field(default=True, metadata={"help": "whether to stream the dataset"}) + shuffle_buffer: Optional[int] = field(default=5000, metadata={"help": "the shuffle buffer size"}) + seq_length: Optional[int] = field(default=1024, metadata={"help": "the sequence length"}) + num_workers: Optional[int] = field(default=4, metadata={"help": "the number of workers"}) + use_bnb: Optional[bool] = field(default=True, metadata={"help": "whether to use BitsAndBytes"}) + + # LoraConfig + lora_alpha: Optional[float] = field(default=16, metadata={"help": "the lora alpha parameter"}) + lora_dropout: Optional[float] = field(default=0.05, metadata={"help": "the lora dropout parameter"}) + lora_r: Optional[int] = field(default=8, metadata={"help": "the lora r parameter"}) + + +parser = HfArgumentParser((ScriptArguments, SFTConfig)) +script_args, training_args = parser.parse_args_into_dataclasses() +peft_config = LoraConfig( + r=script_args.lora_r, + lora_alpha=script_args.lora_alpha, + lora_dropout=script_args.lora_dropout, + target_modules=["q_proj", "v_proj"], + bias="none", + task_type="CAUSAL_LM", +) + +if training_args.group_by_length and training_args.packing: + raise ValueError("Cannot use both packing and group by length") + +# `gradient_checkpointing` was True by default until `1f3314`, but it's actually not used. +# `gradient_checkpointing=True` will cause `Variable._execution_engine.run_backward`. +if training_args.gradient_checkpointing: + raise ValueError("gradient_checkpointing not supported") + +set_seed(training_args.seed) + + +def chars_token_ratio(dataset, tokenizer, nb_examples=400): + """ + Estimate the average number of characters per token in the dataset. + """ + total_characters, total_tokens = 0, 0 + for _, example in tqdm(zip(range(nb_examples), iter(dataset)), total=nb_examples): + text = prepare_sample_text(example) + total_characters += len(text) + if tokenizer.is_fast: + total_tokens += len(tokenizer(text).tokens()) + else: + total_tokens += len(tokenizer.tokenize(text)) + + return total_characters / total_tokens + + +def print_trainable_parameters(model): + """ + Prints the number of trainable parameters in the model. + """ + trainable_params = 0 + all_param = 0 + for _, param in model.named_parameters(): + all_param += param.numel() + if param.requires_grad: + trainable_params += param.numel() + print( + f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" + ) + + +def prepare_sample_text(example): + """Prepare the text from a sample of the dataset.""" + text = f"Question: {example['question']}\n\nAnswer: {example['response_j']}" + return text + + +def create_datasets(tokenizer, args, seed=None): + dataset = load_dataset( + args.dataset_name, + data_dir=args.subset, + split=args.split, + use_auth_token=True, + num_proc=args.num_workers if not args.streaming else None, + streaming=args.streaming, + ) + if args.streaming: + print("Loading the dataset in streaming mode") + valid_data = dataset.take(args.size_valid_set) + train_data = dataset.skip(args.size_valid_set) + train_data = train_data.shuffle(buffer_size=args.shuffle_buffer, seed=seed) + else: + dataset = dataset.train_test_split(test_size=0.005, seed=seed) + train_data = dataset["train"] + valid_data = dataset["test"] + print(f"Size of the train set: {len(train_data)}. Size of the validation set: {len(valid_data)}") + + chars_per_token = chars_token_ratio(train_data, tokenizer) + print(f"The character to token ratio of the dataset is: {chars_per_token:.2f}") + + train_dataset = ConstantLengthDataset( + tokenizer, + train_data, + formatting_func=prepare_sample_text, + infinite=True, + seq_length=args.seq_length, + chars_per_token=chars_per_token, + ) + valid_dataset = ConstantLengthDataset( + tokenizer, + valid_data, + formatting_func=prepare_sample_text, + infinite=False, + seq_length=args.seq_length, + chars_per_token=chars_per_token, + ) + return train_dataset, valid_dataset + + +bnb_config = None +if script_args.use_bnb: + bnb_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_compute_dtype=torch.bfloat16, + ) + +base_model = AutoModelForCausalLM.from_pretrained( + script_args.model_name, + quantization_config=bnb_config, + device_map={"": Accelerator().local_process_index}, + trust_remote_code=True, + use_auth_token=True, +) +base_model.config.use_cache = False + + +tokenizer = AutoTokenizer.from_pretrained(script_args.model_name, trust_remote_code=True) +tokenizer.pad_token = tokenizer.eos_token +tokenizer.padding_side = "right" # Fix weird overflow issue with fp16 training + +train_dataset, eval_dataset = create_datasets(tokenizer, script_args, seed=training_args.seed) + +trainer = SFTTrainer( + model=base_model, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + peft_config=peft_config, + max_seq_length=None, + formatting_func=prepare_sample_text, + processing_class=tokenizer, + args=training_args, +) +trainer.train() +trainer.save_model(training_args.output_dir) + +output_dir = os.path.join(training_args.output_dir, "final_checkpoint") +trainer.model.save_pretrained(output_dir) + +# Free memory for merging weights +del base_model +if is_torch_xpu_available(): + torch.xpu.empty_cache() +elif is_torch_npu_available(): + torch.npu.empty_cache() +else: + torch.cuda.empty_cache() + +model = AutoPeftModelForCausalLM.from_pretrained(output_dir, device_map="auto", torch_dtype=torch.bfloat16) +model = model.merge_and_unload() + +output_merged_dir = os.path.join(training_args.output_dir, "final_merged_checkpoint") +model.save_pretrained(output_merged_dir, safe_serialization=True) diff --git a/testbed/huggingface__trl/examples/research_projects/tools/calculator.py b/testbed/huggingface__trl/examples/research_projects/tools/calculator.py new file mode 100644 index 0000000000000000000000000000000000000000..bde5692f6230c0b730af41bd78e23262d79023b1 --- /dev/null +++ b/testbed/huggingface__trl/examples/research_projects/tools/calculator.py @@ -0,0 +1,118 @@ +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re + +import numpy as np +import torch +from transformers import AutoTokenizer, load_tool + +from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer, TextEnvironment + + +def generate_data(n): + """Generate random arithmetic tasks and answers.""" + tasks, answers = [], [] + for _ in range(n): + a = np.random.randint(0, 50) + b = np.random.randint(0, 50) + op = np.random.choice(["-", "+", "*"]) + tasks.append(f"\n\nWhat is {a} {op} {b}?") + if op == "-": + answers.append(a - b) + elif op == "+": + answers.append(a + b) + else: + answers.append(a * b) + return tasks, answers + + +def exact_match_reward(responses, answers=None): + """Reward if generated response contains correct answer.""" + rewards = [] + pattern = r"Result\s*=\s*(-?\d+(?:\.\d+)?)\s*" # generated by chatGPT + for response, answer in zip(responses, answers): + reward = 0.0 + predicted_number = None + match_pattern = re.findall(pattern, response) + if match_pattern: + predicted_number = float(match_pattern[0]) + if predicted_number is not None: + if np.abs(predicted_number - answer) < 0.01: + reward += 1.0 + rewards.append(torch.tensor(reward)) + return rewards + + +# set up models +model_id = "gpt2" +model = AutoModelForCausalLMWithValueHead.from_pretrained(model_id) +ref_model = AutoModelForCausalLMWithValueHead.from_pretrained(model_id) +tokenizer = AutoTokenizer.from_pretrained(model_id) +tokenizer.pad_token = tokenizer.eos_token + +# system prompt +prompt = """\ +What is 13-3? + +13-310.0 + +Result=10 + +What is 4*3? + +4*312.0 + +Result=12""" + +generation_kwargs = { + "min_length": -1, + "top_k": 0.0, + "top_p": 1.0, + "do_sample": True, + "pad_token_id": tokenizer.eos_token_id, + "eos_token_id": -1, + "max_new_tokens": 32, +} + +# trainer +ppo_config = PPOConfig( + batch_size=256, + learning_rate=1.41e-5, + mini_batch_size=64, + log_with="wandb", +) +ppo_trainer = PPOTrainer(ppo_config, model, ref_model, tokenizer) + +# text env +text_env = TextEnvironment( + model, + tokenizer, + {"SimpleCalculatorTool": load_tool("ybelkada/simple-calculator")}, + exact_match_reward, + prompt, + generation_kwargs=generation_kwargs, +) + +# main training loop +for _step in range(100): + tasks, answers = generate_data(ppo_config.batch_size) + queries, responses, masks, rewards, histories = text_env.run(tasks, answers=answers) + train_stats = ppo_trainer.step(queries, responses, rewards, masks) + + response_texts = [tokenizer.decode(response) for response in responses] + query_texts = [tokenizer.decode(query) for query in queries] + texts = {"query": [qt.split("")[-1].strip() for qt in query_texts], "response": response_texts} + ppo_trainer.log_stats(train_stats, texts, rewards, columns_to_log=["query", "response", "answer"]) +ppo_trainer.save_pretrained(model_id + "-calculator") diff --git a/testbed/huggingface__trl/examples/research_projects/tools/python_interpreter.py b/testbed/huggingface__trl/examples/research_projects/tools/python_interpreter.py new file mode 100644 index 0000000000000000000000000000000000000000..1b9d727c6feaa3e383bef2df606f0fa6514cf8a3 --- /dev/null +++ b/testbed/huggingface__trl/examples/research_projects/tools/python_interpreter.py @@ -0,0 +1,193 @@ +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re +from dataclasses import dataclass, field +from typing import Optional + +import numpy as np +import torch +from datasets import load_dataset +from peft import LoraConfig +from transformers import AutoTokenizer, HfArgumentParser, load_tool + +from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer, TextEnvironment + + +os.environ["HF_ALLOW_CODE_EVAL"] = "1" +os.environ["TOKENIZERS_PARALLELISM"] = "false" + + +@dataclass +class ScriptArguments: + model_name: Optional[str] = field(default="bigcode/starcoderbase", metadata={"help": "the model name"}) + learning_rate: Optional[float] = field(default=1e-5, metadata={"help": "the learning rate"}) + mini_batch_size: Optional[int] = field(default=1, metadata={"help": "the PPO minibatch size"}) + batch_size: Optional[int] = field(default=32, metadata={"help": "the batch size"}) + gradient_accumulation_steps: Optional[int] = field( + default=16, metadata={"help": "the number of gradient accumulation steps"} + ) + max_new_tokens: Optional[int] = field(default=256, metadata={"help": "max number of generated tokens per turn"}) + ppo_epochs: Optional[int] = field(default=1, metadata={"help": "max number of ppo epochs"}) + n_epochs: Optional[int] = field(default=32, metadata={"help": "max number of ppo epochs"}) + + +parser = HfArgumentParser(ScriptArguments) +script_args = parser.parse_args_into_dataclasses()[0] + + +def exact_match_reward(responses, answers=None): + """Reward if generated response contains correct answer.""" + rewards = [] + pattern = r"Result\s*=\s*(-?\d+(?:\.\d+)?)\s*" # generated by chatGPT + for response, answer in zip(responses, answers): + reward = 0.0 + try: + predicted_number = None + match_pattern = re.findall(pattern, response) + if match_pattern: + predicted_number = float(match_pattern[0]) + if predicted_number is not None: + if np.abs(predicted_number - float(answer)) < 0.1: + reward += 1.0 + except Exception: + pass + rewards.append(torch.tensor(reward)) + return rewards + + +def evaluate(test_dataloader, text_env, ppo_trainer): + test_rewards = [] + for test_batch in test_dataloader: + _, _, _, rewards, _ = text_env.run(test_batch["query"], answers=test_batch["answer"]) + test_rewards.extend(rewards) + test_rewards = ppo_trainer.accelerator.gather_for_metrics( + torch.stack(test_rewards).to(ppo_trainer.accelerator.device) + ) + return test_rewards.mean() + + +lora_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + target_modules=["c_proj", "c_attn", "q_attn"], +) + +# set up models +model = AutoModelForCausalLMWithValueHead.from_pretrained( + script_args.model_name, + use_auth_token=True, + load_in_4bit=True, + peft_config=lora_config, +) +tokenizer = AutoTokenizer.from_pretrained(script_args.model_name, use_auth_token=True) +tokenizer.pad_token = tokenizer.eos_token + +ds = load_dataset("openai/gsm8k", "main", split="train") +ds = ds.rename_columns({"question": "query"}) +ds = ds.map(lambda x: {"answer": x["answer"].split("#### ")[1]}) +ds = ds.select(range(1, len(ds))) # skip the first sample which is used in prompt + +ds_test = load_dataset("openai/gsm8k", "main", split="test") +ds_test = ds_test.rename_columns({"question": "query"}) +ds_test = ds_test.map(lambda x: {"answer": x["answer"].split("#### ")[1]}) + +test_dataloader = torch.utils.data.DataLoader(ds_test, batch_size=script_args.batch_size) + +# prompt +prompt = """\ +Example of using a Python API to solve math questions. + +Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left? + + +def solution(): + money_initial = 23 + bagels = 5 + bagel_cost = 3 + money_spent = bagels * bagel_cost + money_left = money_initial - money_spent + result = money_left + return result +print(solution()) +72 + +Result = 72 + +Q: """ + +generation_kwargs = { + "min_length": -1, + "top_k": 0.0, + "top_p": 1.0, + "do_sample": True, + "pad_token_id": tokenizer.eos_token_id, + "eos_token_id": -1, + "max_new_tokens": script_args.max_new_tokens, +} + +# trainer +ppo_config = PPOConfig( + batch_size=script_args.batch_size, + learning_rate=script_args.learning_rate, + mini_batch_size=script_args.mini_batch_size, + ppo_epochs=script_args.ppo_epochs, + gradient_accumulation_steps=script_args.gradient_accumulation_steps, + log_with="wandb", + tracker_project_name="trl-gsm8k", + remove_unused_columns=False, + optimize_cuda_cache=True, +) + +ppo_trainer = PPOTrainer(config=ppo_config, model=model, tokenizer=tokenizer, dataset=ds) +test_dataloader = ppo_trainer.accelerator.prepare(test_dataloader) + +# text env +text_env = TextEnvironment( + model, + tokenizer, + [load_tool("lvwerra/python-interpreter")], + exact_match_reward, + prompt, + max_turns=2, + generation_kwargs=generation_kwargs, +) + +# main training loop +for epoch in range(script_args.n_epochs): + for step, batch in enumerate(ppo_trainer.dataloader): + if (step == 0) and (epoch % 4 == 0): # evaluate every 4 epochs + reward_mean_test = evaluate(test_dataloader, text_env, ppo_trainer) + else: + reward_mean_test = None + + queries, responses, masks, rewards, histories = text_env.run(batch["query"], answers=batch["answer"]) + train_stats = ppo_trainer.step(queries, responses, rewards, masks) + + # logging + if reward_mean_test is not None: + train_stats["env/reward_mean_test"] = reward_mean_test + texts = { + "query": batch["query"], + "response": [tokenizer.decode(response) for response in responses], + "answer": batch["answer"], + } + ppo_trainer.log_stats(train_stats, texts, rewards, columns_to_log=["query", "response", "answer"]) + +reward_mean_test = evaluate(test_dataloader, text_env, ppo_trainer) +ppo_trainer.save_pretrained(f"model/{script_args.model_name}-gsm8k") diff --git a/testbed/huggingface__trl/examples/research_projects/tools/triviaqa.py b/testbed/huggingface__trl/examples/research_projects/tools/triviaqa.py new file mode 100644 index 0000000000000000000000000000000000000000..bdf2c822873e519bc9ee86168c945376f62bc784 --- /dev/null +++ b/testbed/huggingface__trl/examples/research_projects/tools/triviaqa.py @@ -0,0 +1,192 @@ +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from dataclasses import dataclass, field +from typing import Optional + +import torch +from datasets import load_dataset +from peft import LoraConfig +from transformers import AutoTokenizer, HfArgumentParser, load_tool + +from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer, TextEnvironment + + +os.environ["HF_ALLOW_CODE_EVAL"] = "1" +os.environ["TOKENIZERS_PARALLELISM"] = "false" + + +@dataclass +class ScriptArguments: + model_name: Optional[str] = field(default="bigcode/starcoderbase", metadata={"help": "the model name"}) + log_with: Optional[str] = field(default=None, metadata={"help": "use 'wandb' to log with wandb"}) + learning_rate: Optional[float] = field(default=1e-5, metadata={"help": "the learning rate"}) + mini_batch_size: Optional[int] = field(default=1, metadata={"help": "the PPO minibatch size"}) + batch_size: Optional[int] = field(default=32, metadata={"help": "the batch size"}) + gradient_accumulation_steps: Optional[int] = field( + default=16, metadata={"help": "the number of gradient accumulation steps"} + ) + max_new_tokens: Optional[int] = field(default=256, metadata={"help": "max number of generated tokens per turn"}) + ppo_epochs: Optional[int] = field(default=1, metadata={"help": "max number of ppo epochs"}) + iterations: Optional[int] = field(default=1000, metadata={"help": "the number of iterations"}) + seed: Optional[int] = field(default=0, metadata={"help": "the random seed"}) + + +parser = HfArgumentParser(ScriptArguments) +script_args = parser.parse_args_into_dataclasses()[0] + +lora_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + target_modules=["c_proj", "c_attn", "q_attn"], +) + +# set up models +model = AutoModelForCausalLMWithValueHead.from_pretrained( + script_args.model_name, + use_auth_token=True, + trust_remote_code=True, + load_in_4bit=True, + peft_config=lora_config, +) +tokenizer = AutoTokenizer.from_pretrained(script_args.model_name, use_auth_token=True) +tokenizer.pad_token = tokenizer.eos_token + +# system prompt +prompt = """\ +Answer the following question: + +Q: In which branch of the arts is Patricia Neary famous? +A: Ballets +A2: Patricia NearyPatricia Neary (born October 27, 1942) is an American ballerina, choreographer and ballet director, who has been particularly active in Switzerland. She has also been a highly successful ambassador for the Balanchine Trust, bringing George Balanchine's ballets to 60 cities around the globe. +Result=Ballets + +Q: Who won Super Bowl XX? +A: Chicago Bears +A2: Super Bowl XXSuper Bowl XX was an American football game between the National Football Conference (NFC) champion Chicago Bears and the American Football Conference (AFC) champion New England Patriots to decide the National Football League (NFL) champion for the 1985 season. The Bears defeated the Patriots by the score of 46–10, capturing their first NFL championship (and Chicago's first overall sports victory) since 1963, three years prior to the birth of the Super Bowl. Super Bowl XX was played on January 26, 1986 at the Louisiana Superdome in New Orleans. +Result=Chicago Bears + +Q: """ + +generation_kwargs = { + "min_length": -1, + "top_k": 0.0, + "top_p": 1.0, + "do_sample": True, + "pad_token_id": tokenizer.eos_token_id, + "eos_token_id": -1, + "max_new_tokens": script_args.max_new_tokens, +} + +# trainer +config = PPOConfig( + batch_size=script_args.batch_size, + model_name=script_args.model_name, + learning_rate=script_args.learning_rate, + log_with=script_args.log_with, + mini_batch_size=script_args.mini_batch_size, + ppo_epochs=script_args.ppo_epochs, + gradient_accumulation_steps=script_args.gradient_accumulation_steps, + seed=script_args.seed, + optimize_cuda_cache=True, +) +ppo_trainer = PPOTrainer(config=config, model=model, tokenizer=tokenizer) +dataset = load_dataset("mandarjoshi/trivia_qa", "rc", split="train") +local_seed = script_args.seed + ppo_trainer.accelerator.process_index * 100003 # Prime +dataset = dataset.shuffle(local_seed) + + +def data_generator(): + for i in range(len(dataset)): + yield dataset[i]["question"], list(dataset[i]["answer"]["normalized_aliases"]) + + +gen = data_generator() +gen = iter(gen) + + +def generate_data(n): + tasks, answers = [], [] + for _i in range(n): + q, a = next(gen) + tasks.append(q) + answers.append(a) + return tasks, answers + + +def exact_match_reward(responses, answers=None): + """Reward if generated response contains correct answer.""" + rewards = [] + for response, answer in zip(responses, answers): + reward = 0.0 + for a in answer: + if a.lower() in response.lower(): + reward += 1.0 + break + rewards.append(torch.tensor(reward)) + return rewards + + +def tool_fn(x): + # limit the amount of tokens + return tool(x).split("\n")[1][:600] + + +# text env +tool = load_tool("vwxyzjn/pyserini-wikipedia-kilt-doc") + +text_env = TextEnvironment( + model, + tokenizer, + {"Wiki": tool_fn}, + exact_match_reward, + prompt, + generation_kwargs=generation_kwargs, + max_tool_reponse=400, +) + + +def print_trainable_parameters(model): + trainable_params = 0 + all_param = 0 + for _, param in model.named_parameters(): + all_param += param.numel() + if param.requires_grad: + trainable_params += param.numel() + print( + f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" + ) + + +print_trainable_parameters(model) +# main training loop +for i in range(script_args.iterations): + tasks, answers = generate_data(config.batch_size) + queries, responses, masks, rewards, histories = text_env.run(tasks, answers=answers) + train_stats = ppo_trainer.step(queries, responses, rewards, masks) + response_texts = [tokenizer.decode(response) for response in responses] + query_texts = [tokenizer.decode(query) for query in queries] + texts = { + "query": [qt.split("")[-1].strip() for qt in query_texts], + "response": response_texts, + "answer": [", ".join(item) for item in answers], + } + all_rewards = ppo_trainer.accelerator.gather(torch.tensor(rewards, device=ppo_trainer.accelerator.device)) + ppo_trainer.log_stats(train_stats, texts, list(all_rewards), columns_to_log=["query", "response", "answer"]) + if i % 100 == 0: + ppo_trainer.save_pretrained(f"models/{script_args.model_name}_{script_args.seed}_{i}_triviaqa") diff --git a/testbed/huggingface__trl/examples/research_projects/toxicity/README.md b/testbed/huggingface__trl/examples/research_projects/toxicity/README.md new file mode 100644 index 0000000000000000000000000000000000000000..85967ab57ec5eeb10ea9eb6e372a62a0522e4d7e --- /dev/null +++ b/testbed/huggingface__trl/examples/research_projects/toxicity/README.md @@ -0,0 +1,7 @@ +# De-detoxifying language models + +To run this code, do the following: + +```shell +ACCELERATE_LOG_LEVEL=info accelerate launch --config_file {CONFIG} examples/research_projects/toxicity/scripts/gpt-j-6b-toxicity.py --log_with wandb +``` diff --git a/testbed/huggingface__trl/examples/research_projects/toxicity/scripts/evaluate-toxicity.py b/testbed/huggingface__trl/examples/research_projects/toxicity/scripts/evaluate-toxicity.py new file mode 100644 index 0000000000000000000000000000000000000000..fca714706fb7514be55093fd972edd5a5aea16ed --- /dev/null +++ b/testbed/huggingface__trl/examples/research_projects/toxicity/scripts/evaluate-toxicity.py @@ -0,0 +1,146 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import csv + +import evaluate +import numpy as np +import torch +from datasets import load_dataset +from tqdm import tqdm +from transformers import AutoModelForCausalLM, AutoTokenizer, is_torch_npu_available, is_torch_xpu_available + + +toxicity = evaluate.load("ybelkada/toxicity", "DaNLP/da-electra-hatespeech-detection", module_type="measurement") +ds = load_dataset("OxAISH-AL-LLM/wiki_toxic", split="test") + +parser = argparse.ArgumentParser(description="Evaluate de-toxified models") +parser.add_argument("--model_type", default="all", type=str, help="Relative path to the source model folder") +parser.add_argument("--output_file", default="toxicity.csv", type=str, help="Relative path to the source model folder") +parser.add_argument("--batch_size", default=64, type=int, help="Batch size") +parser.add_argument("--num_samples", default=400, type=int, help="Number of samples") +parser.add_argument("--context_length", default=2000, type=int, help="Number of samples") +parser.add_argument("--max_new_tokens", default=30, type=int, help="Max new tokens for generation") +args = parser.parse_args() + + +if args.model_type == "all": + MODELS_TO_TEST = [ + "ybelkada/gpt-neo-125m-detox", + "EleutherAI/gpt-neo-125M", + "EleutherAI/gpt-neo-2.7B", + "ybelkada/gpt-neo-2.7B-detox", + "ybelkada/gpt-j-6b-sharded-bf16", + "ybelkada/gpt-j-6b-detoxs", + ] +elif args.model_type == "gpt-neo": + MODELS_TO_TEST = [ + "ybelkada/gpt-neo-125m-detox", + "EleutherAI/gpt-neo-125M", + "EleutherAI/gpt-neo-2.7B", + "ybelkada/gpt-neo-2.7B-detox", + ] +elif args.model_type == "gpt-j": + MODELS_TO_TEST = [ + "ybelkada/gpt-j-6b-sharded-bf16", + "ybelkada/gpt-j-6b-detox", + ] +else: + MODELS_TO_TEST = [args.model_type] +NUM_SAMPLES = args.num_samples +BATCH_SIZE = args.batch_size +output_file = args.output_file +max_new_tokens = args.max_new_tokens +context_length = args.context_length +if is_torch_xpu_available(): + device = torch.xpu.current_device() +elif is_torch_npu_available(): + device = torch.npu.current_device() +else: + device = torch.cuda.current_device() if torch.cuda.is_available() else "cpu" + +# consider only toxic prompts +ds = ds.filter(lambda x: x["label"] == 1) + +toxicities = {} + +# open a csv file +file = open(f"{output_file}", "w", newline="") +writer = csv.writer(file) +# add first rows +writer.writerow(["model_id", "mean_toxicity", "std_toxicity"]) + + +for model_id in tqdm(MODELS_TO_TEST): + model = AutoModelForCausalLM.from_pretrained(model_id, device_map={"": device}, torch_dtype=torch.bfloat16) + tokenizer = AutoTokenizer.from_pretrained(model_id) + tokenizer.pad_token = tokenizer.eos_token + tokenizer.padding_side = "left" + input_texts = [] + + for i, example in enumerate(ds): + # set seed + torch.manual_seed(42) + + input_text = example["comment_text"] + input_texts.append(input_text[:2000]) + + if i > NUM_SAMPLES: + break + + if (i + 1) % BATCH_SIZE == 0: + inputs = tokenizer(input_texts, return_tensors="pt", padding=True).to(device) + inputs.input_ids = inputs.input_ids[:context_length] + inputs.attention_mask = inputs.attention_mask[:context_length] + outputs = model.generate(**inputs, do_sample=True, max_new_tokens=max_new_tokens, use_cache=True) + generated_texts = tokenizer.batch_decode(outputs, skip_special_tokens=True) + generated_texts = [ + generated_text.replace(input_texts[i], "") for i, generated_text in enumerate(generated_texts) + ] + toxicity_score = toxicity.compute(predictions=generated_texts) + input_texts = [] + + if model_id not in toxicities: + toxicities[model_id] = [] + toxicities[model_id].extend(toxicity_score["toxicity"]) + + # last batch + inputs = tokenizer(input_texts, return_tensors="pt", padding=True).to(device) + outputs = model.generate(**inputs, do_sample=True, max_new_tokens=30) + generated_texts = tokenizer.batch_decode(outputs, skip_special_tokens=True) + generated_texts = [generated_text.replace(input_texts[i], "") for i, generated_text in enumerate(generated_texts)] + toxicity_score = toxicity.compute(predictions=generated_texts) + toxicities[model_id].extend(toxicity_score["toxicity"]) + + # compute mean & std using np + mean = np.mean(toxicities[model_id]) + std = np.std(toxicities[model_id]) + + # save to file + writer.writerow([model_id, mean, std]) + + # print + print(f"Model: {model_id} - Mean: {mean} - Std: {std}") + + model = None + if is_torch_xpu_available(): + torch.xpu.empty_cache() + elif is_torch_npu_available(): + torch.npu.empty_cache() + else: + torch.cuda.empty_cache() + +# close file +file.close() diff --git a/testbed/huggingface__trl/examples/research_projects/toxicity/scripts/gpt-j-6b-toxicity.py b/testbed/huggingface__trl/examples/research_projects/toxicity/scripts/gpt-j-6b-toxicity.py new file mode 100644 index 0000000000000000000000000000000000000000..51f6d284c40c6c3f672ac1fe78f56e15ace5fccc --- /dev/null +++ b/testbed/huggingface__trl/examples/research_projects/toxicity/scripts/gpt-j-6b-toxicity.py @@ -0,0 +1,237 @@ +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from dataclasses import dataclass, field +from typing import Optional + +import torch +from datasets import load_dataset +from torch.optim import Adam +from tqdm import tqdm +from transformers import ( + AutoModelForCausalLM, + AutoTokenizer, + HfArgumentParser, + RobertaForSequenceClassification, + RobertaTokenizer, +) + +from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer, create_reference_model, set_seed +from trl.core import LengthSampler + + +tqdm.pandas() + +######################################################################## +# This is a fully working simple example to use trl with accelerate. +# +# This example fine-tunes a GPTJ model to generate less toxic contents +# by using allenai/real-toxicity-prompts dataset. We use PPO +# (proximal policy optimization) to optimize the model. +# in any of the following settings (with the same script): +# - single CPU or single GPU +# - multi GPUS (using PyTorch distributed mode) +# - multi GPUS (using DeepSpeed ZeRO-Offload stages 1 & 2) +# - fp16 (mixed-precision) or fp32 (normal precision) +# +# To run it in each of these various modes, first initialize the accelerate +# configuration with `accelerate config` +# +######################################################################## + + +# We first define the configuration of the experiment, defining the model, the dataset, +# the training parameters, and the PPO parameters. +# Check the default arguments in the `PPOConfig` class for more details. +# If you want to log with tensorboard, add the kwarg +# `project_kwargs={"logging_dir": PATH_TO_LOGS}` to the PPOConfig. +@dataclass +class ScriptArguments: + """ + The name of the Casual LM model we wish to fine-tune with PPO + """ + + # NOTE: gpt2 models use Conv1D instead of Linear layers which are not yet supported in 8 bit mode + # models like gpt-neo* models are more suitable. + model_name: Optional[str] = field(default="ybelkada/gpt-j-6b-sharded-bf16", metadata={"help": "the model name"}) + log_with: Optional[str] = field(default=None, metadata={"help": "use 'wandb' to log with wandb"}) + learning_rate: Optional[float] = field(default=(1.47e-5) * 2, metadata={"help": "the learning rate"}) + mini_batch_size: Optional[int] = field(default=4, metadata={"help": "the PPO minibatch size"}) + batch_size: Optional[int] = field(default=16, metadata={"help": "the batch size"}) + gradient_accumulation_steps: Optional[int] = field( + default=1, metadata={"help": "the number of gradient accumulation steps"} + ) + model_save_path: Optional[str] = field( + default="./gpt-j-6B-detoxified-long-context-26-shl-1e4-final", + metadata={"help": "the path to save the model"}, + ) + + +parser = HfArgumentParser(ScriptArguments) +script_args = parser.parse_args_into_dataclasses()[0] + +config = PPOConfig( + model_name=script_args.model_name, + learning_rate=script_args.learning_rate, + log_with=script_args.log_with, + ppo_epochs=100, + mini_batch_size=script_args.mini_batch_size, + batch_size=script_args.batch_size, + gradient_accumulation_steps=script_args.gradient_accumulation_steps, +) + + +# Below is an example function to build the dataset. In our case, we use the IMDB dataset +# from the `datasets` library. One should customize this function to train the model on +# its own dataset. +def build_dataset( + config, dataset_name="allenai/real-toxicity-prompts", input_min_text_length=5, input_max_text_length=10 +): + """ + Build dataset for training. This builds the dataset from `load_dataset`, one should + customize this function to train the model on its own dataset. + + Args: + dataset_name (`str`): + The name of the dataset to be loaded. + + Returns: + dataloader (`torch.utils.data.DataLoader`): + The dataloader for the dataset. + """ + tokenizer = AutoTokenizer.from_pretrained(config.model_name) + tokenizer.pad_token = tokenizer.eos_token + + ds = load_dataset(dataset_name, split="train") + + def filter_fn(sample): + toxicity = sample["prompt"]["toxicity"] + return toxicity is not None and toxicity > 0.3 + + ds = ds.filter(filter_fn, batched=False) + + input_size = LengthSampler(input_min_text_length, input_max_text_length) + + def tokenize(sample): + prompt = sample["prompt"]["text"] + continuation = sample["continuation"]["text"] + + sample["input_ids"] = tokenizer.encode(prompt + continuation)[: input_size()] + sample["query"] = tokenizer.decode(sample["input_ids"]) + return sample + + ds = ds.map(tokenize, batched=False) + ds.set_format(type="torch") + + ds = ds.train_test_split(test_size=0.2, shuffle=False)["train"] + + return ds + + +# We retrieve the dataloader by calling the `build_dataset` function. +min_input_length = 30 +max_input_length = 40 +dataset = build_dataset(config, input_min_text_length=min_input_length, input_max_text_length=max_input_length) + + +def collator(data): + return {key: [d[key] for d in data] for key in data[0]} + + +# set seed before initializing value head for deterministic eval +set_seed(config.seed) + +# Now let's build the model, the reference model, and the tokenizer. We first load the model +# in bfloat16 to save memory using `transformers`. +model = AutoModelForCausalLM.from_pretrained(config.model_name, torch_dtype=torch.bfloat16) +# And then we pass the loaded model to `AutoModelForCausalLMWithValueHead`. +model = AutoModelForCausalLMWithValueHead.from_pretrained(model) + +# We create a reference model by sharing 20 layers +ref_model = create_reference_model(model, num_shared_layers=20) + +# We make sure to use `Adam` optimizer on the model parameters that require gradients. +optimizer = Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=config.learning_rate) + +# GPT-2 / GPT-J tokenizer has a pad token, but it is not eos_token by default. We need to set it to eos_token. +# only for this model. +tokenizer = AutoTokenizer.from_pretrained(config.model_name) +tokenizer.pad_token = tokenizer.eos_token + +# We then build the PPOTrainer, passing the model, the reference model, the tokenizer +ppo_trainer = PPOTrainer( + config, + model, + ref_model=ref_model, + tokenizer=tokenizer, + dataset=dataset, + data_collator=collator, + optimizer=optimizer, +) + +# We then build the reward pipeline, we will use the toxicity model to compute the reward. +# We first load the toxicity model and tokenizer. +toxicity_model_id = "facebook/roberta-hate-speech-dynabench-r4-target" +toxicity_tokenizer = RobertaTokenizer.from_pretrained(toxicity_model_id) +# We load the toxicity model in fp16 to save memory. +toxicity_model = RobertaForSequenceClassification.from_pretrained(toxicity_model_id, torch_dtype=torch.float16).to( + ppo_trainer.accelerator.device +) + + +# We then define the arguments to pass to the `generate` function. These arguments +# are passed to the `generate` function of the PPOTrainer, which is a wrapper around +# the `generate` function of the trained model. +generation_kwargs = { + "min_length": -1, + "top_k": 0.0, + "top_p": 1.0, + "do_sample": True, + "pad_token_id": tokenizer.eos_token_id, +} +output_min_length = 20 +output_max_length = 30 +output_length_sampler = LengthSampler(output_min_length, output_max_length) + +model_save_path = script_args.model_save_path + +for epoch, batch in tqdm(enumerate(ppo_trainer.dataloader)): + query_tensors = batch["input_ids"] + + # Get response from the policy model + response_tensors = [] + for query in query_tensors: + gen_len = output_length_sampler() + generation_kwargs["max_new_tokens"] = gen_len + response = ppo_trainer.generate(query, **generation_kwargs) + response_tensors.append(response.squeeze()[-gen_len:]) + batch["response"] = [tokenizer.decode(r.squeeze()) for r in response_tensors] + + # Compute sentiment score + texts = batch["response"] + toxicity_inputs = toxicity_tokenizer(texts, padding=True, truncation=True, return_tensors="pt").to( + ppo_trainer.accelerator.device + ) + logits = toxicity_model(**toxicity_inputs).logits.float() + toxicity_labels = (logits[:, 0]).tolist() + + rewards = [torch.tensor(output) for output in toxicity_labels] + + # Run PPO step + stats = ppo_trainer.step(query_tensors, response_tensors, rewards) + ppo_trainer.log_stats(stats, batch, rewards) + + # Save model every 100 epochs + if epoch % 100 == 0: + if ppo_trainer.accelerator.is_main_process: + ppo_trainer.save_pretrained(model_save_path) diff --git a/testbed/huggingface__trl/examples/scripts/alignprop.py b/testbed/huggingface__trl/examples/scripts/alignprop.py new file mode 100644 index 0000000000000000000000000000000000000000..1948080f4b4cff9b02e3f3df08eb2824aa783a27 --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/alignprop.py @@ -0,0 +1,135 @@ +# Copyright 2023 metric-space, The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Total Batch size = 128 = 4 (num_gpus) * 8 (per_device_batch) * 4 (accumulation steps) +Feel free to reduce batch size or increasing truncated_rand_backprop_min to a higher value to reduce memory usage. + +CUDA_VISIBLE_DEVICES=0,1,2,3 python examples/scripts/alignprop.py \ + --num_epochs=20 \ + --train_gradient_accumulation_steps=4 \ + --sample_num_steps=50 \ + --train_batch_size=8 \ + --tracker_project_name="stable_diffusion_training" \ + --log_with="wandb" + +""" + +from dataclasses import dataclass, field + +import numpy as np +from transformers import HfArgumentParser + +from trl import AlignPropConfig, AlignPropTrainer, DefaultDDPOStableDiffusionPipeline +from trl.models.auxiliary_modules import aesthetic_scorer + + +@dataclass +class ScriptArguments: + pretrained_model: str = field( + default="runwayml/stable-diffusion-v1-5", metadata={"help": "the pretrained model to use"} + ) + pretrained_revision: str = field(default="main", metadata={"help": "the pretrained model revision to use"}) + hf_hub_model_id: str = field( + default="alignprop-finetuned-stable-diffusion", metadata={"help": "HuggingFace repo to save model weights to"} + ) + hf_hub_aesthetic_model_id: str = field( + default="trl-lib/ddpo-aesthetic-predictor", + metadata={"help": "HuggingFace model ID for aesthetic scorer model weights"}, + ) + hf_hub_aesthetic_model_filename: str = field( + default="aesthetic-model.pth", + metadata={"help": "HuggingFace model filename for aesthetic scorer model weights"}, + ) + use_lora: bool = field(default=True, metadata={"help": "Whether to use LoRA."}) + + +# list of example prompts to feed stable diffusion +animals = [ + "cat", + "dog", + "horse", + "monkey", + "rabbit", + "zebra", + "spider", + "bird", + "sheep", + "deer", + "cow", + "goat", + "lion", + "frog", + "chicken", + "duck", + "goose", + "bee", + "pig", + "turkey", + "fly", + "llama", + "camel", + "bat", + "gorilla", + "hedgehog", + "kangaroo", +] + + +def prompt_fn(): + return np.random.choice(animals), {} + + +def image_outputs_logger(image_pair_data, global_step, accelerate_logger): + # For the sake of this example, we will only log the last batch of images + # and associated data + result = {} + images, prompts, _ = [image_pair_data["images"], image_pair_data["prompts"], image_pair_data["rewards"]] + for i, image in enumerate(images[:4]): + prompt = prompts[i] + result[f"{prompt}"] = image.unsqueeze(0).float() + accelerate_logger.log_images( + result, + step=global_step, + ) + + +if __name__ == "__main__": + parser = HfArgumentParser((ScriptArguments, AlignPropConfig)) + script_args, training_args = parser.parse_args_into_dataclasses() + training_args.project_kwargs = { + "logging_dir": "./logs", + "automatic_checkpoint_naming": True, + "total_limit": 5, + "project_dir": "./save", + } + + pipeline = DefaultDDPOStableDiffusionPipeline( + script_args.pretrained_model, + pretrained_model_revision=script_args.pretrained_revision, + use_lora=script_args.use_lora, + ) + trainer = AlignPropTrainer( + training_args, + aesthetic_scorer(script_args.hf_hub_aesthetic_model_id, script_args.hf_hub_aesthetic_model_filename), + prompt_fn, + pipeline, + image_samples_hook=image_outputs_logger, + ) + + trainer.train() + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) diff --git a/testbed/huggingface__trl/examples/scripts/bco.py b/testbed/huggingface__trl/examples/scripts/bco.py new file mode 100644 index 0000000000000000000000000000000000000000..d37a1fb3a945cc369dbafb12334539ccba67a094 --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/bco.py @@ -0,0 +1,167 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Run the BCO training script with the commands below. In general, the optimal configuration for BCO will be similar to that of KTO. + +# Full training: +python examples/scripts/bco.py \ + --model_name_or_path Qwen/Qwen2.5-0.5B-Instruct \ + --trust_remote_code \ + --dataset_name trl-lib/ultrafeedback-gpt-3.5-turbo-helpfulness \ + --per_device_train_batch_size 16 \ + --per_device_eval_batch_size 32 \ + --num_train_epochs 1 \ + --learning_rate 1e-6 \ + --gradient_checkpointing \ + --gradient_accumulation_steps 1 \ + --logging_steps 0.01 \ + --eval_steps 0.2 \ + --save_strategy no \ + --output_dir=bco-aligned-model \ + --logging_first_step \ + --max_length 2048 \ + --max_prompt_length 1536 \ + --max_completion_length 1024 \ + --no_remove_unused_columns \ + --warmup_ratio 0.1 \ + --bf16 \ + --report_to wandb + +# QLoRA: +python examples/scripts/bco.py \ + --model_name_or_path=nnheui/stablelm-2-1_6b-sft-full \ + --per_device_train_batch_size 16 \ + --per_device_eval_batch_size 32 \ + --num_train_epochs 1 \ + --learning_rate 1e-6 \ + --gradient_checkpointing \ + --gradient_accumulation_steps 1 \ + --logging_steps 0.01 \ + --eval_steps 0.2 \ + --save_strategy no \ + --output_dir=bco-aligned-model-lora \ + --logging_first_step \ + --warmup_ratio 0.1 \ + --report_to wandb \ + --max_length 2048 \ + --max_prompt_length 1536 \ + --max_completion_length 1024 \ + --no_remove_unused_columns \ + --warmup_ratio 0.1 \ + --bf16 \ + --use_peft \ + --load_in_4bit \ + --lora_target_modules=all-linear \ + --lora_r=16 \ + --lora_alpha=16 +""" + +from functools import partial + +import torch +import torch.nn.functional as F +from accelerate import Accelerator +from datasets import load_dataset +from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, PreTrainedModel + +from trl import BCOConfig, BCOTrainer, ModelConfig, ScriptArguments, get_peft_config, setup_chat_format + + +def embed_prompt(input_ids: torch.LongTensor, attention_mask: torch.LongTensor, model: PreTrainedModel): + """ + Borrowed from https://huggingface.co/nomic-ai/nomic-embed-text-v1.5#transformers + """ + + def mean_pooling(model_output, attention_mask): + token_embeddings = model_output[0] + input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() + return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) + + with torch.no_grad(): + model_output = model(input_ids=input_ids, attention_mask=attention_mask) + embeddings = mean_pooling(model_output, attention_mask) + + matryoshka_dim = 512 + # normalize embeddings + embeddings = F.normalize(embeddings, p=2, dim=1) + embeddings = F.layer_norm(embeddings, normalized_shape=(embeddings.shape[1],)) + embeddings = embeddings[:, :matryoshka_dim] + + return embeddings + + +if __name__ == "__main__": + parser = HfArgumentParser((ScriptArguments, BCOConfig, ModelConfig)) + script_args, training_args, model_args = parser.parse_args_into_dataclasses() + + training_args.gradient_checkpointing_kwargs = {"use_reentrant": True} + + # Load a pretrained model + model = AutoModelForCausalLM.from_pretrained( + model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code + ) + ref_model = AutoModelForCausalLM.from_pretrained( + model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code + ) + + tokenizer = AutoTokenizer.from_pretrained( + model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code + ) + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + + # If we are aligning a base model, we use ChatML as the default template + if tokenizer.chat_template is None: + model, tokenizer = setup_chat_format(model, tokenizer) + + dataset = load_dataset(script_args.dataset_name) + + accelerator = Accelerator() + embedding_model = AutoModel.from_pretrained( + "nomic-ai/nomic-embed-text-v1.5", + trust_remote_code=model_args.trust_remote_code, + safe_serialization=True, + torch_dtype=torch.bfloat16, + device_map="auto", + ) + embedding_model = accelerator.prepare_model(embedding_model) + embedding_tokenizer = AutoTokenizer.from_pretrained( + "bert-base-uncased", trust_remote_code=model_args.trust_remote_code + ) + embedding_func = partial( + embed_prompt, + model=embedding_model, + ) + + # Initialize the BCO trainer + trainer = BCOTrainer( + model, + ref_model, + args=training_args, + train_dataset=dataset[script_args.dataset_train_split], + eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, + processing_class=tokenizer, + peft_config=get_peft_config(model_args), + embedding_func=embedding_func, + embedding_tokenizer=embedding_tokenizer, + ) + + # Train and push the model to the Hub + trainer.train() + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) diff --git a/testbed/huggingface__trl/examples/scripts/chat.py b/testbed/huggingface__trl/examples/scripts/chat.py new file mode 100644 index 0000000000000000000000000000000000000000..d29200055c73b42b57594b0cc8087b247d87ea8d --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/chat.py @@ -0,0 +1,368 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import copy +import json +import os +import pwd +import re +import sys +import time +from threading import Thread + +import torch +from rich.console import Console +from rich.live import Live +from rich.markdown import Markdown +from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer + +from trl import TrlParser, init_zero_verbose +from trl.commands.cli_utils import ChatArguments +from trl.trainer.utils import get_quantization_config + + +init_zero_verbose() + +HELP_STRING = """\ + +**TRL CHAT INTERFACE** + +The chat interface is a simple tool to try out a chat model. + +Besides talking to the model there are several commands: +- **clear**: clears the current conversation and start a new one +- **example {NAME}**: load example named `{NAME}` from the config and use it as the user input +- **set {SETTING_NAME}={SETTING_VALUE};**: change the system prompt or generation settings (multiple settings are separated by a ';'). +- **reset**: same as clear but also resets the generation configs to defaults if they have been changed by **set** +- **save {SAVE_NAME} (optional)**: save the current chat and settings to file by default to `./chat_history/{MODEL_NAME}/chat_{DATETIME}.yaml` or `{SAVE_NAME}` if provided +- **exit**: closes the interface +""" + +SUPPORTED_GENERATION_KWARGS = [ + "max_new_tokens", + "do_sample", + "num_beams", + "temperature", + "top_p", + "top_k", + "repetition_penalty", +] + +SETTING_RE = r"^set\s+[A-Za-z\s_]+=[A-Za-z\d\s.!\"#$%&'()*+,-/:<=>?@\[\]^_`{|}~]+(?:;\s*[A-Za-z\s_]+=[A-Za-z\d\s.!\"#$%&'()*+,-/:<=>?@\[\]^_`{|}~]+)*$" + + +class RichInterface: + def __init__(self, model_name=None, user_name=None): + self._console = Console() + if model_name is None: + self.model_name = "assistant" + else: + self.model_name = model_name + if user_name is None: + self.user_name = "user" + else: + self.user_name = user_name + + def stream_output(self, output_stream): + """Stream output from a role.""" + # This method is originally from the FastChat CLI: https://github.com/lm-sys/FastChat/blob/main/fastchat/serve/cli.py + # Create a Live context for updating the console output + text = "" + self._console.print(f"[bold blue]<{self.model_name}>:") + with Live(console=self._console, refresh_per_second=4) as live: + # Read lines from the stream + for i, outputs in enumerate(output_stream): + if not outputs or i == 0: + continue + text += outputs + # Render the accumulated text as Markdown + # NOTE: this is a workaround for the rendering "unstandard markdown" + # in rich. The chatbots output treat "\n" as a new line for + # better compatibility with real-world text. However, rendering + # in markdown would break the format. It is because standard markdown + # treat a single "\n" in normal text as a space. + # Our workaround is adding two spaces at the end of each line. + # This is not a perfect solution, as it would + # introduce trailing spaces (only) in code block, but it works well + # especially for console output, because in general the console does not + # care about trailing spaces. + lines = [] + for line in text.splitlines(): + lines.append(line) + if line.startswith("```"): + # Code block marker - do not add trailing spaces, as it would + # break the syntax highlighting + lines.append("\n") + else: + lines.append(" \n") + markdown = Markdown("".join(lines).strip(), code_theme="github-dark") + # Update the Live console output + live.update(markdown) + self._console.print() + return text + + def input(self): + input = self._console.input(f"[bold red]<{self.user_name}>:\n") + self._console.print() + return input + + def clear(self): + self._console.clear() + + def print_user_message(self, text): + self._console.print(f"[bold red]<{self.user_name}>:[/ bold red]\n{text}") + self._console.print() + + def print_green(self, text): + self._console.print(f"[bold green]{text}") + self._console.print() + + def print_red(self, text): + self._console.print(f"[bold red]{text}") + self._console.print() + + def print_help(self): + self._console.print(Markdown(HELP_STRING)) + self._console.print() + + +def get_username(): + return pwd.getpwuid(os.getuid())[0] + + +def create_default_filename(model_name): + time_str = time.strftime("%Y-%m-%d_%H-%M-%S") + return f"{model_name}/chat_{time_str}.json" + + +def save_chat(chat, args, filename): + output_dict = {} + output_dict["settings"] = vars(args) + output_dict["chat_history"] = chat + + folder = args.save_folder + + if filename is None: + filename = create_default_filename(args.model_name_or_path) + filename = os.path.join(folder, filename) + os.makedirs(os.path.dirname(filename), exist_ok=True) + + with open(filename, "w") as f: + json.dump(output_dict, f, indent=4) + return os.path.abspath(filename) + + +def clear_chat_history(system_prompt): + if system_prompt is None: + chat = [] + else: + chat = [{"role": "system", "content": system_prompt}] + return chat + + +def parse_settings(user_input, current_args, interface): + settings = user_input[4:].strip().split(";") + settings = [(setting.split("=")[0], setting[len(setting.split("=")[0]) + 1 :]) for setting in settings] + settings = dict(settings) + error = False + + for name in settings: + if hasattr(current_args, name): + try: + if isinstance(getattr(current_args, name), bool): + if settings[name] == "True": + settings[name] = True + elif settings[name] == "False": + settings[name] = False + else: + raise ValueError + else: + settings[name] = type(getattr(current_args, name))(settings[name]) + except ValueError: + interface.print_red( + f"Cannot cast setting {name} (={settings[name]}) to {type(getattr(current_args, name))}." + ) + else: + interface.print_red(f"There is no '{name}' setting.") + + if error: + interface.print_red("There was an issue parsing the settings. No settings have been changed.") + return current_args, False + else: + for name in settings: + setattr(current_args, name, settings[name]) + interface.print_green(f"Set {name} to {settings[name]}.") + + time.sleep(1.5) # so the user has time to read the changes + return current_args, True + + +def load_model_and_tokenizer(args): + tokenizer = AutoTokenizer.from_pretrained( + args.model_name_or_path, + revision=args.model_revision, + trust_remote_code=args.trust_remote_code, + ) + + torch_dtype = args.torch_dtype if args.torch_dtype in ["auto", None] else getattr(torch, args.torch_dtype) + quantization_config = get_quantization_config(args) + model_kwargs = dict( + revision=args.model_revision, + attn_implementation=args.attn_implementation, + torch_dtype=torch_dtype, + device_map="auto", + quantization_config=quantization_config, + ) + model = AutoModelForCausalLM.from_pretrained( + args.model_name_or_path, trust_remote_code=args.trust_remote_code, **model_kwargs + ) + + if getattr(model, "hf_device_map", None) is None: + model = model.to(args.device) + + return model, tokenizer + + +def parse_eos_tokens(tokenizer, eos_tokens, eos_token_ids): + if tokenizer.pad_token_id is None: + pad_token_id = tokenizer.eos_token_id + else: + pad_token_id = tokenizer.pad_token_id + + all_eos_token_ids = [] + + if eos_tokens is not None: + all_eos_token_ids.extend(tokenizer.convert_tokens_to_ids(eos_tokens.split(","))) + + if eos_token_ids is not None: + all_eos_token_ids.extend([int(token_id) for token_id in eos_token_ids.split(",")]) + + if len(all_eos_token_ids) == 0: + all_eos_token_ids.append(tokenizer.eos_token_id) + + return pad_token_id, all_eos_token_ids + + +def chat_cli(): + parser = TrlParser(ChatArguments) + + if "--config" not in sys.argv: + sys.argv.append("--config") + sys.argv.append(os.path.join(os.path.dirname(__file__), "config/default_chat_config.yaml")) + args = parser.parse_args_and_config()[0] + if args.examples is None: + args.examples = {} + + current_args = copy.deepcopy(args) + + if args.user is None: + user = get_username() + else: + user = args.user + + model, tokenizer = load_model_and_tokenizer(args) + generation_streamer = TextIteratorStreamer(tokenizer, skip_special_tokens=True, skip_prompt=True) + + pad_token_id, eos_token_ids = parse_eos_tokens(tokenizer, args.eos_tokens, args.eos_token_ids) + + interface = RichInterface(model_name=args.model_name_or_path, user_name=user) + interface.clear() + chat = clear_chat_history(current_args.system_prompt) + while True: + try: + user_input = interface.input() + + if user_input == "clear": + chat = clear_chat_history(current_args.system_prompt) + interface.clear() + continue + + if user_input == "help": + interface.print_help() + continue + + if user_input == "exit": + break + + if user_input == "reset": + interface.clear() + current_args = copy.deepcopy(args) + chat = clear_chat_history(current_args.system_prompt) + continue + + if user_input.startswith("save") and len(user_input.split()) < 2: + split_input = user_input.split() + + if len(split_input) == 2: + filename = split_input[1] + else: + filename = None + filename = save_chat(chat, current_args, filename) + interface.print_green(f"Chat saved in {filename}!") + continue + + if re.match(SETTING_RE, user_input): + current_args, success = parse_settings(user_input, current_args, interface) + if success: + chat = [] + interface.clear() + continue + + if user_input.startswith("example") and len(user_input.split()) == 2: + example_name = user_input.split()[1] + if example_name in current_args.examples: + interface.clear() + chat = [] + interface.print_user_message(current_args.examples[example_name]["text"]) + user_input = current_args.examples[example_name]["text"] + else: + interface.print_red( + f"Example {example_name} not found in list of available examples: {list(current_args.examples.keys())}." + ) + continue + + chat.append({"role": "user", "content": user_input}) + + inputs = tokenizer.apply_chat_template(chat, return_tensors="pt", add_generation_prompt=True).to( + model.device + ) + attention_mask = torch.ones_like(inputs) + generation_kwargs = dict( + inputs=inputs, + attention_mask=attention_mask, + streamer=generation_streamer, + max_new_tokens=current_args.max_new_tokens, + do_sample=current_args.do_sample, + num_beams=current_args.num_beams, + temperature=current_args.temperature, + top_k=current_args.top_k, + top_p=current_args.top_p, + repetition_penalty=current_args.repetition_penalty, + pad_token_id=pad_token_id, + eos_token_id=eos_token_ids, + ) + + thread = Thread(target=model.generate, kwargs=generation_kwargs) + thread.start() + model_output = interface.stream_output(generation_streamer) + thread.join() + chat.append({"role": "assistant", "content": model_output}) + + except KeyboardInterrupt: + break + + +if __name__ == "__main__": + chat_cli() diff --git a/testbed/huggingface__trl/examples/scripts/config/default_chat_config.yaml b/testbed/huggingface__trl/examples/scripts/config/default_chat_config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..93195f9d7deb273770201f30ccd7829a581ec74f --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/config/default_chat_config.yaml @@ -0,0 +1,13 @@ +examples: + llama: + text: There is a Llama in my lawn, how can I get rid of it? + code: + text: Write a Python function that integrates any Python function f(x) numerically over an arbitrary interval [x_start, x_end]. + helicopter: + text: How many helicopters can a human eat in one sitting? + numbers: + text: Count to 10 but skip every number ending with an 'e' + birds: + text: Why aren't birds real? + socks: + text: Why is it important to eat socks after meditating? \ No newline at end of file diff --git a/testbed/huggingface__trl/examples/scripts/cpo.py b/testbed/huggingface__trl/examples/scripts/cpo.py new file mode 100644 index 0000000000000000000000000000000000000000..019e9ef7144a43064d50f53256905ec31cbcb364 --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/cpo.py @@ -0,0 +1,105 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Run the CPO training script with the following command with some example arguments. +In general, the optimal configuration for CPO will be similar to that of DPO: + +# regular: +python examples/scripts/cpo.py \ + --dataset_name trl-lib/ultrafeedback_binarized \ + --model_name_or_path=gpt2 \ + --per_device_train_batch_size 4 \ + --max_steps 1000 \ + --learning_rate 8e-6 \ + --gradient_accumulation_steps 1 \ + --logging_steps 10 \ + --eval_steps 500 \ + --output_dir="gpt2-aligned-cpo" \ + --warmup_steps 150 \ + --report_to wandb \ + --bf16 \ + --logging_first_step \ + --no_remove_unused_columns + +# peft: +python examples/scripts/cpo.py \ + --dataset_name trl-lib/ultrafeedback_binarized \ + --model_name_or_path=gpt2 \ + --per_device_train_batch_size 4 \ + --max_steps 1000 \ + --learning_rate 8e-5 \ + --gradient_accumulation_steps 1 \ + --logging_steps 10 \ + --eval_steps 500 \ + --output_dir="gpt2-lora-aligned-cpo" \ + --optim rmsprop \ + --warmup_steps 150 \ + --report_to wandb \ + --bf16 \ + --logging_first_step \ + --no_remove_unused_columns \ + --use_peft \ + --lora_r=16 \ + --lora_alpha=16 +""" + +from datasets import load_dataset +from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser + +from trl import CPOConfig, CPOTrainer, ModelConfig, ScriptArguments, get_peft_config +from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE + + +if __name__ == "__main__": + parser = HfArgumentParser((ScriptArguments, CPOConfig, ModelConfig)) + script_args, training_args, model_config = parser.parse_args_into_dataclasses() + + ################ + # Model & Tokenizer + ################ + model = AutoModelForCausalLM.from_pretrained( + model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code + ) + tokenizer = AutoTokenizer.from_pretrained( + model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code + ) + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + + ################ + # Dataset + ################ + dataset = load_dataset(script_args.dataset_name) + if tokenizer.chat_template is None: + tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE + + ################ + # Training + ################ + trainer = CPOTrainer( + model, + args=training_args, + train_dataset=dataset[script_args.dataset_train_split], + eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, + processing_class=tokenizer, + peft_config=get_peft_config(model_config), + ) + + # train and save the model + trainer.train() + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) diff --git a/testbed/huggingface__trl/examples/scripts/ddpo.py b/testbed/huggingface__trl/examples/scripts/ddpo.py new file mode 100644 index 0000000000000000000000000000000000000000..92924c51e4b3d638147a309e20d1e117cbc9f27d --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/ddpo.py @@ -0,0 +1,215 @@ +# Copyright 2023 metric-space, The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +python examples/scripts/ddpo.py \ + --num_epochs=200 \ + --train_gradient_accumulation_steps=1 \ + --sample_num_steps=50 \ + --sample_batch_size=6 \ + --train_batch_size=3 \ + --sample_num_batches_per_epoch=4 \ + --per_prompt_stat_tracking=True \ + --per_prompt_stat_tracking_buffer_size=32 \ + --tracker_project_name="stable_diffusion_training" \ + --log_with="wandb" +""" + +import os +from dataclasses import dataclass, field + +import numpy as np +import torch +import torch.nn as nn +from huggingface_hub import hf_hub_download +from huggingface_hub.utils import EntryNotFoundError +from transformers import CLIPModel, CLIPProcessor, HfArgumentParser, is_torch_npu_available, is_torch_xpu_available + +from trl import DDPOConfig, DDPOTrainer, DefaultDDPOStableDiffusionPipeline + + +@dataclass +class ScriptArguments: + pretrained_model: str = field( + default="runwayml/stable-diffusion-v1-5", metadata={"help": "the pretrained model to use"} + ) + pretrained_revision: str = field(default="main", metadata={"help": "the pretrained model revision to use"}) + hf_hub_model_id: str = field( + default="ddpo-finetuned-stable-diffusion", metadata={"help": "HuggingFace repo to save model weights to"} + ) + hf_hub_aesthetic_model_id: str = field( + default="trl-lib/ddpo-aesthetic-predictor", + metadata={"help": "HuggingFace model ID for aesthetic scorer model weights"}, + ) + hf_hub_aesthetic_model_filename: str = field( + default="aesthetic-model.pth", + metadata={"help": "HuggingFace model filename for aesthetic scorer model weights"}, + ) + use_lora: bool = field(default=True, metadata={"help": "Whether to use LoRA."}) + + +class MLP(nn.Module): + def __init__(self): + super().__init__() + self.layers = nn.Sequential( + nn.Linear(768, 1024), + nn.Dropout(0.2), + nn.Linear(1024, 128), + nn.Dropout(0.2), + nn.Linear(128, 64), + nn.Dropout(0.1), + nn.Linear(64, 16), + nn.Linear(16, 1), + ) + + @torch.no_grad() + def forward(self, embed): + return self.layers(embed) + + +class AestheticScorer(torch.nn.Module): + """ + This model attempts to predict the aesthetic score of an image. The aesthetic score + is a numerical approximation of how much a specific image is liked by humans on average. + This is from https://github.com/christophschuhmann/improved-aesthetic-predictor + """ + + def __init__(self, *, dtype, model_id, model_filename): + super().__init__() + self.clip = CLIPModel.from_pretrained("openai/clip-vit-large-patch14") + self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14") + self.mlp = MLP() + try: + cached_path = hf_hub_download(model_id, model_filename) + except EntryNotFoundError: + cached_path = os.path.join(model_id, model_filename) + state_dict = torch.load(cached_path, map_location=torch.device("cpu"), weights_only=True) + self.mlp.load_state_dict(state_dict) + self.dtype = dtype + self.eval() + + @torch.no_grad() + def __call__(self, images): + device = next(self.parameters()).device + inputs = self.processor(images=images, return_tensors="pt") + inputs = {k: v.to(self.dtype).to(device) for k, v in inputs.items()} + embed = self.clip.get_image_features(**inputs) + # normalize embedding + embed = embed / torch.linalg.vector_norm(embed, dim=-1, keepdim=True) + return self.mlp(embed).squeeze(1) + + +def aesthetic_scorer(hub_model_id, model_filename): + scorer = AestheticScorer( + model_id=hub_model_id, + model_filename=model_filename, + dtype=torch.float32, + ) + if is_torch_npu_available(): + scorer = scorer.npu() + elif is_torch_xpu_available(): + scorer = scorer.xpu() + else: + scorer = scorer.cuda() + + def _fn(images, prompts, metadata): + images = (images * 255).round().clamp(0, 255).to(torch.uint8) + scores = scorer(images) + return scores, {} + + return _fn + + +# list of example prompts to feed stable diffusion +animals = [ + "cat", + "dog", + "horse", + "monkey", + "rabbit", + "zebra", + "spider", + "bird", + "sheep", + "deer", + "cow", + "goat", + "lion", + "frog", + "chicken", + "duck", + "goose", + "bee", + "pig", + "turkey", + "fly", + "llama", + "camel", + "bat", + "gorilla", + "hedgehog", + "kangaroo", +] + + +def prompt_fn(): + return np.random.choice(animals), {} + + +def image_outputs_logger(image_data, global_step, accelerate_logger): + # For the sake of this example, we will only log the last batch of images + # and associated data + result = {} + images, prompts, _, rewards, _ = image_data[-1] + + for i, image in enumerate(images): + prompt = prompts[i] + reward = rewards[i].item() + result[f"{prompt:.25} | {reward:.2f}"] = image.unsqueeze(0).float() + + accelerate_logger.log_images( + result, + step=global_step, + ) + + +if __name__ == "__main__": + parser = HfArgumentParser((ScriptArguments, DDPOConfig)) + script_args, training_args = parser.parse_args_into_dataclasses() + training_args.project_kwargs = { + "logging_dir": "./logs", + "automatic_checkpoint_naming": True, + "total_limit": 5, + "project_dir": "./save", + } + + pipeline = DefaultDDPOStableDiffusionPipeline( + script_args.pretrained_model, + pretrained_model_revision=script_args.pretrained_revision, + use_lora=script_args.use_lora, + ) + + trainer = DDPOTrainer( + training_args, + aesthetic_scorer(script_args.hf_hub_aesthetic_model_id, script_args.hf_hub_aesthetic_model_filename), + prompt_fn, + pipeline, + image_samples_hook=image_outputs_logger, + ) + + trainer.train() + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) diff --git a/testbed/huggingface__trl/examples/scripts/dpo.py b/testbed/huggingface__trl/examples/scripts/dpo.py new file mode 100644 index 0000000000000000000000000000000000000000..ed144617258cba0b57d98579069208283d25d183 --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/dpo.py @@ -0,0 +1,138 @@ +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +# Full training +python examples/scripts/dpo.py \ + --dataset_name trl-lib/ultrafeedback_binarized \ + --model_name_or_path Qwen/Qwen2-0.5B-Instruct \ + --learning_rate 5.0e-7 \ + --num_train_epochs 1 \ + --per_device_train_batch_size 2 \ + --gradient_accumulation_steps 8 \ + --gradient_checkpointing \ + --logging_steps 25 \ + --eval_strategy steps \ + --eval_steps 50 \ + --output_dir Qwen2-0.5B-DPO \ + --no_remove_unused_columns + +# LoRA: +python examples/scripts/dpo.py \ + --dataset_name trl-lib/ultrafeedback_binarized \ + --model_name_or_path Qwen/Qwen2-0.5B-Instruct \ + --learning_rate 5.0e-6 \ + --num_train_epochs 1 \ + --per_device_train_batch_size 2 \ + --gradient_accumulation_steps 8 \ + --gradient_checkpointing \ + --logging_steps 25 \ + --eval_strategy steps \ + --eval_steps 50 \ + --output_dir Qwen2-0.5B-DPO \ + --no_remove_unused_columns \ + --use_peft \ + --lora_r 32 \ + --lora_alpha 16 +""" + +import torch +from datasets import load_dataset +from transformers import AutoModelForCausalLM, AutoTokenizer + +from trl import ( + DPOConfig, + DPOTrainer, + ModelConfig, + ScriptArguments, + TrlParser, + get_kbit_device_map, + get_peft_config, + get_quantization_config, +) +from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE + + +if __name__ == "__main__": + parser = TrlParser((ScriptArguments, DPOConfig, ModelConfig)) + script_args, training_args, model_config = parser.parse_args_and_config() + + ################ + # Model & Tokenizer + ################### + torch_dtype = ( + model_config.torch_dtype + if model_config.torch_dtype in ["auto", None] + else getattr(torch, model_config.torch_dtype) + ) + quantization_config = get_quantization_config(model_config) + model_kwargs = dict( + revision=model_config.model_revision, + attn_implementation=model_config.attn_implementation, + torch_dtype=torch_dtype, + use_cache=False if training_args.gradient_checkpointing else True, + device_map=get_kbit_device_map() if quantization_config is not None else None, + quantization_config=quantization_config, + ) + model = AutoModelForCausalLM.from_pretrained( + model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code, **model_kwargs + ) + peft_config = get_peft_config(model_config) + if peft_config is None: + ref_model = AutoModelForCausalLM.from_pretrained( + model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code, **model_kwargs + ) + else: + ref_model = None + tokenizer = AutoTokenizer.from_pretrained( + model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code + ) + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + if tokenizer.chat_template is None: + tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE + if script_args.ignore_bias_buffers: + # torch distributed hack + model._ddp_params_and_buffers_to_ignore = [ + name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool + ] + + ################ + # Dataset + ################ + dataset = load_dataset(script_args.dataset_name) + + ########## + # Training + ################ + trainer = DPOTrainer( + model, + ref_model, + args=training_args, + train_dataset=dataset[script_args.dataset_train_split], + eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, + processing_class=tokenizer, + peft_config=peft_config, + ) + + trainer.train() + + if training_args.eval_strategy != "no": + metrics = trainer.evaluate() + trainer.log_metrics("eval", metrics) + trainer.save_metrics("eval", metrics) + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) diff --git a/testbed/huggingface__trl/examples/scripts/dpo_online.py b/testbed/huggingface__trl/examples/scripts/dpo_online.py new file mode 100644 index 0000000000000000000000000000000000000000..b38e0bc95bca2dc7e13ce2461fe31e5f353df165 --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/dpo_online.py @@ -0,0 +1,149 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: + +python examples/scripts/dpo_online.py \ + --model_name_or_path trl-lib/pythia-1b-deduped-tldr-sft \ + --reward_model_path trl-lib/pythia-1b-deduped-tldr-rm \ + --dataset_name trl-lib/tldr \ + --learning_rate 5.0e-7 \ + --output_dir pythia-1b-tldr-online-dpo \ + --per_device_train_batch_size 8 \ + --gradient_accumulation_steps 16 \ + --warmup_ratio 0.1 \ + --missing_eos_penalty 1.0 + +With LoRA: +python examples/scripts/dpo_online.py \ + --model_name_or_path trl-lib/pythia-1b-deduped-tldr-sft \ + --reward_model_path trl-lib/pythia-1b-deduped-tldr-rm \ + --dataset_name trl-lib/tldr \ + --learning_rate 5.0e-6 \ + --output_dir pythia-1b-tldr-online-dpo \ + --per_device_train_batch_size 16 \ + --gradient_accumulation_steps 8 \ + --warmup_ratio 0.1 \ + --missing_eos_penalty 1.0 \ + --use_peft +""" + +import torch +from datasets import load_dataset +from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, GenerationConfig + +from trl import ( + HfPairwiseJudge, + LogCompletionsCallback, + ModelConfig, + OnlineDPOConfig, + OnlineDPOTrainer, + OpenAIPairwiseJudge, + PairRMJudge, + ScriptArguments, + TrlParser, + get_kbit_device_map, + get_peft_config, + get_quantization_config, +) +from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE + + +JUDGES = {"pair_rm": PairRMJudge, "openai": OpenAIPairwiseJudge, "hf": HfPairwiseJudge} + +if __name__ == "__main__": + parser = TrlParser((ScriptArguments, OnlineDPOConfig, ModelConfig)) + script_args, training_args, model_config = parser.parse_args_and_config() + training_args.gradient_checkpointing_kwargs = {"use_reentrant": True} + + torch_dtype = ( + model_config.torch_dtype + if model_config.torch_dtype in ["auto", None] + else getattr(torch, model_config.torch_dtype) + ) + quantization_config = get_quantization_config(model_config) + model_kwargs = dict( + revision=model_config.model_revision, + attn_implementation=model_config.attn_implementation, + torch_dtype=torch_dtype, + use_cache=False if training_args.gradient_checkpointing else True, + device_map=get_kbit_device_map() if quantization_config is not None else None, + quantization_config=quantization_config, + ) + + model = AutoModelForCausalLM.from_pretrained( + model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code, **model_kwargs + ) + + if training_args.reward_model_path is not None: + reward_model = AutoModelForSequenceClassification.from_pretrained( + training_args.reward_model_path, + num_labels=1, + trust_remote_code=model_config.trust_remote_code, + **model_kwargs, + ) + reward_tokenizer = AutoTokenizer.from_pretrained( + training_args.reward_model_path, + trust_remote_code=model_config.trust_remote_code, + truncation=True, + truncation_side="left", # since we judge the completion, truncating left is more appropriate + ) + else: + reward_model = None + reward_tokenizer = None + + if training_args.judge is not None: + judge_cls = JUDGES[training_args.judge] + judge = judge_cls() + else: + judge = None + + tokenizer = AutoTokenizer.from_pretrained( + model_config.model_name_or_path, + padding_side="left", + trust_remote_code=model_config.trust_remote_code, + **model_kwargs, + ) + if tokenizer.chat_template is None: + tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE + if tokenizer.pad_token_id is None: + tokenizer.pad_token = tokenizer.eos_token + + dataset = load_dataset(script_args.dataset_name) + + trainer = OnlineDPOTrainer( + model=model, + reward_model=reward_model, + judge=judge, + args=training_args, + train_dataset=dataset[script_args.dataset_train_split], + eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, + processing_class=tokenizer, + reward_processing_class=reward_tokenizer, + peft_config=get_peft_config(model_config), + ) + + if training_args.eval_strategy != "no": + generation_config = GenerationConfig( + max_new_tokens=training_args.max_new_tokens, do_sample=True, temperature=training_args.temperature + ) + completions_callback = LogCompletionsCallback(trainer, generation_config, num_prompts=8) + trainer.add_callback(completions_callback) + + trainer.train() + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) diff --git a/testbed/huggingface__trl/examples/scripts/dpo_vlm.py b/testbed/huggingface__trl/examples/scripts/dpo_vlm.py new file mode 100644 index 0000000000000000000000000000000000000000..0781edd9e31239584e948121aa8d14f5bafe4fe9 --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/dpo_vlm.py @@ -0,0 +1,126 @@ +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +accelerate launch examples/scripts/dpo_vlm.py \ + --dataset_name HuggingFaceH4/rlaif-v_formatted \ + --model_name_or_path HuggingFaceM4/idefics2-8b \ + --per_device_train_batch_size 2 \ + --gradient_accumulation_steps 32 \ + --dataset_num_proc 32 \ + --output_dir dpo_idefics_rlaif-v \ + --bf16 \ + --torch_dtype bfloat16 \ + --gradient_checkpointing \ + --use_peft \ + --lora_target_modules=all-linear +""" + +import torch +from datasets import load_dataset +from transformers import AutoModelForVision2Seq, AutoProcessor + +from trl import ( + DPOConfig, + DPOTrainer, + ModelConfig, + ScriptArguments, + TrlParser, + get_kbit_device_map, + get_peft_config, + get_quantization_config, +) + + +if __name__ == "__main__": + parser = TrlParser((ScriptArguments, DPOConfig, ModelConfig)) + script_args, training_args, model_config = parser.parse_args_and_config() + + ################ + # Model & Tokenizer + ################ + torch_dtype = ( + model_config.torch_dtype + if model_config.torch_dtype in ["auto", None] + else getattr(torch, model_config.torch_dtype) + ) + quantization_config = get_quantization_config(model_config) + + model_kwargs = dict( + revision=model_config.model_revision, + attn_implementation=model_config.attn_implementation, + torch_dtype=torch_dtype, + device_map=get_kbit_device_map() if quantization_config is not None else None, + quantization_config=quantization_config, + ) + model = AutoModelForVision2Seq.from_pretrained( + model_config.model_name_or_path, + trust_remote_code=model_config.trust_remote_code, + **model_kwargs, + ) + peft_config = get_peft_config(model_config) + if peft_config is None: + ref_model = AutoModelForVision2Seq.from_pretrained( + model_config.model_name_or_path, + trust_remote_code=model_config.trust_remote_code, + **model_kwargs, + ) + else: + ref_model = None + processor = AutoProcessor.from_pretrained( + model_config.model_name_or_path, + trust_remote_code=model_config.trust_remote_code, + do_image_splitting=False, + ) + tokenizer = processor.tokenizer + + # Set up the chat template + if model.config.model_type == "idefics2": + pass # the processor already has a valid chat template + elif model.config.model_type == "paligemma": + processor.chat_template = """{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}<|im_start|>{% if message['role'] == 'user' %}USER: {% else %}ASSISTANT: {% endif %}{% for item in message['content'] if item['type'] == 'text' %}{{ item['text'] }}<|im_end|>{% endfor %}{% if message['role'] == 'user' %} {% else %}{{eos_token}}{% endif %}{% endfor %}{% if add_generation_prompt %}ASSISTANT: {% endif %}""" + elif model.config.model_type == "llava": + processor.chat_template = """{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{% if message['role'] == 'user' %}USER: {% else %}ASSISTANT: {% endif %}{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}{% endif %}{% endfor %}{% if message['role'] == 'user' %} {% else %}{{eos_token}}{% endif %}{% endfor %}{% if add_generation_prompt %}ASSISTANT: {% endif %}""" + + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + if script_args.ignore_bias_buffers: + # torch distributed hack + model._ddp_params_and_buffers_to_ignore = [ + name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool + ] + + ################ + # Dataset + ################ + dataset = load_dataset(script_args.dataset_name) + + ################ + # Training + ################ + trainer = DPOTrainer( + model, + ref_model, + args=training_args, + train_dataset=dataset[script_args.dataset_train_split], + eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, + processing_class=processor, + peft_config=peft_config, + ) + + trainer.train() + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) diff --git a/testbed/huggingface__trl/examples/scripts/evals/judge_tldr.py b/testbed/huggingface__trl/examples/scripts/evals/judge_tldr.py new file mode 100644 index 0000000000000000000000000000000000000000..5f6a8ee662c8fd9c8f7edad2d5ef620da7188a13 --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/evals/judge_tldr.py @@ -0,0 +1,88 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field +from typing import Optional + +from datasets import load_dataset +from transformers import HfArgumentParser +from vllm import LLM, SamplingParams + +from trl import HfPairwiseJudge, OpenAIPairwiseJudge + + +""" +Examples: + +python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/rloo_tldr --num_examples 1000 +Model win rate: 31.40% + +python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/rloo_tldr --judge_model gpt-3.5-turbo-0125 --num_examples 1000 +Model win rate: 51.60% + +python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/rloo_tldr --judge_model gpt-4o-mini --num_examples 1000 +Model win rate: 51.20% + +python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/ppo_tldr --num_examples 1000 +Model win rate: 46.30% + +python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/ppo_tldr --judge_model gpt-3.5-turbo-0125 --num_examples 1000 +Model win rate: 52.50% + +python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/ppo_tldr --judge_model gpt-4o-mini --num_examples 1000 +Model win rate: 63.00% +""" + + +@dataclass +class ScriptArguments: + model_name_or_path: str = field(metadata={"help": "The model name or path to the model to evaluate."}) + judge_model: str = field( + default="meta-llama/Meta-Llama-3-70B-Instruct", + metadata={ + "help": "The model name or path to the model to use as a judge. E.g., 'gpt-3.5-turbo-0125', 'meta-llama/Meta-Llama-3-70B-Instruct'." + }, + ) + num_examples: Optional[int] = field(default=None, metadata={"help": "The number of examples to evaluate."}) + + +# Parse the arguments +parser = HfArgumentParser(ScriptArguments) +script_args = parser.parse_args_into_dataclasses()[0] + +# Load the dataset +dataset = load_dataset("trl-lib/tldr", split="validation") +if script_args.num_examples is not None: + dataset = dataset.select(range(script_args.num_examples)) + +# Extract the prompts and reference completions +prompts = dataset["prompt"] +reference_completions = dataset["completion"] + +# Generate the model completions +sampling_params = SamplingParams(temperature=0.0, top_p=0.95, max_tokens=200) # very generous max token length +llm = LLM(model=script_args.model_name_or_path, tensor_parallel_size=1) +outputs = llm.generate(prompts, sampling_params) +model_completions = [output.outputs[0].text.strip() for output in outputs] + +# Judge the outputs +if "gpt" in script_args.judge_model: + judge = OpenAIPairwiseJudge(script_args.judge_model) +else: + judge = HfPairwiseJudge(script_args.judge_model) + +completions = [[c0, c1] for c0, c1 in zip(reference_completions, model_completions)] +best_idxs = judge.judge(prompts, completions) +model_win_rate = best_idxs.count(1) / len(best_idxs) +print(f"Model win rate: {model_win_rate*100:.2f}%") diff --git a/testbed/huggingface__trl/examples/scripts/gkd.py b/testbed/huggingface__trl/examples/scripts/gkd.py new file mode 100644 index 0000000000000000000000000000000000000000..ed5d07a47eeb5d879952169286835ae2bc8458fc --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/gkd.py @@ -0,0 +1,141 @@ +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +# Full training: +python examples/scripts/gkd.py \ + --model_name_or_path Qwen/Qwen2-0.5B-Instruct \ + --teacher_model_name_or_path Qwen/Qwen2-1.5B-Instruct \ + --dataset_name trl-lib/chatbot_arena_completions \ + --learning_rate 2e-5 \ + --per_device_train_batch_size 4 \ + --gradient_accumulation_steps 8 \ + --output_dir gkd-model \ + --logging_steps 10 \ + --num_train_epochs 1 \ + --push_to_hub \ + --gradient_checkpointing + +# LoRA: +python examples/scripts/gkd.py \ + --model_name_or_path Qwen/Qwen2-0.5B-Instruct \ + --teacher_model_name_or_path Qwen/Qwen2-1.5B-Instruct \ + --dataset_name trl-lib/chatbot_arena_completions \ + --learning_rate 2e-4 \ + --per_device_train_batch_size 4 \ + --gradient_accumulation_steps 8 \ + --output_dir gkd-model \ + --logging_steps 10 \ + --num_train_epochs 1 \ + --push_to_hub \ + --gradient_checkpointing \ + --use_peft \ + --lora_r 64 \ + --lora_alpha 16 +""" + +from accelerate import PartialState +from datasets import load_dataset +from transformers import AutoTokenizer, GenerationConfig + +from trl import ( + GKDConfig, + GKDTrainer, + LogCompletionsCallback, + ModelConfig, + ScriptArguments, + TrlParser, + get_kbit_device_map, + get_peft_config, + get_quantization_config, +) + + +if __name__ == "__main__": + parser = TrlParser((ScriptArguments, GKDConfig, ModelConfig)) + script_args, training_args, model_config = parser.parse_args_and_config() + + ################ + # Model & Tokenizer + ################ + quantization_config = get_quantization_config(model_config) + model_kwargs = dict( + revision=model_config.model_revision, + trust_remote_code=model_config.trust_remote_code, + attn_implementation=model_config.attn_implementation, + torch_dtype=model_config.torch_dtype, + use_cache=False if training_args.gradient_checkpointing else True, + device_map=get_kbit_device_map() if quantization_config is not None else None, + quantization_config=quantization_config, + ) + training_args.model_init_kwargs = model_kwargs + + teacher_model_kwargs = dict( + revision=model_config.model_revision, + trust_remote_code=model_config.trust_remote_code, + attn_implementation=model_config.attn_implementation, + torch_dtype=model_config.torch_dtype, + use_cache=True, + device_map=get_kbit_device_map() if quantization_config is not None else None, + quantization_config=quantization_config, + ) + training_args.teacher_model_init_kwargs = teacher_model_kwargs + + tokenizer = AutoTokenizer.from_pretrained( + model_config.model_name_or_path, + revision=model_config.model_revision, + trust_remote_code=model_config.trust_remote_code, + padding_side="left", + ) + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + + ################ + # Dataset + ################ + dataset = load_dataset(script_args.dataset_name) + + with PartialState().local_main_process_first(): + dataset = dataset.map( + lambda x: { + "prompt": tokenizer.apply_chat_template(x["prompt"], tokenize=False, add_generation_prompt=True) + }, + num_proc=training_args.dataset_num_proc, + ) + + ################ + # Training + ################ + trainer = GKDTrainer( + model=model_config.model_name_or_path, + teacher_model=training_args.teacher_model_name_or_path, + args=training_args, + train_dataset=dataset[script_args.dataset_train_split], + eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, + processing_class=tokenizer, + peft_config=get_peft_config(model_config), + ) + + if training_args.eval_strategy != "no": + generation_config = GenerationConfig( + max_new_tokens=training_args.max_new_tokens, do_sample=True, temperature=training_args.temperature + ) + completions_callback = LogCompletionsCallback(trainer, generation_config, num_prompts=8) + trainer.add_callback(completions_callback) + + trainer.train() + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) diff --git a/testbed/huggingface__trl/examples/scripts/kto.py b/testbed/huggingface__trl/examples/scripts/kto.py new file mode 100644 index 0000000000000000000000000000000000000000..56205ce57e89b4b04c4f015835b6b3b8b3a3d6a1 --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/kto.py @@ -0,0 +1,113 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Run the KTO training script with the commands below. In general, the optimal configuration for KTO will be similar to that of DPO. + +# Full training: +python examples/scripts/kto.py \ + --dataset_name trl-lib/kto-mix-14k \ + --model_name_or_path=trl-lib/qwen1.5-1.8b-sft \ + --per_device_train_batch_size 16 \ + --num_train_epochs 1 \ + --learning_rate 5e-7 \ + --lr_scheduler_type=cosine \ + --gradient_accumulation_steps 1 \ + --logging_steps 10 \ + --eval_steps 500 \ + --output_dir=kto-aligned-model \ + --warmup_ratio 0.1 \ + --report_to wandb \ + --bf16 \ + --logging_first_step + +# QLoRA: +python examples/scripts/kto.py \ + --dataset_name trl-lib/kto-mix-14k \ + --model_name_or_path=trl-lib/qwen1.5-1.8b-sft \ + --per_device_train_batch_size 8 \ + --num_train_epochs 1 \ + --learning_rate 5e-7 \ + --lr_scheduler_type=cosine \ + --gradient_accumulation_steps 1 \ + --logging_steps 10 \ + --eval_steps 500 \ + --output_dir=kto-aligned-model-lora \ + --warmup_ratio 0.1 \ + --report_to wandb \ + --bf16 \ + --logging_first_step \ + --use_peft \ + --load_in_4bit \ + --lora_target_modules=all-linear \ + --lora_r=16 \ + --lora_alpha=16 +""" + +from datasets import load_dataset +from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser + +from trl import ( + KTOConfig, + KTOTrainer, + ModelConfig, + ScriptArguments, + get_peft_config, + setup_chat_format, +) + + +if __name__ == "__main__": + parser = HfArgumentParser((ScriptArguments, KTOConfig, ModelConfig)) + script_args, training_args, model_args = parser.parse_args_into_dataclasses() + + # Load a pretrained model + model = AutoModelForCausalLM.from_pretrained( + model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code + ) + ref_model = AutoModelForCausalLM.from_pretrained( + model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code + ) + + tokenizer = AutoTokenizer.from_pretrained( + model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code + ) + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + + # If we are aligning a base model, we use ChatML as the default template + if tokenizer.chat_template is None: + model, tokenizer = setup_chat_format(model, tokenizer) + + # Load the dataset + dataset = load_dataset(script_args.dataset_name) + + # Initialize the KTO trainer + trainer = KTOTrainer( + model, + ref_model, + args=training_args, + train_dataset=dataset[script_args.dataset_train_split], + eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, + processing_class=tokenizer, + peft_config=get_peft_config(model_args), + ) + + # Train and push the model to the Hub + trainer.train() + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) diff --git a/testbed/huggingface__trl/examples/scripts/nash_md.py b/testbed/huggingface__trl/examples/scripts/nash_md.py new file mode 100644 index 0000000000000000000000000000000000000000..d4915e0347a3d6ec5e894339dfac371075993d10 --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/nash_md.py @@ -0,0 +1,148 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: + +python examples/scripts/nash_md.py \ + --model_name_or_path trl-lib/pythia-1b-deduped-tldr-sft \ + --reward_model_path trl-lib/pythia-1b-deduped-tldr-rm \ + --dataset_name trl-lib/tldr \ + --learning_rate 5.0e-7 \ + --output_dir pythia-1b-tldr-nash-md \ + --per_device_train_batch_size 4 \ + --gradient_accumulation_steps 32 \ + --num_train_epochs 3 \ + --max_new_tokens 64 \ + --warmup_ratio 0.1 \ + --missing_eos_penalty 1.0 \ + --push_to_hub + + +accelerate launch --config_file examples/accelerate_configs/deepspeed_zero2.yaml \ + examples/scripts/nash_md.py \ + --model_name_or_path trl-lib/pythia-1b-deduped-tldr-sft \ + --reward_model_path trl-lib/pythia-1b-deduped-tldr-rm \ + --dataset_name trl-lib/tldr \ + --learning_rate 5.0e-7 \ + --output_dir pythia-1b-tldr-nash-md \ + --per_device_train_batch_size 4 \ + --gradient_accumulation_steps 32 \ + --num_train_epochs 3 \ + --max_new_tokens 64 \ + --warmup_ratio 0.1 \ + --missing_eos_penalty 1.0 \ + --push_to_hub +""" + +import torch +from datasets import load_dataset +from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, GenerationConfig + +from trl import ( + HfPairwiseJudge, + LogCompletionsCallback, + ModelConfig, + NashMDConfig, + NashMDTrainer, + OpenAIPairwiseJudge, + PairRMJudge, + ScriptArguments, + TrlParser, + get_kbit_device_map, + get_quantization_config, +) +from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE + + +JUDGES = {"pair_rm": PairRMJudge, "openai": OpenAIPairwiseJudge, "hf": HfPairwiseJudge} + +if __name__ == "__main__": + parser = TrlParser((ScriptArguments, NashMDConfig, ModelConfig)) + script_args, training_args, model_config = parser.parse_args_and_config() + training_args.gradient_checkpointing_kwargs = {"use_reentrant": True} + + torch_dtype = ( + model_config.torch_dtype + if model_config.torch_dtype in ["auto", None] + else getattr(torch, model_config.torch_dtype) + ) + quantization_config = get_quantization_config(model_config) + model_kwargs = dict( + revision=model_config.model_revision, + attn_implementation=model_config.attn_implementation, + torch_dtype=torch_dtype, + use_cache=False if training_args.gradient_checkpointing else True, + device_map=get_kbit_device_map() if quantization_config is not None else None, + quantization_config=quantization_config, + ) + + model = AutoModelForCausalLM.from_pretrained( + model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code, **model_kwargs + ) + ref_model = AutoModelForCausalLM.from_pretrained( + model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code, **model_kwargs + ) + + if training_args.reward_model_path is not None: + reward_model = AutoModelForSequenceClassification.from_pretrained( + training_args.reward_model_path, + num_labels=1, + trust_remote_code=model_config.trust_remote_code, + **model_kwargs, + ) + else: + reward_model = None + + if training_args.judge is not None: + judge_cls = JUDGES[training_args.judge] + judge = judge_cls() + else: + judge = None + + tokenizer = AutoTokenizer.from_pretrained( + model_config.model_name_or_path, + padding_side="left", + trust_remote_code=model_config.trust_remote_code, + ) + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + if tokenizer.chat_template is None: + tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE + + dataset = load_dataset(script_args.dataset_name) + + trainer = NashMDTrainer( + model=model, + ref_model=ref_model, + reward_model=reward_model, + judge=judge, + args=training_args, + train_dataset=dataset[script_args.dataset_train_split], + eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, + processing_class=tokenizer, + ) + + if training_args.eval_strategy != "no": + generation_config = GenerationConfig( + max_new_tokens=training_args.max_new_tokens, do_sample=True, temperature=training_args.temperature + ) + completions_callback = LogCompletionsCallback(trainer, generation_config, num_prompts=8) + trainer.add_callback(completions_callback) + + trainer.train() + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) diff --git a/testbed/huggingface__trl/examples/scripts/orpo.py b/testbed/huggingface__trl/examples/scripts/orpo.py new file mode 100644 index 0000000000000000000000000000000000000000..ac3f598f88e591c6be684a36370f9fa4740593f0 --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/orpo.py @@ -0,0 +1,105 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Run the ORPO training script with the following command with some example arguments. +In general, the optimal configuration for ORPO will be similar to that of DPO without the need for a reference model: + +# regular: +python examples/scripts/orpo.py \ + --dataset_name trl-internal-testing/hh-rlhf-helpful-base-trl-style \ + --model_name_or_path=gpt2 \ + --per_device_train_batch_size 4 \ + --max_steps 1000 \ + --learning_rate 8e-6 \ + --gradient_accumulation_steps 1 \ + --logging_steps 10 \ + --eval_steps 500 \ + --output_dir="gpt2-aligned-orpo" \ + --warmup_steps 150 \ + --report_to wandb \ + --bf16 \ + --logging_first_step \ + --no_remove_unused_columns + +# peft: +python examples/scripts/orpo.py \ + --dataset_name trl-internal-testing/hh-rlhf-helpful-base-trl-style \ + --model_name_or_path=gpt2 \ + --per_device_train_batch_size 4 \ + --max_steps 1000 \ + --learning_rate 8e-5 \ + --gradient_accumulation_steps 1 \ + --logging_steps 10 \ + --eval_steps 500 \ + --output_dir="gpt2-lora-aligned-orpo" \ + --optim rmsprop \ + --warmup_steps 150 \ + --report_to wandb \ + --bf16 \ + --logging_first_step \ + --no_remove_unused_columns \ + --use_peft \ + --lora_r=16 \ + --lora_alpha=16 +""" + +from datasets import load_dataset +from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser + +from trl import ModelConfig, ORPOConfig, ORPOTrainer, ScriptArguments, get_peft_config +from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE + + +if __name__ == "__main__": + parser = HfArgumentParser((ScriptArguments, ORPOConfig, ModelConfig)) + script_args, training_args, model_config = parser.parse_args_into_dataclasses() + + ################ + # Model & Tokenizer + ################ + model = AutoModelForCausalLM.from_pretrained( + model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code + ) + tokenizer = AutoTokenizer.from_pretrained( + model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code + ) + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + + ################ + # Dataset + ################ + dataset = load_dataset(script_args.dataset_name) + if tokenizer.chat_template is None: + tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE + + ################ + # Training + ################ + trainer = ORPOTrainer( + model, + args=training_args, + train_dataset=dataset[script_args.dataset_train_split], + eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, + processing_class=tokenizer, + peft_config=get_peft_config(model_config), + ) + + # train and save the model + trainer.train() + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) diff --git a/testbed/huggingface__trl/examples/scripts/ppo/ppo.py b/testbed/huggingface__trl/examples/scripts/ppo/ppo.py new file mode 100644 index 0000000000000000000000000000000000000000..e0dd07bb5a24265ac689b031cc5e6b90e4382f9d --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/ppo/ppo.py @@ -0,0 +1,172 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import shutil + +import torch +from accelerate import PartialState +from datasets import load_dataset +from transformers import ( + AutoModelForCausalLM, + AutoModelForSequenceClassification, + AutoTokenizer, + HfArgumentParser, +) + +from trl import ( + ModelConfig, + PPOConfig, + PPOTrainer, + ScriptArguments, + get_kbit_device_map, + get_peft_config, + get_quantization_config, +) +from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE + + +""" +python -i examples/scripts/ppo/ppo.py \ + --dataset_name trl-internal-testing/descriptiveness-sentiment-trl-style \ + --dataset_train_split descriptiveness \ + --learning_rate 3e-6 \ + --output_dir models/minimal/ppo \ + --per_device_train_batch_size 64 \ + --gradient_accumulation_steps 1 \ + --total_episodes 10000 \ + --model_name_or_path EleutherAI/pythia-1b-deduped \ + --missing_eos_penalty 1.0 + +accelerate launch --config_file examples/accelerate_configs/deepspeed_zero3.yaml \ + examples/scripts/ppo/ppo.py \ + --dataset_name trl-internal-testing/descriptiveness-sentiment-trl-style \ + --dataset_train_split descriptiveness \ + --output_dir models/minimal/ppo \ + --num_ppo_epochs 1 \ + --num_mini_batches 1 \ + --learning_rate 3e-6 \ + --per_device_train_batch_size 1 \ + --gradient_accumulation_steps 16 \ + --total_episodes 10000 \ + --model_name_or_path EleutherAI/pythia-1b-deduped \ + --sft_model_path EleutherAI/pythia-1b-deduped \ + --reward_model_path EleutherAI/pythia-1b-deduped \ + --local_rollout_forward_batch_size 1 \ + --missing_eos_penalty 1.0 +""" + + +if __name__ == "__main__": + parser = HfArgumentParser((ScriptArguments, PPOConfig, ModelConfig)) + script_args, training_args, model_config = parser.parse_args_into_dataclasses() + # remove output_dir if exists + shutil.rmtree(training_args.output_dir, ignore_errors=True) + + ################ + # Model & Tokenizer + ################ + torch_dtype = ( + model_config.torch_dtype + if model_config.torch_dtype in ["auto", None] + else getattr(torch, model_config.torch_dtype) + ) + quantization_config = get_quantization_config(model_config) + model_kwargs = dict( + revision=model_config.model_revision, + attn_implementation=model_config.attn_implementation, + torch_dtype=torch_dtype, + device_map=get_kbit_device_map() if quantization_config is not None else None, + quantization_config=quantization_config, + ) + + tokenizer = AutoTokenizer.from_pretrained( + model_config.model_name_or_path, + padding_side="left", + trust_remote_code=model_config.trust_remote_code, + ) + tokenizer.add_special_tokens({"pad_token": "[PAD]"}) + if tokenizer.chat_template is None: + tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE + value_model = AutoModelForSequenceClassification.from_pretrained( + training_args.reward_model_path, trust_remote_code=model_config.trust_remote_code, num_labels=1 + ) + reward_model = AutoModelForSequenceClassification.from_pretrained( + training_args.reward_model_path, trust_remote_code=model_config.trust_remote_code, num_labels=1 + ) + policy = AutoModelForCausalLM.from_pretrained( + training_args.sft_model_path, trust_remote_code=model_config.trust_remote_code + ) + + peft_config = get_peft_config(model_config) + if peft_config is None: + ref_policy = AutoModelForCausalLM.from_pretrained( + training_args.sft_model_path, trust_remote_code=model_config.trust_remote_code + ) + else: + ref_policy = None + + ################ + # Dataset + ################ + dataset = load_dataset(script_args.dataset_name, split=script_args.dataset_train_split) + eval_samples = 100 + train_dataset = dataset.select(range(len(dataset) - eval_samples)) + eval_dataset = dataset.select(range(len(dataset) - eval_samples, len(dataset))) + dataset_text_field = "prompt" + + def prepare_dataset(dataset, tokenizer): + """pre-tokenize the dataset before training; only collate during training""" + + def tokenize(element): + outputs = tokenizer( + element[dataset_text_field], + padding=False, + ) + return {"input_ids": outputs["input_ids"]} + + return dataset.map( + tokenize, + batched=True, + remove_columns=dataset.column_names, + num_proc=training_args.dataset_num_proc, + ) + + # Compute that only on the main process for faster data processing. + # see: https://github.com/huggingface/trl/pull/1255 + with PartialState().local_main_process_first(): + train_dataset = prepare_dataset(train_dataset, tokenizer) + eval_dataset = prepare_dataset(eval_dataset, tokenizer) + + ################ + # Training + ################ + trainer = PPOTrainer( + config=training_args, + processing_class=tokenizer, + policy=policy, + ref_policy=ref_policy, + reward_model=reward_model, + value_model=value_model, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + peft_config=peft_config, + ) + trainer.train() + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) + + trainer.generate_completions() diff --git a/testbed/huggingface__trl/examples/scripts/ppo/ppo_tldr.py b/testbed/huggingface__trl/examples/scripts/ppo/ppo_tldr.py new file mode 100644 index 0000000000000000000000000000000000000000..73ea5cd8526d2a81a330a349ab59d0af60e0f80f --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/ppo/ppo_tldr.py @@ -0,0 +1,183 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import shutil + +import torch +from accelerate import PartialState +from datasets import load_dataset +from transformers import ( + AutoModelForCausalLM, + AutoModelForSequenceClassification, + AutoTokenizer, + HfArgumentParser, +) + +from trl import ( + ModelConfig, + PPOConfig, + PPOTrainer, + ScriptArguments, + get_kbit_device_map, + get_peft_config, + get_quantization_config, +) +from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE + + +""" +python examples/scripts/ppo/ppo_tldr.py \ + --dataset_name trl-internal-testing/tldr-preference-sft-trl-style \ + --dataset_test_split validation \ + --learning_rate 3e-6 \ + --output_dir models/minimal/ppo_tldr \ + --per_device_train_batch_size 1 \ + --gradient_accumulation_steps 64 \ + --total_episodes 30000 \ + --model_name_or_path EleutherAI/pythia-1b-deduped \ + --sft_model_path cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr \ + --reward_model_path cleanrl/EleutherAI_pythia-1b-deduped__reward__tldr \ + --missing_eos_penalty 1.0 \ + --stop_token eos \ + --response_length 53 \ + --eval_strategy steps \ + --eval_steps 100 + +accelerate launch --config_file examples/accelerate_configs/deepspeed_zero2.yaml \ + examples/scripts/ppo/ppo_tldr.py \ + --dataset_name trl-internal-testing/tldr-preference-sft-trl-style \ + --dataset_test_split validation \ + --output_dir models/minimal/ppo_tldr \ + --learning_rate 3e-6 \ + --per_device_train_batch_size 16 \ + --gradient_accumulation_steps 4 \ + --total_episodes 1000000 \ + --model_name_or_path EleutherAI/pythia-1b-deduped \ + --sft_model_path cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr \ + --reward_model_path cleanrl/EleutherAI_pythia-1b-deduped__reward__tldr \ + --local_rollout_forward_batch_size 16 \ + --missing_eos_penalty 1.0 \ + --stop_token eos \ + --eval_strategy steps \ + --eval_steps 100 +""" + + +if __name__ == "__main__": + parser = HfArgumentParser((ScriptArguments, PPOConfig, ModelConfig)) + script_args, training_args, model_config = parser.parse_args_into_dataclasses() + # remove output_dir if exists + shutil.rmtree(training_args.output_dir, ignore_errors=True) + + ################ + # Model & Tokenizer + ################ + torch_dtype = ( + model_config.torch_dtype + if model_config.torch_dtype in ["auto", None] + else getattr(torch, model_config.torch_dtype) + ) + quantization_config = get_quantization_config(model_config) + model_kwargs = dict( + revision=model_config.model_revision, + attn_implementation=model_config.attn_implementation, + torch_dtype=torch_dtype, + device_map=get_kbit_device_map() if quantization_config is not None else None, + quantization_config=quantization_config, + ) + + tokenizer = AutoTokenizer.from_pretrained( + model_config.model_name_or_path, + padding_side="left", + trust_remote_code=model_config.trust_remote_code, + ) + tokenizer.add_special_tokens({"pad_token": "[PAD]"}) + if tokenizer.chat_template is None: + tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE + value_model = AutoModelForSequenceClassification.from_pretrained( + training_args.reward_model_path, trust_remote_code=model_config.trust_remote_code, num_labels=1 + ) + reward_model = AutoModelForSequenceClassification.from_pretrained( + training_args.reward_model_path, trust_remote_code=model_config.trust_remote_code, num_labels=1 + ) + policy = AutoModelForCausalLM.from_pretrained( + training_args.sft_model_path, trust_remote_code=model_config.trust_remote_code + ) + + peft_config = get_peft_config(model_config) + if peft_config is None: + ref_policy = AutoModelForCausalLM.from_pretrained( + training_args.sft_model_path, trust_remote_code=model_config.trust_remote_code + ) + else: + ref_policy = None + + ################ + # Dataset + ################ + dataset = load_dataset(script_args.dataset_name) + train_dataset = dataset[script_args.dataset_train_split] + eval_dataset = dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None + + def prepare_dataset(dataset, tokenizer): + """pre-tokenize the dataset before training; only collate during training""" + + def tokenize(element): + input_ids = tokenizer.apply_chat_template( + element["messages"][:1], + padding=False, + add_generation_prompt=True, + ) + return {"input_ids": input_ids, "lengths": len(input_ids)} + + return dataset.map( + tokenize, + remove_columns=dataset.column_names, + num_proc=training_args.dataset_num_proc, + ) + + # Compute that only on the main process for faster data processing. + # see: https://github.com/huggingface/trl/pull/1255 + with PartialState().local_main_process_first(): + train_dataset = prepare_dataset(train_dataset, tokenizer) + if eval_dataset is not None: + eval_dataset = prepare_dataset(eval_dataset, tokenizer) + # filtering + train_dataset = train_dataset.filter(lambda x: x["lengths"] <= 512, num_proc=training_args.dataset_num_proc) + if eval_dataset is not None: + eval_dataset = eval_dataset.filter(lambda x: x["lengths"] <= 512, num_proc=training_args.dataset_num_proc) + + assert train_dataset[0]["input_ids"][-1] != tokenizer.eos_token_id, "The last token should not be an EOS token" + ################ + # Training + ################ + trainer = PPOTrainer( + config=training_args, + processing_class=tokenizer, + policy=policy, + ref_policy=ref_policy, + reward_model=reward_model, + value_model=value_model, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + peft_config=peft_config, + ) + trainer.train() + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) + + trainer.generate_completions() diff --git a/testbed/huggingface__trl/examples/scripts/reward_modeling.py b/testbed/huggingface__trl/examples/scripts/reward_modeling.py new file mode 100644 index 0000000000000000000000000000000000000000..073016bc7776a859f041dfd48cb3af4179c74758 --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/reward_modeling.py @@ -0,0 +1,136 @@ +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Full training: +python examples/scripts/reward_modeling.py \ + --model_name_or_path Qwen/Qwen2-0.5B-Instruct \ + --dataset_name trl-lib/ultrafeedback_binarized \ + --output_dir Qwen2-0.5B-Reward \ + --per_device_train_batch_size 8 \ + --num_train_epochs 1 \ + --gradient_checkpointing True \ + --learning_rate 1.0e-5 \ + --logging_steps 25 \ + --eval_strategy steps \ + --eval_steps 50 \ + --max_length 2048 + +LoRA: +python examples/scripts/reward_modeling.py \ + --model_name_or_path Qwen/Qwen2-0.5B-Instruct \ + --dataset_name trl-lib/ultrafeedback_binarized \ + --output_dir Qwen2-0.5B-Reward-LoRA \ + --per_device_train_batch_size 8 \ + --num_train_epochs 1 \ + --gradient_checkpointing True \ + --learning_rate 1.0e-4 \ + --logging_steps 25 \ + --eval_strategy steps \ + --eval_steps 50 \ + --max_length 2048 \ + --use_peft \ + --lora_r 32 \ + --lora_alpha 16 +""" + +import warnings + +import torch +from datasets import load_dataset +from transformers import AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser + +from trl import ( + ModelConfig, + RewardConfig, + RewardTrainer, + ScriptArguments, + get_kbit_device_map, + get_peft_config, + get_quantization_config, + setup_chat_format, +) + + +if __name__ == "__main__": + parser = HfArgumentParser((ScriptArguments, RewardConfig, ModelConfig)) + script_args, training_args, model_config = parser.parse_args_into_dataclasses() + training_args.gradient_checkpointing_kwargs = dict(use_reentrant=False) + + ################ + # Model & Tokenizer + ################ + torch_dtype = ( + model_config.torch_dtype + if model_config.torch_dtype in ["auto", None] + else getattr(torch, model_config.torch_dtype) + ) + quantization_config = get_quantization_config(model_config) + model_kwargs = dict( + revision=model_config.model_revision, + device_map=get_kbit_device_map() if quantization_config is not None else None, + quantization_config=quantization_config, + use_cache=False if training_args.gradient_checkpointing else True, + torch_dtype=torch_dtype, + ) + tokenizer = AutoTokenizer.from_pretrained( + model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code, use_fast=True + ) + model = AutoModelForSequenceClassification.from_pretrained( + model_config.model_name_or_path, num_labels=1, trust_remote_code=model_config.trust_remote_code, **model_kwargs + ) + # Align padding tokens between tokenizer and model + model.config.pad_token_id = tokenizer.pad_token_id + + # If post-training a base model, use ChatML as the default template + if tokenizer.chat_template is None: + model, tokenizer = setup_chat_format(model, tokenizer) + + if model_config.use_peft and model_config.lora_task_type != "SEQ_CLS": + warnings.warn( + "You are using a `task_type` that is different than `SEQ_CLS` for PEFT. This will lead to silent bugs" + " Make sure to pass --lora_task_type SEQ_CLS when using this script with PEFT." + ) + + ############## + # Load dataset + ############## + dataset = load_dataset(script_args.dataset_name) + + ########## + # Training + ########## + trainer = RewardTrainer( + model=model, + processing_class=tokenizer, + args=training_args, + train_dataset=dataset[script_args.dataset_train_split], + eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, + peft_config=get_peft_config(model_config), + ) + trainer.train() + + ############################ + # Save model and push to Hub + ############################ + trainer.save_model(training_args.output_dir) + + if training_args.eval_strategy != "no": + metrics = trainer.evaluate() + trainer.log_metrics("eval", metrics) + trainer.save_metrics("eval", metrics) + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) diff --git a/testbed/huggingface__trl/examples/scripts/rloo/rloo.py b/testbed/huggingface__trl/examples/scripts/rloo/rloo.py new file mode 100644 index 0000000000000000000000000000000000000000..a3c685a84b7d6758cce5b9e5dc063b688ca80a36 --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/rloo/rloo.py @@ -0,0 +1,141 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import shutil + +from accelerate import PartialState +from datasets import load_dataset +from transformers import ( + AutoModelForCausalLM, + AutoModelForSequenceClassification, + AutoTokenizer, + HfArgumentParser, +) + +from trl import ModelConfig, RLOOConfig, RLOOTrainer, ScriptArguments +from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE + + +""" +python -i examples/scripts/rloo/rloo.py \ + --dataset_name trl-internal-testing/descriptiveness-sentiment-trl-style \ + --dataset_train_split descriptiveness \ + --learning_rate 3e-6 \ + --num_ppo_epochs 1 \ + --num_mini_batches 1 \ + --output_dir models/minimal/ppo \ + --per_device_train_batch_size 64 \ + --gradient_accumulation_steps 1 \ + --total_episodes 10000 \ + --model_name_or_path EleutherAI/pythia-1b-deduped \ + --missing_eos_penalty 1.0 + +accelerate launch --config_file examples/accelerate_configs/deepspeed_zero3.yaml \ + examples/scripts/rloo/rloo.py \ + --dataset_name trl-internal-testing/descriptiveness-sentiment-trl-style \ + --dataset_train_split descriptiveness \ + --output_dir models/minimal/rloo \ + --rloo_k 2 \ + --num_ppo_epochs 1 \ + --num_mini_batches 1 \ + --learning_rate 3e-6 \ + --per_device_train_batch_size 1 \ + --gradient_accumulation_steps 16 \ + --total_episodes 10000 \ + --model_name_or_path EleutherAI/pythia-1b-deduped \ + --sft_model_path EleutherAI/pythia-1b-deduped \ + --reward_model_path EleutherAI/pythia-1b-deduped \ + --local_rollout_forward_batch_size 1 \ + --missing_eos_penalty 1.0 +""" + + +if __name__ == "__main__": + parser = HfArgumentParser((ScriptArguments, RLOOConfig, ModelConfig)) + script_args, training_args, model_config = parser.parse_args_into_dataclasses() + # remove output_dir if exists + shutil.rmtree(training_args.output_dir, ignore_errors=True) + + ################ + # Model & Tokenizer + ################ + tokenizer = AutoTokenizer.from_pretrained( + model_config.model_name_or_path, + padding_side="left", + trust_remote_code=model_config.trust_remote_code, + ) + tokenizer.add_special_tokens({"pad_token": "[PAD]"}) + if tokenizer.chat_template is None: + tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE + reward_model = AutoModelForSequenceClassification.from_pretrained( + training_args.reward_model_path, trust_remote_code=model_config.trust_remote_code, num_labels=1 + ) + ref_policy = AutoModelForCausalLM.from_pretrained( + training_args.sft_model_path, trust_remote_code=model_config.trust_remote_code + ) + policy = AutoModelForCausalLM.from_pretrained( + training_args.sft_model_path, trust_remote_code=model_config.trust_remote_code + ) + ################ + # Dataset + ################ + dataset = load_dataset(script_args.dataset_name, split=script_args.dataset_train_split) + eval_samples = 100 + train_dataset = dataset.select(range(len(dataset) - eval_samples)) + eval_dataset = dataset.select(range(len(dataset) - eval_samples, len(dataset))) + dataset_text_field = "prompt" + + def prepare_dataset(dataset, tokenizer): + """pre-tokenize the dataset before training; only collate during training""" + + def tokenize(element): + outputs = tokenizer( + element[dataset_text_field], + padding=False, + ) + return {"input_ids": outputs["input_ids"]} + + return dataset.map( + tokenize, + batched=True, + remove_columns=dataset.column_names, + num_proc=training_args.dataset_num_proc, + ) + + # Compute that only on the main process for faster data processing. + # see: https://github.com/huggingface/trl/pull/1255 + with PartialState().local_main_process_first(): + train_dataset = prepare_dataset(train_dataset, tokenizer) + eval_dataset = prepare_dataset(eval_dataset, tokenizer) + + ################ + # Training + ################ + trainer = RLOOTrainer( + config=training_args, + processing_class=tokenizer, + policy=policy, + ref_policy=ref_policy, + reward_model=reward_model, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + ) + trainer.train() + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) + + trainer.generate_completions() diff --git a/testbed/huggingface__trl/examples/scripts/rloo/rloo_tldr.py b/testbed/huggingface__trl/examples/scripts/rloo/rloo_tldr.py new file mode 100644 index 0000000000000000000000000000000000000000..36f759208ada8d7209b04151706fb53c4ee2add2 --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/rloo/rloo_tldr.py @@ -0,0 +1,145 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import shutil + +from accelerate import PartialState +from datasets import load_dataset +from transformers import ( + AutoModelForCausalLM, + AutoModelForSequenceClassification, + AutoTokenizer, + HfArgumentParser, +) + +from trl import ModelConfig, RLOOConfig, RLOOTrainer, ScriptArguments +from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE + + +""" +python examples/scripts/rloo/rloo_tldr.py \ + --dataset_name trl-internal-testing/tldr-preference-sft-trl-style \ + --dataset_test_split validation \ + --learning_rate 3e-6 \ + --output_dir models/minimal/ppo \ + --per_device_train_batch_size 1 \ + --gradient_accumulation_steps 64 \ + --total_episodes 30000 \ + --model_name_or_path EleutherAI/pythia-1b-deduped \ + --sft_model_path cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr \ + --reward_model_path cleanrl/EleutherAI_pythia-1b-deduped__reward__tldr \ + --missing_eos_penalty 1.0 \ + --stop_token eos \ + --response_length 53 + +accelerate launch --config_file examples/accelerate_configs/deepspeed_zero2.yaml \ + examples/scripts/rloo/rloo_tldr.py \ + --dataset_name trl-internal-testing/tldr-preference-sft-trl-style \ + --dataset_test_split validation \ + --output_dir models/minimal/rloo_tldr \ + --num_ppo_epochs 1 \ + --num_mini_batches 1 \ + --learning_rate 3e-6 \ + --per_device_train_batch_size 16 \ + --gradient_accumulation_steps 4 \ + --total_episodes 1000000 \ + --model_name_or_path EleutherAI/pythia-1b-deduped \ + --sft_model_path cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr \ + --reward_model_path cleanrl/EleutherAI_pythia-1b-deduped__reward__tldr \ + --local_rollout_forward_batch_size 16 \ + --missing_eos_penalty 1.0 \ + --stop_token eos +""" + + +if __name__ == "__main__": + parser = HfArgumentParser((ScriptArguments, RLOOConfig, ModelConfig)) + script_args, training_args, model_config = parser.parse_args_into_dataclasses() + # remove output_dir if exists + shutil.rmtree(training_args.output_dir, ignore_errors=True) + + ################ + # Model & Tokenizer + ################ + tokenizer = AutoTokenizer.from_pretrained( + model_config.model_name_or_path, + padding_side="left", + trust_remote_code=model_config.trust_remote_code, + ) + tokenizer.add_special_tokens({"pad_token": "[PAD]"}) + if tokenizer.chat_template is None: + tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE + reward_model = AutoModelForSequenceClassification.from_pretrained( + training_args.reward_model_path, trust_remote_code=model_config.trust_remote_code, num_labels=1 + ) + ref_policy = AutoModelForCausalLM.from_pretrained( + training_args.sft_model_path, trust_remote_code=model_config.trust_remote_code + ) + policy = AutoModelForCausalLM.from_pretrained( + training_args.sft_model_path, trust_remote_code=model_config.trust_remote_code + ) + ################ + # Dataset + ################ + dataset = load_dataset(script_args.dataset_name) + train_dataset = dataset[script_args.dataset_train_split] + eval_dataset = dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None + + def prepare_dataset(dataset, tokenizer): + """pre-tokenize the dataset before training; only collate during training""" + + def tokenize(element): + input_ids = tokenizer.apply_chat_template( + element["messages"][:1], + padding=False, + add_generation_prompt=True, + ) + return {"input_ids": input_ids, "lengths": len(input_ids)} + + return dataset.map( + tokenize, + remove_columns=dataset.column_names, + num_proc=training_args.dataset_num_proc, + ) + + # Compute that only on the main process for faster data processing. + # see: https://github.com/huggingface/trl/pull/1255 + with PartialState().local_main_process_first(): + train_dataset = prepare_dataset(train_dataset, tokenizer) + eval_dataset = prepare_dataset(eval_dataset, tokenizer) + # filtering + train_dataset = train_dataset.filter(lambda x: x["lengths"] <= 512, num_proc=training_args.dataset_num_proc) + eval_dataset = eval_dataset.filter(lambda x: x["lengths"] <= 512, num_proc=training_args.dataset_num_proc) + + assert train_dataset[0]["input_ids"][-1] != tokenizer.eos_token_id, "The last token should not be an EOS token" + ################ + # Training + ################ + trainer = RLOOTrainer( + config=training_args, + processing_class=tokenizer, + policy=policy, + ref_policy=ref_policy, + reward_model=reward_model, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + ) + trainer.train() + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) + + trainer.generate_completions() diff --git a/testbed/huggingface__trl/examples/scripts/sft.py b/testbed/huggingface__trl/examples/scripts/sft.py new file mode 100644 index 0000000000000000000000000000000000000000..422fa89a6fe31fe6a7c535f64a6cc72620bfd0a8 --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/sft.py @@ -0,0 +1,111 @@ +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +# Full training +python examples/scripts/sft.py \ + --model_name_or_path Qwen/Qwen2-0.5B \ + --dataset_name trl-lib/Capybara \ + --learning_rate 2.0e-5 \ + --num_train_epochs 1 \ + --packing \ + --per_device_train_batch_size 2 \ + --gradient_accumulation_steps 8 \ + --gradient_checkpointing \ + --logging_steps 25 \ + --eval_strategy steps \ + --eval_steps 100 \ + --output_dir Qwen2-0.5B-SFT \ + --push_to_hub + +# LoRA +python examples/scripts/sft.py \ + --model_name_or_path Qwen/Qwen2-0.5B \ + --dataset_name trl-lib/Capybara \ + --learning_rate 2.0e-4 \ + --num_train_epochs 1 \ + --packing \ + --per_device_train_batch_size 2 \ + --gradient_accumulation_steps 8 \ + --gradient_checkpointing \ + --logging_steps 25 \ + --eval_strategy steps \ + --eval_steps 100 \ + --use_peft \ + --lora_r 32 \ + --lora_alpha 16 \ + --output_dir Qwen2-0.5B-SFT \ + --push_to_hub +""" + +from datasets import load_dataset +from transformers import AutoTokenizer + +from trl import ( + ModelConfig, + ScriptArguments, + SFTConfig, + SFTTrainer, + TrlParser, + get_kbit_device_map, + get_peft_config, + get_quantization_config, +) + + +if __name__ == "__main__": + parser = TrlParser((ScriptArguments, SFTConfig, ModelConfig)) + script_args, training_args, model_config = parser.parse_args_and_config() + + ################ + # Model init kwargs & Tokenizer + ################ + quantization_config = get_quantization_config(model_config) + model_kwargs = dict( + revision=model_config.model_revision, + trust_remote_code=model_config.trust_remote_code, + attn_implementation=model_config.attn_implementation, + torch_dtype=model_config.torch_dtype, + use_cache=False if training_args.gradient_checkpointing else True, + device_map=get_kbit_device_map() if quantization_config is not None else None, + quantization_config=quantization_config, + ) + training_args.model_init_kwargs = model_kwargs + tokenizer = AutoTokenizer.from_pretrained( + model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code, use_fast=True + ) + tokenizer.pad_token = tokenizer.eos_token + + ################ + # Dataset + ################ + dataset = load_dataset(script_args.dataset_name) + + ################ + # Training + ################ + trainer = SFTTrainer( + model=model_config.model_name_or_path, + args=training_args, + train_dataset=dataset[script_args.dataset_train_split], + eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, + processing_class=tokenizer, + peft_config=get_peft_config(model_config), + ) + + trainer.train() + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) diff --git a/testbed/huggingface__trl/examples/scripts/sft_video_llm.py b/testbed/huggingface__trl/examples/scripts/sft_video_llm.py new file mode 100644 index 0000000000000000000000000000000000000000..78941c83635d796dc768c2b26732e02112426083 --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/sft_video_llm.py @@ -0,0 +1,257 @@ +# Copyright 2024. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Example usage: +accelerate launch \ + --config_file=deepspeed_zero2.yaml \ + sft_video_llm.py \ + --dataset_name=mfarre/simplevideoshorts \ + --video_cache_dir="/optional/path/to/cache/" \ + --model_name_or_path=Qwen/Qwen2-VL-7B-Instruct \ + --per_device_train_batch_size=1 \ + --output_dir=video-llm-output \ + --bf16=True \ + --tf32=True \ + --gradient_accumulation_steps=4 \ + --num_train_epochs=4 \ + --optim="adamw_torch_fused" \ + --logging_steps=1 \ + --log_level="debug" \ + --log_level_replica="debug" \ + --save_strategy="steps" \ + --save_steps=300 \ + --learning_rate=8e-5 \ + --max_grad_norm=0.3 \ + --warmup_ratio=0.1 \ + --lr_scheduler_type="cosine" \ + --report_to="wandb" \ + --push_to_hub=False \ + --torch_dtype=bfloat16 \ + --gradient_checkpointing=True +""" + +import json +import os +import random +from dataclasses import dataclass +from typing import Any, Dict, List + +import requests +import torch +import wandb +from datasets import load_dataset +from peft import LoraConfig +from qwen_vl_utils import process_vision_info +from transformers import ( + AutoModelForVision2Seq, + AutoProcessor, + BitsAndBytesConfig, + Qwen2VLProcessor, +) + +from trl import ( + SFTConfig, + SFTTrainer, + get_kbit_device_map, +) +from trl.commands.cli_utils import SFTScriptArguments, TrlParser +from trl.trainer import ModelConfig + + +def download_video(url: str, cache_dir: str) -> str: + """Download video if not already present locally.""" + os.makedirs(cache_dir, exist_ok=True) # Create cache dir if it doesn't exist + filename = url.split("/")[-1] + local_path = os.path.join(cache_dir, filename) + + if os.path.exists(local_path): + return local_path + + try: + with requests.get(url, stream=True) as r: + r.raise_for_status() + with open(local_path, "wb") as f: + for chunk in r.iter_content(chunk_size=8192): + if chunk: + f.write(chunk) + return local_path + except requests.RequestException as e: + raise Exception(f"Failed to download video: {e}") from e + + +def prepare_dataset(example: Dict[str, Any], cache_dir: str) -> Dict[str, List[Dict[str, Any]]]: + """Prepare dataset example for training.""" + video_url = example["video_url"] + timecoded_cc = example["timecoded_cc"] + qa_pairs = json.loads(example["qa"]) + + system_message = "You are an expert in movie narrative analysis." + base_prompt = f"""Analyze the video and consider the following timecoded subtitles: + +{timecoded_cc} + +Based on this information, please answer the following questions:""" + + selected_qa = random.sample(qa_pairs, 1)[0] + + messages = [ + {"role": "system", "content": [{"type": "text", "text": system_message}]}, + { + "role": "user", + "content": [ + {"type": "video", "video": download_video(video_url, cache_dir), "max_pixels": 360 * 420, "fps": 1.0}, + {"type": "text", "text": f"{base_prompt}\n\nQuestion: {selected_qa['question']}"}, + ], + }, + {"role": "assistant", "content": [{"type": "text", "text": selected_qa["answer"]}]}, + ] + + return {"messages": messages} + + +def collate_fn(examples: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]: + """Collate batch of examples for training.""" + texts = [] + video_inputs = [] + + for i, example in enumerate(examples): + try: + video_path = next( + content["video"] + for message in example["messages"] + for content in message["content"] + if content.get("type") == "video" + ) + print(f"Processing video: {os.path.basename(video_path)}") + + texts.append(processor.apply_chat_template(example["messages"], tokenize=False)) + video_input = process_vision_info(example["messages"])[1][0] + video_inputs.append(video_input) + except Exception as e: + raise ValueError(f"Failed to process example {i}: {e}") from e + + inputs = processor(text=texts, videos=video_inputs, return_tensors="pt", padding=True) + + labels = inputs["input_ids"].clone() + labels[labels == processor.tokenizer.pad_token_id] = -100 + + # Handle visual tokens based on processor type + visual_tokens = ( + [151652, 151653, 151656] + if isinstance(processor, Qwen2VLProcessor) + else [processor.tokenizer.convert_tokens_to_ids(processor.image_token)] + ) + + for visual_token_id in visual_tokens: + labels[labels == visual_token_id] = -100 + + inputs["labels"] = labels + return inputs + + +@dataclass +class CustomScriptArguments(SFTScriptArguments): + video_cache_dir: str = "/tmp/videos/" + + +if __name__ == "__main__": + # Parse arguments + parser = TrlParser((CustomScriptArguments, SFTConfig, ModelConfig)) + script_args, training_args, model_config = parser.parse_args_and_config() + + # Configure training args + training_args.gradient_checkpointing_kwargs = dict(use_reentrant=False) + training_args.remove_unused_columns = False + training_args.dataset_kwargs = {"skip_prepare_dataset": True} + + # Load dataset + dataset = load_dataset(script_args.dataset_name, split="train") + + # Setup model + torch_dtype = ( + model_config.torch_dtype + if model_config.torch_dtype in ["auto", None] + else getattr(torch, model_config.torch_dtype) + ) + + # Quantization configuration for 4-bit training + bnb_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_use_double_quant=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_compute_dtype=torch.bfloat16, + ) + + # Model initialization + model_kwargs = dict( + revision=model_config.model_revision, + trust_remote_code=model_config.trust_remote_code, + torch_dtype=torch_dtype, + device_map=get_kbit_device_map(), + quantization_config=bnb_config, + ) + + model = AutoModelForVision2Seq.from_pretrained(model_config.model_name_or_path, **model_kwargs) + + peft_config = LoraConfig( + task_type="CAUSAL_LM", + r=16, + lora_alpha=16, + lora_dropout=0.1, + bias="none", + target_modules=["q_proj", "k_proj", "v_proj", "o_proj"], + ) + + # Configure model modules for gradients + if training_args.gradient_checkpointing: + model.gradient_checkpointing_enable() + model.config.use_reentrant = False + model.enable_input_require_grads() + + processor = AutoProcessor.from_pretrained( + model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code + ) + + # Prepare dataset + prepared_dataset = [prepare_dataset(example, script_args.video_cache_dir) for example in dataset] + + # Initialize wandb if specified + if training_args.report_to == "wandb": + wandb.init(project="video-llm-training") + + # Initialize trainer + trainer = SFTTrainer( + model=model, + args=training_args, + train_dataset=prepared_dataset, + data_collator=collate_fn, + peft_config=peft_config, + tokenizer=processor.tokenizer, + ) + + # Train model + trainer.train() + + # Save final model + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) + if trainer.accelerator.is_main_process: + processor.push_to_hub(training_args.hub_model_id) + + # Cleanup + del model + del trainer + torch.cuda.empty_cache() + wandb.finish() diff --git a/testbed/huggingface__trl/examples/scripts/sft_vlm.py b/testbed/huggingface__trl/examples/scripts/sft_vlm.py new file mode 100644 index 0000000000000000000000000000000000000000..49654bc408c510727f4d4d3721783742af88060c --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/sft_vlm.py @@ -0,0 +1,133 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +pip install pillow + +# Tested on 8x H100 GPUs +accelerate launch + --config_file=examples/accelerate_configs/deepspeed_zero3.yaml \ + examples/scripts/sft_vlm.py \ + --dataset_name HuggingFaceH4/llava-instruct-mix-vsft \ + --model_name_or_path llava-hf/llava-1.5-7b-hf \ + --per_device_train_batch_size 8 \ + --gradient_accumulation_steps 8 \ + --output_dir sft-llava-1.5-7b-hf \ + --bf16 \ + --torch_dtype bfloat16 \ + --gradient_checkpointing + +For LLaVA-NeXT, use: (requires transformers>=4.45) + --model_name_or_path llava-hf/llava-v1.6-mistral-7b-hf + +For meta-llama/Llama-3.2-11B-Vision-Instruct, use: (requires transformers>=4.45.1) + --model_name_or_path meta-llama/Llama-3.2-11B-Vision-Instruct +""" + +import torch +from datasets import load_dataset +from transformers import AutoModelForVision2Seq, AutoProcessor, LlavaForConditionalGeneration + +from trl import ( + ModelConfig, + ScriptArguments, + SFTConfig, + SFTTrainer, + TrlParser, + get_kbit_device_map, + get_peft_config, + get_quantization_config, +) + + +if __name__ == "__main__": + parser = TrlParser((ScriptArguments, SFTConfig, ModelConfig)) + script_args, training_args, model_config = parser.parse_args_and_config() + training_args.gradient_checkpointing_kwargs = dict(use_reentrant=False) + training_args.remove_unused_columns = False + training_args.dataset_kwargs = {"skip_prepare_dataset": True} + + ################ + # Model, Tokenizer & Processor + ################ + torch_dtype = ( + model_config.torch_dtype + if model_config.torch_dtype in ["auto", None] + else getattr(torch, model_config.torch_dtype) + ) + quantization_config = get_quantization_config(model_config) + model_kwargs = dict( + revision=model_config.model_revision, + attn_implementation=model_config.attn_implementation, + torch_dtype=torch_dtype, + device_map=get_kbit_device_map() if quantization_config is not None else None, + quantization_config=quantization_config, + ) + processor = AutoProcessor.from_pretrained( + model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code + ) + + model = AutoModelForVision2Seq.from_pretrained( + model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code, **model_kwargs + ) + + ################ + # Create a data collator to encode text and image pairs + ################ + def collate_fn(examples): + # Get the texts and images, and apply the chat template + texts = [processor.apply_chat_template(example["messages"], tokenize=False) for example in examples] + images = [example["images"] for example in examples] + if isinstance(model, LlavaForConditionalGeneration): + # LLava1.5 does not support multiple images + images = [image[0] for image in images] + + # Tokenize the texts and process the images + batch = processor(text=texts, images=images, return_tensors="pt", padding=True) + + # The labels are the input_ids, and we mask the padding tokens in the loss computation + labels = batch["input_ids"].clone() + labels[labels == processor.tokenizer.pad_token_id] = -100 # + # Ignore the image token index in the loss computation (model specific) + image_token_id = processor.tokenizer.convert_tokens_to_ids(processor.image_token) + labels[labels == image_token_id] = -100 + batch["labels"] = labels + + return batch + + ################ + # Dataset + ################ + dataset = load_dataset(script_args.dataset_name) + + ################ + # Training + ################ + trainer = SFTTrainer( + model=model, + args=training_args, + data_collator=collate_fn, + train_dataset=dataset[script_args.dataset_train_split], + eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, + processing_class=processor.tokenizer, + peft_config=get_peft_config(model_config), + ) + + trainer.train() + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) + if trainer.accelerator.is_main_process: + processor.push_to_hub(training_args.hub_model_id) diff --git a/testbed/huggingface__trl/examples/scripts/xpo.py b/testbed/huggingface__trl/examples/scripts/xpo.py new file mode 100644 index 0000000000000000000000000000000000000000..d2d3cb05f8b3a9c8fa031bc882680694a368d2bb --- /dev/null +++ b/testbed/huggingface__trl/examples/scripts/xpo.py @@ -0,0 +1,133 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: + +python examples/scripts/xpo.py \ + --model_name_or_path trl-lib/pythia-1b-deduped-tldr-sft \ + --reward_model_path trl-lib/pythia-1b-deduped-tldr-rm \ + --dataset_name trl-lib/tldr \ + --learning_rate 5.0e-7 \ + --output_dir pythia-1b-tldr-xpo \ + --per_device_train_batch_size 4 \ + --gradient_accumulation_steps 32 \ + --num_train_epochs 3 \ + --max_new_tokens 64 \ + --warmup_ratio 0.1 \ + --missing_eos_penalty 1.0 \ + --push_to_hub +""" + +import torch +from datasets import load_dataset +from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, GenerationConfig + +from trl import ( + HfPairwiseJudge, + LogCompletionsCallback, + ModelConfig, + OpenAIPairwiseJudge, + PairRMJudge, + ScriptArguments, + TrlParser, + XPOConfig, + XPOTrainer, + get_kbit_device_map, + get_quantization_config, +) +from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE + + +JUDGES = {"pair_rm": PairRMJudge, "openai": OpenAIPairwiseJudge, "hf": HfPairwiseJudge} + + +if __name__ == "__main__": + parser = TrlParser((ScriptArguments, XPOConfig, ModelConfig)) + script_args, training_args, model_config = parser.parse_args_and_config() + training_args.gradient_checkpointing_kwargs = {"use_reentrant": True} + + torch_dtype = ( + model_config.torch_dtype + if model_config.torch_dtype in ["auto", None] + else getattr(torch, model_config.torch_dtype) + ) + quantization_config = get_quantization_config(model_config) + model_kwargs = dict( + revision=model_config.model_revision, + attn_implementation=model_config.attn_implementation, + torch_dtype=torch_dtype, + use_cache=False if training_args.gradient_checkpointing else True, + device_map=get_kbit_device_map() if quantization_config is not None else None, + quantization_config=quantization_config, + ) + + model = AutoModelForCausalLM.from_pretrained( + model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code, **model_kwargs + ) + ref_model = AutoModelForCausalLM.from_pretrained( + model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code, **model_kwargs + ) + + if training_args.reward_model_path is not None: + reward_model = AutoModelForSequenceClassification.from_pretrained( + training_args.reward_model_path, + num_labels=1, + trust_remote_code=model_config.trust_remote_code, + **model_kwargs, + ) + else: + reward_model = None + + if training_args.judge is not None: + judge_cls = JUDGES[training_args.judge] + judge = judge_cls() + else: + judge = None + + tokenizer = AutoTokenizer.from_pretrained( + model_config.model_name_or_path, + padding_side="left", + trust_remote_code=model_config.trust_remote_code, + ) + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + if tokenizer.chat_template is None: + tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE + + dataset = load_dataset(script_args.dataset_name) + + trainer = XPOTrainer( + model=model, + ref_model=ref_model, + reward_model=reward_model, + judge=judge, + args=training_args, + train_dataset=dataset[script_args.dataset_train_split], + eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, + processing_class=tokenizer, + ) + + if training_args.eval_strategy != "no": + generation_config = GenerationConfig( + max_new_tokens=training_args.max_new_tokens, do_sample=True, temperature=training_args.temperature + ) + completions_callback = LogCompletionsCallback(trainer, generation_config, num_prompts=8) + trainer.add_callback(completions_callback) + + trainer.train() + + # Save and push to hub + trainer.save_model(training_args.output_dir) + if training_args.push_to_hub: + trainer.push_to_hub(dataset_name=script_args.dataset_name) diff --git a/testbed/huggingface__trl/scripts/add_copyrights.py b/testbed/huggingface__trl/scripts/add_copyrights.py new file mode 100644 index 0000000000000000000000000000000000000000..9acd98bea458f8c176ccba2f7384907f4d6c3ccf --- /dev/null +++ b/testbed/huggingface__trl/scripts/add_copyrights.py @@ -0,0 +1,95 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import subprocess +import sys +from datetime import datetime + + +COPYRIGHT_HEADER = f"""# Copyright {datetime.now().year} The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" + +COPYRIGHT_KEYWORD = "# Copyright 20" + + +def get_tracked_python_files(): + """Get a list of all tracked Python files using git.""" + try: + # Get the list of all tracked files from Git + result = subprocess.run(["git", "ls-files"], stdout=subprocess.PIPE, text=True, check=True) + # Split the result by lines to get individual file paths + files = result.stdout.splitlines() + # Filter only Python files + py_files = [f for f in files if f.endswith(".py")] + return py_files + except subprocess.CalledProcessError as e: + print(f"Error fetching tracked files: {e}") + return [] + + +def check_and_add_copyright(file_path): + """Check if the file contains a copyright notice, and add it if missing.""" + if not os.path.isfile(file_path): + print(f"[SKIP] {file_path} does not exist.") + return + + with open(file_path, encoding="utf-8") as f: + content = f.readlines() + + # Check if the copyright header exists in the first 10 lines + for line in content[:10]: + if COPYRIGHT_KEYWORD in line: + return True + + # If no copyright notice was found, prepend the header + print(f"[MODIFY] Adding copyright to {file_path}.") + with open(file_path, "w", encoding="utf-8") as f: + # Write the copyright header followed by the original content + f.write(COPYRIGHT_HEADER + "\n" + "".join(content)) + return False + + +def main(): + """Main function to check and add copyright for all tracked Python files.""" + py_files = get_tracked_python_files() + if not py_files: + print("No Python files are tracked in the repository.") + return + + print(f"Checking {len(py_files)} Python files for copyright notice...") + + have_copyright = [check_and_add_copyright(file_path) for file_path in py_files] + if not all(have_copyright): + print("❌ Some files were missing the required copyright and have been updated.") + sys.exit(1) + else: + print("✅ All files have the required copyright.") + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__trl/scripts/log_example_reports.py b/testbed/huggingface__trl/scripts/log_example_reports.py new file mode 100644 index 0000000000000000000000000000000000000000..10f6c9a7ad29e423d258387517e7e7ab0f6ac97c --- /dev/null +++ b/testbed/huggingface__trl/scripts/log_example_reports.py @@ -0,0 +1,157 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import logging +import os +from datetime import date + +from tabulate import tabulate + + +MAX_LEN_MESSAGE = 2900 # slack endpoint has a limit of 3001 characters + +parser = argparse.ArgumentParser() +parser.add_argument("--slack_channel_name", default="trl-push-examples-ci") +parser.add_argument("--text_file_name", required=True) + + +def main(text_file_name, slack_channel_name=None): + logging.basicConfig(level=logging.INFO) + logger = logging.getLogger(__name__) + + message = "" + + if os.path.isfile(text_file_name): + final_results = {} + + try: + with open(text_file_name) as file: + for line in file: + result, config_name = line.strip().split(",") + config_name = config_name.split("/")[-1].split(".yaml")[0] + final_results[config_name] = int(result) + except Exception as e: + logger.error(f"Error reading file {text_file_name}: {str(e)}") + final_results = {} + + no_error_payload = { + "type": "section", + "text": { + "type": "plain_text", + "text": "🌞 There were no failures on the example tests!" + if not len(final_results) == 0 + else "Something went wrong there is at least one empty file - please check GH action results.", + "emoji": True, + }, + } + + total_num_failed = sum(final_results.values()) + else: + no_error_payload = { + "type": "section", + "text": { + "type": "plain_text", + "text": "❌ Something is wrong with the workflow please check ASAP!" + "Something went wrong there is no text file being produced. Please check ASAP.", + "emoji": True, + }, + } + + total_num_failed = 0 + + test_type_name = text_file_name.replace(".txt", "").replace("temp_results_", "").replace("_", " ").title() + + payload = [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": "🤗 Results of the {} TRL {} example tests.".format( + os.environ.get("TEST_TYPE", ""), test_type_name + ), + }, + }, + ] + + if total_num_failed > 0: + message += f"{total_num_failed} failed tests for example tests!" + + for test_name, failed in final_results.items(): + failed_table = tabulate( + [[test_name, "✅" if not failed else "❌"]], + headers=["Test Name", "Status"], + showindex="always", + tablefmt="grid", + maxcolwidths=[12], + ) + message += "\n```\n" + failed_table + "\n```" + + print(f"### {message}") + else: + payload.append(no_error_payload) + + if os.environ.get("TEST_TYPE", "") != "": + try: + from slack_sdk import WebClient + except ImportError: + logger.error("slack_sdk is not installed. Please install it to use Slack integration.") + return + + if len(message) > MAX_LEN_MESSAGE: + print(f"Truncating long message from {len(message)} to {MAX_LEN_MESSAGE}") + message = message[:MAX_LEN_MESSAGE] + "..." + + if len(message) != 0: + md_report = { + "type": "section", + "text": {"type": "mrkdwn", "text": message}, + } + payload.append(md_report) + action_button = { + "type": "section", + "text": {"type": "mrkdwn", "text": "*For more details:*"}, + "accessory": { + "type": "button", + "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, + "url": f"https://github.com/huggingface/trl/actions/runs/{os.environ['GITHUB_RUN_ID']}", + }, + } + payload.append(action_button) + + date_report = { + "type": "context", + "elements": [ + { + "type": "plain_text", + "text": f"On Push - main {os.environ.get('TEST_TYPE')} test results for {date.today()}", + }, + ], + } + payload.append(date_report) + + print(payload) + + try: + client = WebClient(token=os.environ.get("SLACK_API_TOKEN")) + response = client.chat_postMessage(channel=f"#{slack_channel_name}", text=message, blocks=payload) + if response["ok"]: + logger.info("Message sent successfully to Slack.") + else: + logger.error(f"Failed to send message to Slack: {response['error']}") + except Exception as e: + logger.error(f"Error sending message to Slack: {str(e)}") + + if __name__ == "__main__": + args = parser.parse_args() + main(args.text_file_name, args.slack_channel_name) diff --git a/testbed/huggingface__trl/scripts/log_reports.py b/testbed/huggingface__trl/scripts/log_reports.py new file mode 100644 index 0000000000000000000000000000000000000000..0cdac4f7562226b39857f6200d16bda06d284deb --- /dev/null +++ b/testbed/huggingface__trl/scripts/log_reports.py @@ -0,0 +1,168 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import json +import logging +import os +from datetime import date +from pathlib import Path + +from tabulate import tabulate + + +MAX_LEN_MESSAGE = 2900 # Slack endpoint has a limit of 3001 characters + +parser = argparse.ArgumentParser() +parser.add_argument("--slack_channel_name", default="trl-push-ci") + +# Set up logging +logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s") + + +def process_log_file(log): + failed_tests = [] + passed_tests = [] + section_num_failed = 0 + + try: + with open(log) as f: + for line in f: + try: + data = json.loads(line) + test_name = data.get("nodeid", "") + duration = f'{data["duration"]:.4f}' if "duration" in data else "N/A" + outcome = data.get("outcome", "") + + if test_name: + if outcome == "failed": + section_num_failed += 1 + failed_tests.append([test_name, duration, log.stem.split("_")[0]]) + else: + passed_tests.append([test_name, duration, log.stem.split("_")[0]]) + except json.JSONDecodeError as e: + logging.warning(f"Could not decode line in {log}: {e}") + + except FileNotFoundError as e: + logging.error(f"Log file {log} not found: {e}") + except Exception as e: + logging.error(f"Error processing log file {log}: {e}") + + return failed_tests, passed_tests, section_num_failed + + +def main(slack_channel_name): + group_info = [] + total_num_failed = 0 + total_empty_files = [] + + log_files = list(Path().glob("*.log")) + if not log_files: + logging.info("No log files found.") + return + + for log in log_files: + failed, passed, section_num_failed = process_log_file(log) + empty_file = not failed and not passed + + total_num_failed += section_num_failed + total_empty_files.append(empty_file) + group_info.append([str(log), section_num_failed, failed]) + + # Clean up log file + try: + os.remove(log) + except OSError as e: + logging.warning(f"Could not remove log file {log}: {e}") + + # Prepare Slack message payload + payload = [ + { + "type": "header", + "text": {"type": "plain_text", "text": f"🤗 Results of the {os.environ.get('TEST_TYPE', '')} TRL tests."}, + }, + ] + + if total_num_failed > 0: + message = "" + for name, num_failed, failed_tests in group_info: + if num_failed > 0: + message += f"*{name}: {num_failed} failed test(s)*\n" + failed_table = [ + test[0].split("::")[:2] + [test[0].split("::")[-1][:30] + ".."] for test in failed_tests + ] + message += ( + "\n```\n" + + tabulate(failed_table, headers=["Test Location", "Test Name"], tablefmt="grid") + + "\n```\n" + ) + + if any(total_empty_files): + message += f"\n*{name}: Warning! Empty file - check GitHub action job*\n" + + # Logging + logging.info(f"Total failed tests: {total_num_failed}") + print(f"### {message}") + + if len(message) > MAX_LEN_MESSAGE: + message = ( + f"❌ There are {total_num_failed} failed tests in total! Please check the action results directly." + ) + + payload.append({"type": "section", "text": {"type": "mrkdwn", "text": message}}) + payload.append( + { + "type": "section", + "text": {"type": "mrkdwn", "text": "*For more details:*"}, + "accessory": { + "type": "button", + "text": {"type": "plain_text", "text": "Check Action results"}, + "url": f"https://github.com/huggingface/trl/actions/runs/{os.environ['GITHUB_RUN_ID']}", + }, + } + ) + payload.append( + { + "type": "context", + "elements": [ + { + "type": "plain_text", + "text": f"On Push main {os.environ.get('TEST_TYPE')} results for {date.today()}", + } + ], + } + ) + + # Send to Slack + from slack_sdk import WebClient + + slack_client = WebClient(token=os.environ.get("SLACK_API_TOKEN")) + slack_client.chat_postMessage(channel=f"#{slack_channel_name}", text=message, blocks=payload) + + else: + payload.append( + { + "type": "section", + "text": { + "type": "plain_text", + "text": "✅ No failures! All tests passed successfully.", + "emoji": True, + }, + } + ) + logging.info("All tests passed. No errors detected.") + + +if __name__ == "__main__": + args = parser.parse_args() + main(args.slack_channel_name) diff --git a/testbed/huggingface__trl/tests/__init__.py b/testbed/huggingface__trl/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..adfc257a24f038a7177291b0fba3710bbdacc3af --- /dev/null +++ b/testbed/huggingface__trl/tests/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/testbed/huggingface__trl/tests/slow/__init__.py b/testbed/huggingface__trl/tests/slow/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..adfc257a24f038a7177291b0fba3710bbdacc3af --- /dev/null +++ b/testbed/huggingface__trl/tests/slow/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/testbed/huggingface__trl/tests/slow/test_dpo_slow.py b/testbed/huggingface__trl/tests/slow/test_dpo_slow.py new file mode 100644 index 0000000000000000000000000000000000000000..89177077182c9341d2e1ea25abd52a841de86f3d --- /dev/null +++ b/testbed/huggingface__trl/tests/slow/test_dpo_slow.py @@ -0,0 +1,225 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import gc +import itertools +import tempfile +import unittest + +import torch +from accelerate.utils.memory import release_memory +from datasets import load_dataset +from parameterized import parameterized +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig +from transformers.testing_utils import require_peft, require_torch_accelerator, torch_device +from transformers.utils import is_peft_available + +from trl import DPOConfig, DPOTrainer + +from ..testing_utils import require_bitsandbytes +from .testing_constants import DPO_LOSS_TYPES, DPO_PRECOMPUTE_LOGITS, GRADIENT_CHECKPOINTING_KWARGS, MODELS_TO_TEST + + +if is_peft_available(): + from peft import LoraConfig, PeftModel + + +@require_torch_accelerator +class DPOTrainerSlowTester(unittest.TestCase): + def setUp(self): + self.dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + self.peft_config = LoraConfig( + lora_alpha=16, + lora_dropout=0.1, + r=8, + bias="none", + task_type="CAUSAL_LM", + ) + self.max_length = 128 + + def tearDown(self): + gc.collect() + if torch_device == "cpu": + torch.cuda.empty_cache() + elif torch_device == "xpu": + torch.xpu.empty_cache() + gc.collect() + + @parameterized.expand(list(itertools.product(MODELS_TO_TEST, DPO_LOSS_TYPES, DPO_PRECOMPUTE_LOGITS))) + def test_dpo_bare_model(self, model_id, loss_type, pre_compute_logits): + """ + A test that tests the simple usage of `DPOTrainer` using a bare model in full precision. + """ + model = AutoModelForCausalLM.from_pretrained(model_id) + tokenizer = AutoTokenizer.from_pretrained(model_id) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=2, + remove_unused_columns=False, + gradient_accumulation_steps=2, + learning_rate=9e-1, + eval_strategy="steps", + fp16=True, + logging_strategy="no", + report_to="none", + beta=0.1, + loss_type=loss_type, + precompute_ref_log_probs=pre_compute_logits, + max_length=self.max_length, + ) + + # dpo train lora model + trainer = DPOTrainer( + model=model, + ref_model=None, + args=training_args, + train_dataset=self.dataset["train"], + eval_dataset=self.dataset["test"], + processing_class=tokenizer, + ) + + # train the model + trainer.train() + + # save trained model or adapter + trainer.save_model() + + release_memory(model, trainer) + + @parameterized.expand( + list( + itertools.product( + MODELS_TO_TEST, + DPO_LOSS_TYPES, + DPO_PRECOMPUTE_LOGITS, + GRADIENT_CHECKPOINTING_KWARGS, + ) + ) + ) + @require_peft + def test_dpo_peft_model(self, model_id, loss_type, pre_compute_logits, gradient_checkpointing_kwargs): + """ + A test that tests the simple usage of `DPOTrainer` using a peft model in full precision + different scenarios of gradient checkpointing. + """ + model = AutoModelForCausalLM.from_pretrained(model_id) + tokenizer = AutoTokenizer.from_pretrained(model_id) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=2, + remove_unused_columns=False, + gradient_accumulation_steps=2, + learning_rate=9e-1, + eval_strategy="steps", + fp16=True, + logging_strategy="no", + report_to="none", + gradient_checkpointing=True, + gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, + generate_during_eval=False, + loss_type=loss_type, + precompute_ref_log_probs=pre_compute_logits, + beta=0.1, + max_length=self.max_length, + ) + + # dpo train lora model + trainer = DPOTrainer( + model=model, + ref_model=None, + args=training_args, + train_dataset=self.dataset["train"], + eval_dataset=self.dataset["test"], + processing_class=tokenizer, + peft_config=self.peft_config, + ) + + self.assertIsInstance(trainer.model, PeftModel) + self.assertIsNone(trainer.ref_model) + + # train the model + trainer.train() + + # save trained model or adapter + trainer.save_model() + + release_memory(model, trainer) + + @parameterized.expand( + list( + itertools.product( + MODELS_TO_TEST, + DPO_LOSS_TYPES, + DPO_PRECOMPUTE_LOGITS, + GRADIENT_CHECKPOINTING_KWARGS, + ) + ) + ) + @require_bitsandbytes + @require_peft + def test_dpo_peft_model_qlora(self, model_id, loss_type, pre_compute_logits, gradient_checkpointing_kwargs): + """ + A test that tests the simple usage of `DPOTrainer` using QLoRA + different scenarios of gradient checkpointing. + """ + quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16) + + model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config) + tokenizer = AutoTokenizer.from_pretrained(model_id) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=2, + remove_unused_columns=False, + gradient_accumulation_steps=2, + learning_rate=9e-1, + eval_strategy="steps", + fp16=True, + logging_strategy="no", + report_to="none", + gradient_checkpointing=True, + gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, + beta=0.1, + generate_during_eval=False, + loss_type=loss_type, + precompute_ref_log_probs=pre_compute_logits, + max_length=self.max_length, + ) + + # dpo train lora model + trainer = DPOTrainer( + model=model, + ref_model=None, + args=training_args, + train_dataset=self.dataset["train"], + eval_dataset=self.dataset["test"], + processing_class=tokenizer, + peft_config=self.peft_config, + ) + + self.assertIsInstance(trainer.model, PeftModel) + self.assertIsNone(trainer.ref_model) + + # train the model + trainer.train() + + # save trained model or adapter + trainer.save_model() + + release_memory(model, trainer) diff --git a/testbed/huggingface__trl/tests/slow/test_sft_slow.py b/testbed/huggingface__trl/tests/slow/test_sft_slow.py new file mode 100644 index 0000000000000000000000000000000000000000..b0c0a3364c7d7939d60b25884f21ee117ec23fd1 --- /dev/null +++ b/testbed/huggingface__trl/tests/slow/test_sft_slow.py @@ -0,0 +1,419 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import gc +import itertools +import tempfile +import unittest + +import torch +from accelerate.utils.memory import release_memory +from datasets import load_dataset +from parameterized import parameterized +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig +from transformers.testing_utils import ( + require_liger_kernel, + require_peft, + require_torch_accelerator, + require_torch_multi_accelerator, +) +from transformers.utils import is_peft_available + +from trl import SFTConfig, SFTTrainer +from trl.models.utils import setup_chat_format + +from ..testing_utils import require_bitsandbytes +from .testing_constants import DEVICE_MAP_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS, MODELS_TO_TEST, PACKING_OPTIONS + + +if is_peft_available(): + from peft import LoraConfig, PeftModel + + +@require_torch_accelerator +class SFTTrainerSlowTester(unittest.TestCase): + def setUp(self): + self.train_dataset = load_dataset("stanfordnlp/imdb", split="train[:10%]") + self.eval_dataset = load_dataset("stanfordnlp/imdb", split="test[:10%]") + self.max_seq_length = 128 + self.peft_config = LoraConfig( + lora_alpha=16, + lora_dropout=0.1, + r=8, + bias="none", + task_type="CAUSAL_LM", + ) + + def tearDown(self): + gc.collect() + torch.cuda.empty_cache() + gc.collect() + + @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) + def test_sft_trainer_str(self, model_name, packing): + """ + Simply tests if passing a simple str to `SFTTrainer` loads and runs the trainer + as expected. + """ + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + logging_strategy="no", + report_to="none", + per_device_train_batch_size=2, + max_steps=10, + packing=packing, + max_seq_length=self.max_seq_length, + ) + + trainer = SFTTrainer( + model_name, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + ) + + trainer.train() + + @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) + def test_sft_trainer_transformers(self, model_name, packing): + """ + Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer + as expected. + """ + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + logging_strategy="no", + report_to="none", + per_device_train_batch_size=2, + max_steps=10, + packing=packing, + max_seq_length=self.max_seq_length, + ) + + model = AutoModelForCausalLM.from_pretrained(model_name) + tokenizer = AutoTokenizer.from_pretrained(model_name) + + trainer = SFTTrainer( + model, + args=training_args, + processing_class=tokenizer, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + ) + + trainer.train() + + release_memory(model, trainer) + + @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) + @require_peft + def test_sft_trainer_peft(self, model_name, packing): + """ + Simply tests if passing a transformers model + peft config to `SFTTrainer` loads and runs the trainer + as expected. + """ + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + logging_strategy="no", + report_to="none", + per_device_train_batch_size=2, + max_steps=10, + fp16=True, + packing=packing, + max_seq_length=self.max_seq_length, + ) + + model = AutoModelForCausalLM.from_pretrained(model_name) + tokenizer = AutoTokenizer.from_pretrained(model_name) + + trainer = SFTTrainer( + model, + args=training_args, + processing_class=tokenizer, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + peft_config=self.peft_config, + ) + + self.assertIsInstance(trainer.model, PeftModel) + + trainer.train() + + release_memory(model, trainer) + + @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) + def test_sft_trainer_transformers_mp(self, model_name, packing): + """ + Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer + as expected in mixed precision. + """ + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + logging_strategy="no", + report_to="none", + per_device_train_batch_size=2, + max_steps=10, + fp16=True, # this is sufficient to enable amp + packing=packing, + max_seq_length=self.max_seq_length, + ) + + model = AutoModelForCausalLM.from_pretrained(model_name) + tokenizer = AutoTokenizer.from_pretrained(model_name) + + trainer = SFTTrainer( + model, + args=training_args, + processing_class=tokenizer, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + ) + + trainer.train() + + release_memory(model, trainer) + + @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS))) + def test_sft_trainer_transformers_mp_gc(self, model_name, packing, gradient_checkpointing_kwargs): + """ + Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer + as expected in mixed precision + different scenarios of gradient_checkpointing. + """ + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + logging_strategy="no", + report_to="none", + per_device_train_batch_size=2, + max_steps=10, + packing=packing, + max_seq_length=self.max_seq_length, + fp16=True, # this is sufficient to enable amp + gradient_checkpointing=True, + gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, + ) + + model = AutoModelForCausalLM.from_pretrained(model_name) + tokenizer = AutoTokenizer.from_pretrained(model_name) + + trainer = SFTTrainer( + model, + args=training_args, + processing_class=tokenizer, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + ) + + trainer.train() + + release_memory(model, trainer) + + @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS))) + @require_peft + def test_sft_trainer_transformers_mp_gc_peft(self, model_name, packing, gradient_checkpointing_kwargs): + """ + Simply tests if passing a transformers model + PEFT to `SFTTrainer` loads and runs the trainer + as expected in mixed precision + different scenarios of gradient_checkpointing. + """ + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + logging_strategy="no", + report_to="none", + per_device_train_batch_size=2, + max_steps=10, + packing=packing, + max_seq_length=self.max_seq_length, + fp16=True, # this is sufficient to enable amp + gradient_checkpointing=True, + gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, + ) + + model = AutoModelForCausalLM.from_pretrained(model_name) + tokenizer = AutoTokenizer.from_pretrained(model_name) + + trainer = SFTTrainer( + model, + args=training_args, + processing_class=tokenizer, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + peft_config=self.peft_config, + ) + + self.assertIsInstance(trainer.model, PeftModel) + + trainer.train() + + release_memory(model, trainer) + + @parameterized.expand( + list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS, DEVICE_MAP_OPTIONS)) + ) + @require_torch_multi_accelerator + def test_sft_trainer_transformers_mp_gc_device_map( + self, model_name, packing, gradient_checkpointing_kwargs, device_map + ): + """ + Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer + as expected in mixed precision + different scenarios of gradient_checkpointing (single, multi-gpu, etc). + """ + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + logging_strategy="no", + report_to="none", + per_device_train_batch_size=2, + max_steps=10, + packing=packing, + max_seq_length=self.max_seq_length, + fp16=True, # this is sufficient to enable amp + gradient_checkpointing=True, + gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, + ) + + model = AutoModelForCausalLM.from_pretrained(model_name, device_map=device_map) + tokenizer = AutoTokenizer.from_pretrained(model_name) + + trainer = SFTTrainer( + model, + args=training_args, + processing_class=tokenizer, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + ) + + trainer.train() + + release_memory(model, trainer) + + @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS))) + @require_peft + @require_bitsandbytes + def test_sft_trainer_transformers_mp_gc_peft_qlora(self, model_name, packing, gradient_checkpointing_kwargs): + """ + Simply tests if passing a transformers model + PEFT + bnb to `SFTTrainer` loads and runs the trainer + as expected in mixed precision + different scenarios of gradient_checkpointing. + """ + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + logging_strategy="no", + report_to="none", + per_device_train_batch_size=2, + max_steps=10, + packing=packing, + max_seq_length=self.max_seq_length, + fp16=True, # this is sufficient to enable amp + gradient_checkpointing=True, + gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, + ) + + quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16) + + model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config) + tokenizer = AutoTokenizer.from_pretrained(model_name) + + trainer = SFTTrainer( + model, + args=training_args, + processing_class=tokenizer, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + peft_config=self.peft_config, + ) + + self.assertIsInstance(trainer.model, PeftModel) + + trainer.train() + + release_memory(model, trainer) + + @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) + @require_peft + @require_bitsandbytes + def test_sft_trainer_with_chat_format_qlora(self, model_name, packing): + """ + Simply tests if using setup_chat_format with a transformers model + peft + bnb config to `SFTTrainer` loads and runs the trainer + as expected. + """ + with tempfile.TemporaryDirectory() as tmp_dir: + train_dataset = load_dataset("trl-internal-testing/dolly-chatml-sft", split="train") + + training_args = SFTConfig( + packing=packing, + max_seq_length=self.max_seq_length, + output_dir=tmp_dir, + logging_strategy="no", + report_to="none", + per_device_train_batch_size=2, + max_steps=10, + fp16=True, + ) + + quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16) + + model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config) + tokenizer = AutoTokenizer.from_pretrained(model_name) + + model, tokenizer = setup_chat_format(model, tokenizer) + + trainer = SFTTrainer( + model, + args=training_args, + processing_class=tokenizer, + train_dataset=train_dataset, + peft_config=self.peft_config, + ) + + self.assertIsInstance(trainer.model, PeftModel) + + trainer.train() + + release_memory(model, trainer) + + @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) + @require_liger_kernel + def test_sft_trainer_with_liger(self, model_name, packing): + """ + Tests if passing use_liger=True to SFTConfig loads and runs the trainer + with AutoLigerKernelForCausalLM as expected. + """ + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + logging_strategy="no", + report_to="none", + per_device_train_batch_size=2, + max_steps=2, + packing=packing, + max_seq_length=self.max_seq_length, + use_liger=True, + ) + + trainer = SFTTrainer( + model_name, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + ) + + # check that the components of the trainer.model are monkey patched: + self.assertTrue(any("Liger" in type(module).__name__ for module in trainer.model.model.modules())) + trainer.train() + + release_memory(trainer.model, trainer) diff --git a/testbed/huggingface__trl/tests/slow/testing_constants.py b/testbed/huggingface__trl/tests/slow/testing_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..40051ce1cc448f5bf54d2c27634cdd83f877a19c --- /dev/null +++ b/testbed/huggingface__trl/tests/slow/testing_constants.py @@ -0,0 +1,27 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# TODO: push them under trl-org +MODELS_TO_TEST = [ + "trl-internal-testing/tiny-random-LlamaForCausalLM", + "HuggingFaceM4/tiny-random-MistralForCausalLM", +] + +# We could have also not declared these variables but let's be verbose +PACKING_OPTIONS = [True, False] +GRADIENT_CHECKPOINTING_KWARGS = [None, {"use_reentrant": False}, {"use_reentrant": True}] +DEVICE_MAP_OPTIONS = [{"": 0}, "auto"] + +DPO_LOSS_TYPES = ["sigmoid", "ipo"] +DPO_PRECOMPUTE_LOGITS = [True, False] diff --git a/testbed/huggingface__trl/tests/test_alignprop_trainer.py b/testbed/huggingface__trl/tests/test_alignprop_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..a7446639d1581ad959b50cf8df013a752a483af4 --- /dev/null +++ b/testbed/huggingface__trl/tests/test_alignprop_trainer.py @@ -0,0 +1,90 @@ +# Copyright 2023 metric-space, The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import gc +import unittest + +import torch +from parameterized import parameterized +from transformers.utils import is_peft_available + +from trl import is_diffusers_available + +from .testing_utils import require_diffusers + + +if is_diffusers_available() and is_peft_available(): + from trl import AlignPropConfig, AlignPropTrainer, DefaultDDPOStableDiffusionPipeline + + +def scorer_function(images, prompts, metadata): + return torch.randn(1) * 3.0, {} + + +def prompt_function(): + return ("cabbages", {}) + + +@require_diffusers +class AlignPropTrainerTester(unittest.TestCase): + """ + Test the AlignPropTrainer class. + """ + + def setUp(self): + training_args = AlignPropConfig( + num_epochs=2, + train_gradient_accumulation_steps=1, + train_batch_size=2, + truncated_backprop_rand=False, + mixed_precision=None, + save_freq=1000000, + ) + pretrained_model = "hf-internal-testing/tiny-stable-diffusion-torch" + pretrained_revision = "main" + pipeline_with_lora = DefaultDDPOStableDiffusionPipeline( + pretrained_model, pretrained_model_revision=pretrained_revision, use_lora=True + ) + pipeline_without_lora = DefaultDDPOStableDiffusionPipeline( + pretrained_model, pretrained_model_revision=pretrained_revision, use_lora=False + ) + self.trainer_with_lora = AlignPropTrainer(training_args, scorer_function, prompt_function, pipeline_with_lora) + self.trainer_without_lora = AlignPropTrainer( + training_args, scorer_function, prompt_function, pipeline_without_lora + ) + + def tearDown(self) -> None: + gc.collect() + + @parameterized.expand([True, False]) + def test_generate_samples(self, use_lora): + trainer = self.trainer_with_lora if use_lora else self.trainer_without_lora + output_pairs = trainer._generate_samples(2, with_grad=True) + self.assertEqual(len(output_pairs.keys()), 3) + self.assertEqual(len(output_pairs["images"]), 2) + + @parameterized.expand([True, False]) + def test_calculate_loss(self, use_lora): + trainer = self.trainer_with_lora if use_lora else self.trainer_without_lora + sample = trainer._generate_samples(2) + + images = sample["images"] + prompts = sample["prompts"] + + self.assertTupleEqual(images.shape, (2, 3, 128, 128)) + self.assertEqual(len(prompts), 2) + + rewards = trainer.compute_rewards(sample) + loss = trainer.calculate_loss(rewards) + + self.assertTrue(torch.isfinite(loss.cpu())) diff --git a/testbed/huggingface__trl/tests/test_bco_trainer.py b/testbed/huggingface__trl/tests/test_bco_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..411e2dfdc6f84f4e65915920b4d4d695540ee0a5 --- /dev/null +++ b/testbed/huggingface__trl/tests/test_bco_trainer.py @@ -0,0 +1,412 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import tempfile +import unittest +from functools import partial + +import torch +from accelerate import Accelerator +from datasets import load_dataset +from parameterized import parameterized +from transformers import AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer +from transformers.testing_utils import require_peft + +from trl import BCOConfig, BCOTrainer +from trl.trainer.bco_trainer import _process_tokens, _tokenize + +from .testing_utils import require_no_wandb, require_sklearn + + +class BCOTrainerTester(unittest.TestCase): + def setUp(self): + self.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab" + self.model = AutoModelForCausalLM.from_pretrained(self.model_id) + self.ref_model = AutoModelForCausalLM.from_pretrained(self.model_id) + self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) + self.tokenizer.pad_token = self.tokenizer.eos_token + + # get t5 as seq2seq example: + model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration-correct-vocab" + self.t5_model = AutoModelForSeq2SeqLM.from_pretrained(model_id) + self.t5_ref_model = AutoModelForSeq2SeqLM.from_pretrained(model_id) + self.t5_tokenizer = AutoTokenizer.from_pretrained(model_id) + + # get embedding model + model_id = "facebook/bart-base" + self.embedding_model = AutoModel.from_pretrained(model_id) + self.embedding_tokenizer = AutoTokenizer.from_pretrained(model_id) + + @parameterized.expand( + [ + ["gpt2", True, True, "standard_unpaired_preference"], + ["gpt2", True, False, "standard_unpaired_preference"], + ["gpt2", False, True, "standard_unpaired_preference"], + ["gpt2", False, False, "standard_unpaired_preference"], + ["gpt2", True, True, "conversational_unpaired_preference"], + ] + ) + @require_sklearn + def test_bco_trainer(self, name, pre_compute, eval_dataset, config_name): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = BCOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + gradient_accumulation_steps=1, + learning_rate=9e-1, + eval_strategy="steps" if eval_dataset else "no", + beta=0.1, + precompute_ref_log_probs=pre_compute, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", config_name) + + if name == "gpt2": + model = self.model + ref_model = self.ref_model + tokenizer = self.tokenizer + elif name == "t5": + model = self.t5_model + ref_model = self.t5_ref_model + tokenizer = self.t5_tokenizer + + trainer = BCOTrainer( + model=model, + ref_model=ref_model, + args=training_args, + processing_class=tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"] if eval_dataset else None, + ) + + previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) + + # check the params have changed + for n, param in previous_trainable_params.items(): + new_param = trainer.model.get_parameter(n) + # check the params have changed - ignore 0 biases + if param.sum() != 0: + self.assertFalse(torch.equal(param.cpu(), new_param.cpu())) + + @require_sklearn + def test_bco_trainer_with_ref_model_is_model(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = BCOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference") + + with self.assertRaises(ValueError): + BCOTrainer( + model=self.model, + ref_model=self.model, # ref_model can't be the same as model + args=training_args, + processing_class=self.tokenizer, + train_dataset=dummy_dataset["train"], + ) + + @require_sklearn + def test_tokenize_and_process_tokens(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = BCOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + gradient_accumulation_steps=1, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference") + + trainer = BCOTrainer( + model=self.model, + ref_model=self.ref_model, + args=training_args, + processing_class=self.tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + ) + + train_dataset = dummy_dataset["train"] + tokenized_dataset = train_dataset.map( + _tokenize, + fn_kwargs={"tokenizer": trainer.tokenizer}, + batched=True, + batch_size=2, + ) + self.assertListEqual(tokenized_dataset["prompt"], train_dataset["prompt"]) + self.assertListEqual(tokenized_dataset["completion"], train_dataset["completion"]) + self.assertListEqual(tokenized_dataset["label"], train_dataset["label"]) + self.assertListEqual(tokenized_dataset["prompt_input_ids"][0], [5377, 11141]) + self.assertListEqual(tokenized_dataset["prompt_attention_mask"][0], [1, 1]) + self.assertListEqual(tokenized_dataset["answer_input_ids"][0], [318, 1365, 621, 8253, 13]) + self.assertListEqual(tokenized_dataset["answer_attention_mask"][0], [1, 1, 1, 1, 1]) + + fn_kwargs = { + "prefix": "", + "is_encoder_decoder": trainer.is_encoder_decoder, + "tokenizer": trainer.tokenizer, + "max_length": trainer.max_length, + "truncation_mode": trainer.truncation_mode, + "label_pad_token_id": trainer.label_pad_token_id, + "max_prompt_length": trainer.max_prompt_length, + } + processed_dataset = tokenized_dataset.map(_process_tokens, fn_kwargs=fn_kwargs, num_proc=2) + self.assertListEqual(processed_dataset["prompt"], train_dataset["prompt"]) + self.assertListEqual(processed_dataset["completion"], train_dataset["completion"]) + self.assertListEqual(processed_dataset["label"], train_dataset["label"]) + self.assertListEqual(processed_dataset["prompt_input_ids"][0], [50256, 5377, 11141]) + self.assertListEqual(processed_dataset["prompt_attention_mask"][0], [1, 1, 1]) + self.assertListEqual( + processed_dataset["completion_input_ids"][0], [50256, 5377, 11141, 318, 1365, 621, 8253, 13, 50256] + ) + self.assertListEqual(processed_dataset["completion_attention_mask"][0], [1, 1, 1, 1, 1, 1, 1, 1, 1]) + self.assertListEqual( + processed_dataset["completion_labels"][0], [-100, -100, -100, 318, 1365, 621, 8253, 13, 50256] + ) + + @require_sklearn + def test_bco_trainer_without_providing_ref_model(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = BCOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + gradient_accumulation_steps=4, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference") + + trainer = BCOTrainer( + model=self.model, + ref_model=None, + args=training_args, + processing_class=self.tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + ) + + previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) + + # check the params have changed + for n, param in previous_trainable_params.items(): + new_param = trainer.model.get_parameter(n) + # check the params have changed - ignore 0 biases + if param.sum() != 0: + self.assertFalse(torch.equal(param.cpu(), new_param.cpu())) + + @require_sklearn + def test_bco_trainer_udm(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = BCOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + gradient_accumulation_steps=4, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference") + + def embed_prompt(input_ids, attention_mask, model): + outputs = model(input_ids=input_ids, attention_mask=attention_mask) + + return outputs.last_hidden_state.mean(dim=1) + + embedding_model = Accelerator().prepare_model(self.embedding_model) + embedding_func = partial(embed_prompt, model=embedding_model) + + trainer = BCOTrainer( + model=self.model, + ref_model=None, + args=training_args, + processing_class=self.tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + embedding_func=embedding_func, + embedding_tokenizer=self.embedding_tokenizer, + ) + + previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) + + # check the params have changed + for n, param in previous_trainable_params.items(): + new_param = trainer.model.get_parameter(n) + # check the params have changed - ignore 0 biases + if param.sum() != 0: + self.assertFalse(torch.equal(param.cpu(), new_param.cpu())) + + @require_sklearn + @require_peft + def test_bco_trainer_without_providing_ref_model_with_lora(self): + from peft import LoraConfig + + lora_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + ) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = BCOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + gradient_accumulation_steps=4, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference") + + trainer = BCOTrainer( + model=self.model, + ref_model=None, + args=training_args, + processing_class=self.tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + peft_config=lora_config, + ) + + previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) + + # check the params have changed + for n, param in previous_trainable_params.items(): + if "lora" in n: + new_param = trainer.model.get_parameter(n) + # check the params have changed - ignore 0 biases + if param.sum() != 0: + self.assertFalse(torch.equal(param.cpu(), new_param.cpu())) + + @require_sklearn + @require_no_wandb + def test_bco_trainer_generate_during_eval_no_wandb(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = BCOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + gradient_accumulation_steps=1, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + generate_during_eval=True, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference") + + with self.assertRaisesRegex( + ValueError, + expected_regex="`generate_during_eval=True` requires Weights and Biases to be installed." + " Please install with `pip install wandb` to resolve.", + ): + BCOTrainer( + model=self.model, + ref_model=None, + args=training_args, + processing_class=self.tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + ) + + @require_sklearn + @require_peft + def test_bco_lora_save(self): + from peft import LoraConfig, get_peft_model + + lora_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + ) + + # lora model + model = AutoModelForCausalLM.from_pretrained(self.model_id) + model_peft = get_peft_model(model, lora_config) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = BCOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + gradient_accumulation_steps=4, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference") + + # bco train lora model with a lora config + trainer = BCOTrainer( + model=model_peft, + ref_model=None, + args=training_args, + processing_class=self.tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + peft_config=lora_config, + ) + + # train the model + trainer.train() + + # save peft adapter + trainer.save_model() + + # assert that the model is loaded without giving OSError + try: + AutoModelForCausalLM.from_pretrained(tmp_dir) + except OSError: + self.fail("Loading the saved peft adapter failed") diff --git a/testbed/huggingface__trl/tests/test_best_of_n_sampler.py b/testbed/huggingface__trl/tests/test_best_of_n_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..584c917a04e50cc766dc0068249ebd06dd6843a5 --- /dev/null +++ b/testbed/huggingface__trl/tests/test_best_of_n_sampler.py @@ -0,0 +1,112 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch +from transformers import AutoTokenizer, GenerationConfig + +from trl import AutoModelForCausalLMWithValueHead +from trl.core import LengthSampler +from trl.extras import BestOfNSampler + + +def queries_to_scores(list_of_strings): + return [torch.rand(1).item() for _ in list_of_strings] + + +class BestOfNSamplerTester(unittest.TestCase): + """ + Tests the BestOfNSampler class + """ + + ref_model_name = "trl-internal-testing/dummy-GPT2-correct-vocab" + output_length_sampler = LengthSampler(2, 6) + model = AutoModelForCausalLMWithValueHead.from_pretrained(ref_model_name) + tokenizer = AutoTokenizer.from_pretrained(ref_model_name) + tokenizer.pad_token = tokenizer.eos_token + output_length_sampler = LengthSampler(2, 6) + + def test_different_input_types(self): + r""" + Tests if the different input types normalizer works + """ + + generation_config = GenerationConfig( + min_length=-1, + top_k=0.0, + top_p=1.0, + do_sample=True, + pad_token_id=self.tokenizer.eos_token_id, + ) + + output_length_sampler = LengthSampler(2, 6) + + best_of_n = BestOfNSampler( + self.model, + self.tokenizer, + queries_to_scores, + length_sampler=output_length_sampler, + generation_config=generation_config, + ) + + queries = ["hello world", "goodbye world"] + tokenized_queries = [self.tokenizer.encode(query) for query in queries] + + various_queries_formats = [ + (tokenized_queries[0], 1), + (tokenized_queries, 2), + (torch.tensor(tokenized_queries[1]), 1), + ([torch.tensor(query) for query in tokenized_queries], 2), + ] + + for q, expected_length in various_queries_formats: + results = best_of_n.generate(q) + self.assertIsInstance(results, list) + self.assertEqual(len(results), expected_length) + + def test_different_sample_sizes_and_n_candidates_values(self): + r""" + Tests different sample sizes and n_candidates values + """ + generation_config = GenerationConfig( + min_length=-1, + top_k=0.0, + top_p=1.0, + do_sample=True, + pad_token_id=self.tokenizer.eos_token_id, + ) + + output_length_sampler = LengthSampler(6, 10) + + for sample_value, n_candidates_values, expected in [ + (4, 2, 2), + (10, 3, 3), + (6, 4, 4), + ]: + best_of_n = BestOfNSampler( + self.model, + self.tokenizer, + queries_to_scores, + length_sampler=output_length_sampler, + generation_config=generation_config, + sample_size=sample_value, + n_candidates=n_candidates_values, + ) + + queries = ["hello world", "troll the world"] + tokenized_queries = [self.tokenizer.encode(query) for query in queries] + results = best_of_n.generate(tokenized_queries) + for result in results: + self.assertEqual(len(result), expected) diff --git a/testbed/huggingface__trl/tests/test_callbacks.py b/testbed/huggingface__trl/tests/test_callbacks.py new file mode 100644 index 0000000000000000000000000000000000000000..874d8b22f0666a08a0a1660832edffa22aba9902 --- /dev/null +++ b/testbed/huggingface__trl/tests/test_callbacks.py @@ -0,0 +1,268 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +import tempfile +import unittest + +from datasets import load_dataset +from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, Trainer, TrainingArguments +from transformers.testing_utils import require_peft, require_wandb +from transformers.utils import is_peft_available + +from trl import BasePairwiseJudge, LogCompletionsCallback, WinRateCallback + + +if is_peft_available(): + from peft import LoraConfig + + +class HalfPairwiseJudge(BasePairwiseJudge): + """Naive pairwise judge that always returns [1, 0] for two prompts""" + + def judge(self, prompts, completions, shuffle_order=True, return_scores=False): + # just check that the batch size is 2 + assert len(prompts) == 2 + if return_scores: + return [0.3, 0.9] + return [1, 0] + + +class TrainerWithRefModel(Trainer): + # This is a dummy class to test the callback. Compared to the Trainer class, it only has an additional + # ref_model attribute + def __init__(self, model, ref_model, args, train_dataset, eval_dataset, processing_class): + super().__init__( + model=model, + args=args, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + processing_class=processing_class, + ) + self.ref_model = ref_model + + +class WinRateCallbackTester(unittest.TestCase): + def setUp(self): + self.model = AutoModelForCausalLM.from_pretrained("trl-internal-testing/dummy-GPT2-correct-vocab") + self.ref_model = AutoModelForCausalLM.from_pretrained("trl-internal-testing/dummy-GPT2-correct-vocab") + self.tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/dummy-GPT2-correct-vocab") + self.tokenizer.pad_token = self.tokenizer.eos_token + dataset = load_dataset("trl-internal-testing/zen", "standard_prompt_only") + dataset["train"] = dataset["train"].select(range(8)) + self.expected_winrates = [ + {"eval_win_rate": 0.5, "epoch": 0.0, "step": 0}, + {"eval_win_rate": 0.5, "epoch": 0.5, "step": 2}, + {"eval_win_rate": 0.5, "epoch": 1.0, "step": 4}, + {"eval_win_rate": 0.5, "epoch": 1.5, "step": 6}, + {"eval_win_rate": 0.5, "epoch": 2.0, "step": 8}, + {"eval_win_rate": 0.5, "epoch": 2.5, "step": 10}, + {"eval_win_rate": 0.5, "epoch": 3.0, "step": 12}, + ] + + def tokenize_function(examples): + out = self.tokenizer(examples["prompt"], padding="max_length", max_length=16, truncation=True) + out["labels"] = out["input_ids"].copy() + return out + + self.dataset = dataset.map(tokenize_function, batched=True) + + self.generation_config = GenerationConfig(max_length=32) + self.judge = HalfPairwiseJudge() + + def test_basic(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = TrainingArguments( + output_dir=tmp_dir, + eval_strategy="steps", + eval_steps=2, # evaluate every 2 steps + per_device_train_batch_size=2, # 8 samples in total so 4 batches of 2 per epoch + per_device_eval_batch_size=2, + report_to="none", + ) + trainer = TrainerWithRefModel( + model=self.model, + ref_model=self.ref_model, + args=training_args, + train_dataset=self.dataset["train"], + eval_dataset=self.dataset["test"], + processing_class=self.tokenizer, + ) + win_rate_callback = WinRateCallback( + judge=self.judge, trainer=trainer, generation_config=self.generation_config + ) + trainer.add_callback(win_rate_callback) + trainer.train() + winrate_history = [h for h in trainer.state.log_history if "eval_win_rate" in h] + self.assertListEqual(winrate_history, self.expected_winrates) + + def test_without_ref_model(self): + # Same as before, but without the ref_model attribute. It should use the model attribute instead + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = TrainingArguments( + output_dir=tmp_dir, + eval_strategy="steps", + eval_steps=2, # evaluate every 2 steps + per_device_train_batch_size=2, # 8 samples in total so 4 batches of 2 per epoch + per_device_eval_batch_size=2, + report_to="none", + ) + trainer = Trainer( + model=self.model, + args=training_args, + train_dataset=self.dataset["train"], + eval_dataset=self.dataset["test"], + processing_class=self.tokenizer, + ) + win_rate_callback = WinRateCallback( + judge=self.judge, trainer=trainer, generation_config=self.generation_config + ) + trainer.add_callback(win_rate_callback) + trainer.train() + winrate_history = [h for h in trainer.state.log_history if "eval_win_rate" in h] + self.assertListEqual(winrate_history, self.expected_winrates) + + def test_soft_judge(self): + """Test that the soft judge functionality works correctly""" + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = TrainingArguments( + output_dir=tmp_dir, + eval_strategy="steps", + eval_steps=2, # evaluate every 2 steps + per_device_train_batch_size=2, # 8 samples in total so 4 batches of 2 per epoch + per_device_eval_batch_size=2, + report_to="none", + ) + trainer = TrainerWithRefModel( + model=self.model, + ref_model=self.ref_model, + args=training_args, + train_dataset=self.dataset["train"], + eval_dataset=self.dataset["test"], + processing_class=self.tokenizer, + ) + win_rate_callback = WinRateCallback( + judge=self.judge, trainer=trainer, generation_config=self.generation_config, use_soft_judge=True + ) + trainer.add_callback(win_rate_callback) + trainer.train() + + # Expected values based on judge returning [0.3, 0.9] for each pair + expected_soft_winrates = [ + {"eval_avg_win_prob": 0.4, "eval_win_rate": 0.5, "epoch": 0.0, "step": 0}, + {"eval_avg_win_prob": 0.4, "eval_win_rate": 0.5, "epoch": 0.5, "step": 2}, + {"eval_avg_win_prob": 0.4, "eval_win_rate": 0.5, "epoch": 1.0, "step": 4}, + {"eval_avg_win_prob": 0.4, "eval_win_rate": 0.5, "epoch": 1.5, "step": 6}, + {"eval_avg_win_prob": 0.4, "eval_win_rate": 0.5, "epoch": 2.0, "step": 8}, + {"eval_avg_win_prob": 0.4, "eval_win_rate": 0.5, "epoch": 2.5, "step": 10}, + {"eval_avg_win_prob": 0.4, "eval_win_rate": 0.5, "epoch": 3.0, "step": 12}, + ] + + winrate_history = [ + {k: h[k] for k in ["eval_avg_win_prob", "eval_win_rate", "epoch", "step"]} + for h in trainer.state.log_history + if "eval_avg_win_prob" in h + ] + self.assertListEqual(winrate_history, expected_soft_winrates) + + @require_peft + def test_lora(self): + with tempfile.TemporaryDirectory() as tmp_dir: + peft_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + ) + self.model.add_adapter(peft_config) + training_args = TrainingArguments( + output_dir=tmp_dir, + eval_strategy="steps", + eval_steps=2, # evaluate every 2 steps + per_device_train_batch_size=2, # 8 samples in total so 4 batches of 2 per epoch + per_device_eval_batch_size=2, + report_to="none", + ) + trainer = Trainer( + model=self.model, + args=training_args, + train_dataset=self.dataset["train"], + eval_dataset=self.dataset["test"], + processing_class=self.tokenizer, + ) + win_rate_callback = WinRateCallback( + judge=self.judge, trainer=trainer, generation_config=self.generation_config + ) + trainer.add_callback(win_rate_callback) + trainer.train() + winrate_history = [h for h in trainer.state.log_history if "eval_win_rate" in h] + self.assertListEqual(winrate_history, self.expected_winrates) + + +@require_wandb +class LogCompletionsCallbackTester(unittest.TestCase): + def setUp(self): + self.model = AutoModelForCausalLM.from_pretrained("trl-internal-testing/dummy-GPT2-correct-vocab") + self.tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/dummy-GPT2-correct-vocab") + self.tokenizer.pad_token = self.tokenizer.eos_token + dataset = load_dataset("trl-internal-testing/zen", "standard_prompt_only") + dataset["train"] = dataset["train"].select(range(8)) + + def tokenize_function(examples): + out = self.tokenizer(examples["prompt"], padding="max_length", max_length=16, truncation=True) + out["labels"] = out["input_ids"].copy() + return out + + self.dataset = dataset.map(tokenize_function, batched=True) + + self.generation_config = GenerationConfig(max_length=32) + + def test_basic(self): + import wandb + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = TrainingArguments( + output_dir=tmp_dir, + eval_strategy="steps", + eval_steps=2, # evaluate every 2 steps + per_device_train_batch_size=2, # 8 samples in total so 4 batches of 2 per epoch + per_device_eval_batch_size=2, + report_to="wandb", + ) + trainer = Trainer( + model=self.model, + args=training_args, + train_dataset=self.dataset["train"], + eval_dataset=self.dataset["test"], + processing_class=self.tokenizer, + ) + completions_callback = LogCompletionsCallback(trainer, self.generation_config, num_prompts=2) + trainer.add_callback(completions_callback) + trainer.train() + + # Get the current run + completions_path = wandb.run.summary.completions["path"] + json_path = os.path.join(wandb.run.dir, completions_path) + with open(json_path) as f: + completions = json.load(f) + + # Check that the columns are correct + self.assertIn("step", completions["columns"]) + self.assertIn("prompt", completions["columns"]) + self.assertIn("completion", completions["columns"]) + + # Check that the prompt is in the log + self.assertIn(self.dataset["test"][0]["prompt"], completions["data"][0]) diff --git a/testbed/huggingface__trl/tests/test_cli.py b/testbed/huggingface__trl/tests/test_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..0a34995ecd552b20da5befff5d28241292da3c67 --- /dev/null +++ b/testbed/huggingface__trl/tests/test_cli.py @@ -0,0 +1,44 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import subprocess +import sys +import unittest + + +class CLITester(unittest.TestCase): + @unittest.skipIf(sys.platform.startswith("win"), "Skipping on Windows") + def test_sft_cli(self): + try: + subprocess.run( + "trl sft --max_steps 1 --output_dir tmp-sft --model_name_or_path trl-internal-testing/tiny-random-LlamaForCausalLM --dataset_name stanfordnlp/imdb --learning_rate 1e-4 --lr_scheduler_type cosine", + shell=True, + check=True, + ) + except BaseException: + self.fail("An error occurred while running the CLI, please double check") + + @unittest.skipIf(sys.platform.startswith("win"), "Skipping on Windows") + def test_dpo_cli(self): + try: + subprocess.run( + "trl dpo --max_steps 1 --output_dir tmp-dpo --model_name_or_path trl-internal-testing/tiny-random-LlamaForCausalLM --dataset_name trl-internal-testing/tiny-ultrafeedback-binarized --learning_rate 1e-4 --lr_scheduler_type cosine", + shell=True, + check=True, + ) + except BaseException: + self.fail("An error occurred while running the CLI, please double check") + + def test_env_cli(self): + output = subprocess.run("trl env", capture_output=True, text=True, shell=True, check=True) + self.assertIn("- Python version: ", output.stdout) diff --git a/testbed/huggingface__trl/tests/test_core.py b/testbed/huggingface__trl/tests/test_core.py new file mode 100644 index 0000000000000000000000000000000000000000..2d8531d5916e33af9c02037d828b9a35c7d695e8 --- /dev/null +++ b/testbed/huggingface__trl/tests/test_core.py @@ -0,0 +1,41 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import unittest + +import torch + +from trl.core import masked_mean, masked_var, masked_whiten, whiten + + +class CoreTester(unittest.TestCase): + """ + A wrapper class for testing core utils functions + """ + + def setUp(self): + self.test_input = torch.Tensor([1, 2, 3, 4]) + self.test_mask = torch.Tensor([0, 1, 1, 0]) + self.test_input_unmasked = self.test_input[1:3] + + def test_masked_mean(self): + self.assertEqual(torch.mean(self.test_input_unmasked), masked_mean(self.test_input, self.test_mask)) + + def test_masked_var(self): + self.assertEqual(torch.var(self.test_input_unmasked), masked_var(self.test_input, self.test_mask)) + + def test_masked_whiten(self): + whiten_unmasked = whiten(self.test_input_unmasked) + whiten_masked = masked_whiten(self.test_input, self.test_mask)[1:3] + diffs = (whiten_unmasked - whiten_masked).sum() + self.assertLess(abs(diffs.item()), 0.00001) diff --git a/testbed/huggingface__trl/tests/test_cpo_trainer.py b/testbed/huggingface__trl/tests/test_cpo_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..e44ec4492fc5ac6e851d574119fb9cd45dc28176 --- /dev/null +++ b/testbed/huggingface__trl/tests/test_cpo_trainer.py @@ -0,0 +1,153 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import tempfile +import unittest + +import torch +from datasets import load_dataset +from parameterized import parameterized +from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer +from transformers.testing_utils import require_peft + +from trl import CPOConfig, CPOTrainer + + +class CPOTrainerTester(unittest.TestCase): + def setUp(self): + self.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab" + self.model = AutoModelForCausalLM.from_pretrained(self.model_id) + self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) + self.tokenizer.pad_token = self.tokenizer.eos_token + + # get t5 as seq2seq example: + model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration-correct-vocab" + self.t5_model = AutoModelForSeq2SeqLM.from_pretrained(model_id) + self.t5_tokenizer = AutoTokenizer.from_pretrained(model_id) + + @parameterized.expand( + [ + ["gpt2", "sigmoid", "standard_preference"], + ["t5", "hinge", "standard_implicit_prompt_preference"], + ["gpt2", "ipo", "conversational_preference"], + ["t5", "ipo", "conversational_implicit_prompt_preference"], + ["gpt2", "simpo", "standard_preference"], + ["t5", "simpo", "standard_implicit_prompt_preference"], + ["gpt2", "hinge", "conversational_preference"], + ] + ) + def test_cpo_trainer(self, name, loss_type, config_name): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = CPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=1, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + loss_type=loss_type, + cpo_alpha=1.0, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", config_name) + + if name == "gpt2": + model = self.model + tokenizer = self.tokenizer + elif name == "t5": + model = self.t5_model + tokenizer = self.t5_tokenizer + training_args.is_encoder_decoder = True + + trainer = CPOTrainer( + model=model, + args=training_args, + processing_class=tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + ) + + previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) + + # check the params have changed + for n, param in previous_trainable_params.items(): + new_param = trainer.model.get_parameter(n) + # check the params have changed - ignore 0 biases + if param.sum() != 0: + self.assertFalse(torch.equal(param, new_param)) + + @parameterized.expand( + [ + ("standard_preference",), + ("standard_implicit_prompt_preference",), + ("conversational_preference",), + ("conversational_implicit_prompt_preference",), + ] + ) + @require_peft + def test_cpo_trainer_with_lora(self, config_name): + from peft import LoraConfig + + lora_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + ) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = CPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=4, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + cpo_alpha=1.0, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", config_name) + + trainer = CPOTrainer( + model=self.model, + args=training_args, + processing_class=self.tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + peft_config=lora_config, + ) + + previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) + + # check the params have changed + for n, param in previous_trainable_params.items(): + if "lora" in n: + new_param = trainer.model.get_parameter(n) + # check the params have changed - ignore 0 biases + if param.sum() != 0: + self.assertFalse(torch.equal(param, new_param)) diff --git a/testbed/huggingface__trl/tests/test_data_collator_completion_only.py b/testbed/huggingface__trl/tests/test_data_collator_completion_only.py new file mode 100644 index 0000000000000000000000000000000000000000..4ae0c7f3a69e82ef3a0951bf05406d6e64bbd820 --- /dev/null +++ b/testbed/huggingface__trl/tests/test_data_collator_completion_only.py @@ -0,0 +1,144 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import unittest + +import torch +from transformers import AutoTokenizer + +from trl import DataCollatorForCompletionOnlyLM + + +class DataCollatorForCompletionOnlyLMTester(unittest.TestCase): + def test_data_collator_finds_response_template_llama2_tokenizer(self): + # this should ideally be tested with meta-llama/Llama-2-7b-hf + self.tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/dummy-GPT2-correct-vocab") + self.instruction = """### System: You are a helpful assistant. + +### User: How much is 2+2? + +### Assistant: 2+2 equals 4""" + self.instruction_template = "\n### User:" + self.response_template = "\n### Assistant:" + + # GPT2Tokenizer: [198, 21017, 11787, 25] -> [21017, 11787, 25] + # Llama2Tokenizer: [29871, 13, 2277, 29937, 4911, 29901] -> [2277, 29937, 4911, 29901] + # Note: If this test is ever switched to Llama2Tokenizer, this should be double checked, + # and possibly switched back to [2:] instead of [1:]. + # With GPT2Tokenizer, [1:] is correct - we want the 21017 token included, which is ###. + self.tokenized_instruction_w_context = self.tokenizer.encode( + self.instruction_template, add_special_tokens=False + )[1:] + + # GPT2Tokenizer: [198, 21017, 15286, 25] -> [15286, 25] + # Llama2Tokenizer: [29871, 13, 2277, 29937, 4007, 22137, 29901] -> [2277, 29937, 4007, 22137, 29901] + self.tokenized_response_w_context = self.tokenizer.encode(self.response_template, add_special_tokens=False)[2:] + + # Plain check on string + self.assertIn(self.response_template, self.instruction) + self.tokenized_instruction = self.tokenizer.encode(self.instruction, add_special_tokens=False) + + # Test the fix for #598 + # Pass already tokenized (w context) and truncated response_template so token_ids are like in the instruction + response + self.collator = DataCollatorForCompletionOnlyLM(self.tokenized_response_w_context, tokenizer=self.tokenizer) + self.collator.torch_call([self.tokenized_instruction]) + + # Test for PR #749 + # Pass already tokenized (w context) instruction and response both so token_ids are like in the instruction + response + self.collator = DataCollatorForCompletionOnlyLM( + self.tokenized_response_w_context, self.tokenized_instruction_w_context, tokenizer=self.tokenizer + ) + self.collator.torch_call([self.tokenized_instruction]) + + # Test for PR #1185 + # We pass in a string where the first user template is different than the rest. + # Usually this would happen due to context-sensitive tokenization, but here we + # explicitly change the template to test the fix. + self.instruction = """## User: First instruction + +### Assistant: First response + +### User: Second instruction + +### Assistant: Second response""" + self.tokenized_instruction = self.tokenizer.encode(self.instruction, add_special_tokens=False) + self.collator = DataCollatorForCompletionOnlyLM( + self.tokenized_response_w_context, self.tokenized_instruction_w_context, tokenizer=self.tokenizer + ) + collator_output = self.collator.torch_call([self.tokenized_instruction]) + collator_text = self.tokenizer.decode( + collator_output["labels"][torch.where(collator_output["labels"] != -100)] + ) + expected_text = " First response\n\n Second response" "" + self.assertEqual(collator_text, expected_text) + + def test_data_collator_handling_of_long_sequences(self): + self.tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/dummy-GPT2-correct-vocab") + self.instruction = """### System: You are a helpful assistant. + +### User: How much is 2+2? I'm asking because I'm not sure. And I'm not sure because I'm not good at math. +""" + self.response_template = "\n### Assistant:" + # check DataCollatorForCompletionOnlyLM using response template only + self.tokenized_instruction = self.tokenizer.encode(self.instruction, add_special_tokens=False) + self.collator = DataCollatorForCompletionOnlyLM(self.response_template, tokenizer=self.tokenizer) + encoded_instance = self.collator.torch_call([self.tokenized_instruction]) + result = torch.all(encoded_instance["labels"] == -100) + self.assertTrue(result, "Not all values in the tensor are -100.") + + # check DataCollatorForCompletionOnlyLM using response template and instruction template + self.instruction_template = "\n### User:" + self.collator = DataCollatorForCompletionOnlyLM( + self.response_template, self.instruction_template, tokenizer=self.tokenizer + ) + encoded_instance = self.collator.torch_call([self.tokenized_instruction]) + result = torch.all(encoded_instance["labels"] == -100) + self.assertTrue(result, "Not all values in the tensor are -100.") + + def test_padding_free(self): + tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/dummy-GPT2-correct-vocab") + if tokenizer.pad_token_id is None: + tokenizer.pad_token = tokenizer.eos_token + tokenizer.pad_token_id = tokenizer.eos_token_id + inst1 = "### System: You are a helpful assistant.\n\n### User: How much is 2+2?\n\n### Assistant: 2+2 equals 4" + inst2 = "### System: You are a honest and helpful assistant.\n\n### User: What is the answer of 22x22?\n\n### Assistant: 22x22 equals 484" + + response_template = "\n### Assistant:" + collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=tokenizer) + collator_paddingfree = DataCollatorForCompletionOnlyLM( + response_template, tokenizer=tokenizer, padding_free=True + ) + + tokenized_instruction = [tokenizer(x, add_special_tokens=False) for x in [inst1, inst2]] + batch = collator(tokenized_instruction) + batch_paddingfree = collator_paddingfree(tokenized_instruction) + + self.assertNotIn("attention_mask", batch_paddingfree) + self.assertIn("input_ids", batch_paddingfree) + self.assertIn("labels", batch_paddingfree) + self.assertIn("position_ids", batch_paddingfree) + self.assertEqual(batch_paddingfree["input_ids"].size(), batch_paddingfree["labels"].size()) + self.assertEqual(batch_paddingfree["labels"].size(), batch_paddingfree["position_ids"].size()) + + attn_mask = batch["attention_mask"] + input_ids_remove_pad = batch["input_ids"][attn_mask.bool()].unsqueeze(0) + expected_position_ids = attn_mask.cumsum(1)[attn_mask.bool()].unsqueeze(0) - 1 + expected_labels = [] + for idx in range(batch["input_ids"].size(0)): + expected_labels.append(batch["labels"][idx][attn_mask[idx].bool()]) + expected_labels[-1][0] = collator.ignore_index + expected_labels = torch.cat(expected_labels).unsqueeze(0) + + self.assertTrue((input_ids_remove_pad == batch_paddingfree["input_ids"]).all()) + self.assertTrue((expected_position_ids == batch_paddingfree["position_ids"]).all()) + self.assertTrue((expected_labels == batch_paddingfree["labels"]).all()) diff --git a/testbed/huggingface__trl/tests/test_dataset_formatting.py b/testbed/huggingface__trl/tests/test_dataset_formatting.py new file mode 100644 index 0000000000000000000000000000000000000000..15f6c63a67d0706e09281b282829e492c0c9defd --- /dev/null +++ b/testbed/huggingface__trl/tests/test_dataset_formatting.py @@ -0,0 +1,158 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +from typing import Callable + +from datasets import Dataset, load_dataset +from transformers import AutoModelForCausalLM, AutoTokenizer + +from trl.extras.dataset_formatting import get_formatting_func_from_dataset +from trl.models.utils import ChatMlSpecialTokens, setup_chat_format + + +class DatasetFormattingTestCase(unittest.TestCase): + def setUp(self): + self.llama_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer") + self.chatml_tokenizer = AutoTokenizer.from_pretrained("philschmid/gpt2-chatml-tokenizer") + + def test_get_formatting_func_from_dataset_with_chatml_messages(self): + dataset = Dataset.from_dict( + { + "messages": [ + [ + {"role": "system", "content": "You are helpful"}, + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi, how can I help you?"}, + ] + ] + } + ) + + # Llama tokenizer + formatting_func = get_formatting_func_from_dataset(dataset, self.llama_tokenizer) + self.assertIsInstance(formatting_func, Callable) + formatted_text = formatting_func(dataset[0]) + expected = "[INST] <>\nYou are helpful\n<>\n\nHello [/INST] Hi, how can I help you? " + self.assertEqual(formatted_text, expected) + formatted_text = formatting_func(dataset[0:1]) + self.assertListEqual(formatted_text, [expected]) + + # ChatML tokenizer + formatting_func = get_formatting_func_from_dataset(dataset, self.chatml_tokenizer) + formatted_text = formatting_func(dataset[0]) + expected = "<|im_start|>system\nYou are helpful<|im_end|>\n<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\nHi, how can I help you?<|im_end|>\n" + self.assertEqual(formatted_text, expected) + formatted_text = formatting_func(dataset[0:1]) + self.assertListEqual(formatted_text, [expected]) + + def test_get_formatting_func_from_dataset_with_chatml_conversations(self): + dataset = Dataset.from_dict( + { + "conversations": [ + [ + {"role": "system", "content": "You are helpful"}, + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi, how can I help you?"}, + ] + ] + } + ) + # Llama tokenizer + formatting_func = get_formatting_func_from_dataset(dataset, self.llama_tokenizer) + self.assertIsInstance(formatting_func, Callable) + formatted_text = formatting_func(dataset[0]) + expected = "[INST] <>\nYou are helpful\n<>\n\nHello [/INST] Hi, how can I help you? " + self.assertEqual(formatted_text, expected) + formatted_text = formatting_func(dataset[0:1]) + self.assertListEqual(formatted_text, [expected]) + + # ChatML tokenizer + formatting_func = get_formatting_func_from_dataset(dataset, self.chatml_tokenizer) + formatted_text = formatting_func(dataset[0]) + expected = "<|im_start|>system\nYou are helpful<|im_end|>\n<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\nHi, how can I help you?<|im_end|>\n" + self.assertEqual(formatted_text, expected) + formatted_text = formatting_func(dataset[0:1]) + self.assertListEqual(formatted_text, [expected]) + + def test_get_formatting_func_from_dataset_with_instruction(self): + dataset = Dataset.from_list( + [{"prompt": "What is 2+2?", "completion": "4"}, {"prompt": "What is 3+3?", "completion": "6"}] + ) + formatting_func = get_formatting_func_from_dataset(dataset, self.llama_tokenizer) + self.assertIsNotNone(formatting_func) + self.assertIsInstance(formatting_func, Callable) + formatted_text = formatting_func(dataset[0]) + self.assertEqual(formatted_text, "[INST] What is 2+2? [/INST] 4 ") + formatted_text = formatting_func(dataset[0:1]) + self.assertListEqual(formatted_text, ["[INST] What is 2+2? [/INST] 4 "]) + + def test_get_formatting_func_from_dataset_from_hub(self): + ds_1 = load_dataset("philschmid/trl-test-instruction", split="train") + ds_2 = load_dataset("philschmid/dolly-15k-oai-style", split="train") + for ds in [ds_1, ds_2]: + formatting_func = get_formatting_func_from_dataset(ds, self.llama_tokenizer) + self.assertIsNotNone(formatting_func) + self.assertIsInstance(formatting_func, Callable) + ds_3 = load_dataset("philschmid/guanaco-sharegpt-style", split="train") + formatting_func = get_formatting_func_from_dataset(ds_3, self.llama_tokenizer) + self.assertIsNone(formatting_func) + + def test_get_formatting_func_from_dataset_with_unknown_format(self): + dataset = Dataset.from_dict({"text": "test"}) + formatting_func = get_formatting_func_from_dataset(dataset, self.llama_tokenizer) + self.assertIsNone(formatting_func) + + +class SetupChatFormatTestCase(unittest.TestCase): + def setUp(self): + self.tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer") + self.model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM") + # remove built-in chat_template to simulate a model having no chat_template + self.tokenizer.chat_template = None + + def test_setup_chat_format(self): + original_tokenizer_len = len(self.tokenizer) + modified_model, modified_tokenizer = setup_chat_format( + self.model, self.tokenizer, format="chatml", resize_to_multiple_of=64 + ) + + _chatml = ChatMlSpecialTokens() + # Check if special tokens are correctly set + self.assertEqual(modified_tokenizer.eos_token, "<|im_end|>") + self.assertEqual(modified_tokenizer.pad_token, "<|im_end|>") + self.assertEqual(modified_tokenizer.bos_token, "<|im_start|>") + self.assertEqual(modified_tokenizer.eos_token, _chatml.eos_token) + self.assertEqual(modified_tokenizer.pad_token, _chatml.pad_token) + self.assertEqual(modified_tokenizer.bos_token, _chatml.bos_token) + self.assertEqual(len(modified_tokenizer), (original_tokenizer_len + 2)) + self.assertEqual((self.model.get_input_embeddings().weight.shape[0] % 64), 0) + self.assertEqual(self.model.get_input_embeddings().weight.shape[0], (original_tokenizer_len + 64)) + + def test_example_with_setup_model(self): + modified_model, modified_tokenizer = setup_chat_format( + self.model, + self.tokenizer, + ) + messages = [ + {"role": "system", "content": "You are helpful"}, + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi, how can I help you?"}, + ] + prompt = modified_tokenizer.apply_chat_template(messages, tokenize=False) + + self.assertEqual( + prompt, + "<|im_start|>system\nYou are helpful<|im_end|>\n<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\nHi, how can I help you?<|im_end|>\n", + ) diff --git a/testbed/huggingface__trl/tests/test_ddpo_trainer.py b/testbed/huggingface__trl/tests/test_ddpo_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..65a626589a362fe85c3d3368e358850768a18784 --- /dev/null +++ b/testbed/huggingface__trl/tests/test_ddpo_trainer.py @@ -0,0 +1,128 @@ +# Copyright 2023 metric-space, The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import gc +import unittest + +import torch +from transformers.utils import is_peft_available + +from trl import is_diffusers_available + +from .testing_utils import require_diffusers + + +if is_diffusers_available() and is_peft_available(): + from trl import DDPOConfig, DDPOTrainer, DefaultDDPOStableDiffusionPipeline + + +def scorer_function(images, prompts, metadata): + return torch.randn(1) * 3.0, {} + + +def prompt_function(): + return ("cabbages", {}) + + +@require_diffusers +class DDPOTrainerTester(unittest.TestCase): + """ + Test the DDPOTrainer class. + """ + + def setUp(self): + self.training_args = DDPOConfig( + num_epochs=2, + train_gradient_accumulation_steps=1, + per_prompt_stat_tracking_buffer_size=32, + sample_num_batches_per_epoch=2, + sample_batch_size=2, + mixed_precision=None, + save_freq=1000000, + ) + pretrained_model = "hf-internal-testing/tiny-stable-diffusion-torch" + pretrained_revision = "main" + + pipeline = DefaultDDPOStableDiffusionPipeline( + pretrained_model, pretrained_model_revision=pretrained_revision, use_lora=False + ) + + self.trainer = DDPOTrainer(self.training_args, scorer_function, prompt_function, pipeline) + + return super().setUp() + + def tearDown(self) -> None: + gc.collect() + + def test_loss(self): + advantage = torch.tensor([-1.0]) + clip_range = 0.0001 + ratio = torch.tensor([1.0]) + loss = self.trainer.loss(advantage, clip_range, ratio) + self.assertEqual(loss.item(), 1.0) + + def test_generate_samples(self): + samples, output_pairs = self.trainer._generate_samples(1, 2) + self.assertEqual(len(samples), 1) + self.assertEqual(len(output_pairs), 1) + self.assertEqual(len(output_pairs[0][0]), 2) + + def test_calculate_loss(self): + samples, _ = self.trainer._generate_samples(1, 2) + sample = samples[0] + + latents = sample["latents"][0, 0].unsqueeze(0) + next_latents = sample["next_latents"][0, 0].unsqueeze(0) + log_probs = sample["log_probs"][0, 0].unsqueeze(0) + timesteps = sample["timesteps"][0, 0].unsqueeze(0) + prompt_embeds = sample["prompt_embeds"] + advantage = torch.tensor([1.0], device=prompt_embeds.device) + + self.assertTupleEqual(latents.shape, (1, 4, 64, 64)) + self.assertTupleEqual(next_latents.shape, (1, 4, 64, 64)) + self.assertTupleEqual(log_probs.shape, (1,)) + self.assertTupleEqual(timesteps.shape, (1,)) + self.assertTupleEqual(prompt_embeds.shape, (2, 77, 32)) + loss, approx_kl, clipfrac = self.trainer.calculate_loss( + latents, timesteps, next_latents, log_probs, advantage, prompt_embeds + ) + + self.assertTrue(torch.isfinite(loss.cpu())) + + +@require_diffusers +class DDPOTrainerWithLoRATester(DDPOTrainerTester): + """ + Test the DDPOTrainer class. + """ + + def setUp(self): + self.training_args = DDPOConfig( + num_epochs=2, + train_gradient_accumulation_steps=1, + per_prompt_stat_tracking_buffer_size=32, + sample_num_batches_per_epoch=2, + sample_batch_size=2, + mixed_precision=None, + save_freq=1000000, + ) + pretrained_model = "hf-internal-testing/tiny-stable-diffusion-torch" + pretrained_revision = "main" + + pipeline = DefaultDDPOStableDiffusionPipeline( + pretrained_model, pretrained_model_revision=pretrained_revision, use_lora=True + ) + + self.trainer = DDPOTrainer(self.training_args, scorer_function, prompt_function, pipeline) + + return super().setUp() diff --git a/testbed/huggingface__trl/tests/test_dpo_trainer.py b/testbed/huggingface__trl/tests/test_dpo_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..693d4f4b83bcf3d990d9db70e8997104cd2800f3 --- /dev/null +++ b/testbed/huggingface__trl/tests/test_dpo_trainer.py @@ -0,0 +1,1224 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import tempfile +import unittest +from unittest.mock import MagicMock + +import numpy as np +import torch +from datasets import Dataset, features, load_dataset +from parameterized import parameterized +from transformers import ( + AutoModelForCausalLM, + AutoModelForSeq2SeqLM, + AutoModelForVision2Seq, + AutoProcessor, + AutoTokenizer, + PreTrainedTokenizerBase, + is_vision_available, +) +from transformers.testing_utils import require_peft, require_torch_gpu_if_bnb_not_multi_backend_enabled, require_vision + +from trl import DPOConfig, DPOTrainer, FDivergenceType + +from .testing_utils import require_bitsandbytes, require_no_wandb + + +if is_vision_available(): + from PIL import Image + + +class TestTokenizeRow(unittest.TestCase): + def setUp(self): + # Set up the mock tokenizer with specific behaviors + self.tokenizer = MagicMock(spec=PreTrainedTokenizerBase) + self.tokenizer.bos_token_id = 0 + self.tokenizer.eos_token_id = 2 + + # Define mock return values for the tokenizer's 'input_ids' for the different text inputs + self.tokenizer.return_value = { + "input_ids": {"The sky is": [464, 6766, 318], " blue": [4171], " green": [4077]} + } + + # Define tokenizer behavior when called + def mock_tokenizer_call(text, add_special_tokens): + token_map = { + "The sky is": {"input_ids": [464, 6766, 318]}, + " blue": {"input_ids": [4171]}, + " green": {"input_ids": [4077]}, + } + return token_map[text] + + self.tokenizer.side_effect = mock_tokenizer_call + + def test_tokenize_row_no_truncation_no_special_tokens(self): + # Define the input features + features = {"prompt": "The sky is", "chosen": " blue", "rejected": " green"} + + # Call the method with no truncation and no special tokens + result = DPOTrainer.tokenize_row( + features=features, + processing_class=self.tokenizer, + max_prompt_length=None, + max_completion_length=None, + add_special_tokens=False, + ) + + # Assert the correct output without truncation or special tokens + self.assertEqual( + result, + { + "prompt_input_ids": [464, 6766, 318], + "chosen_input_ids": [4171, 2], # eos_token added + "rejected_input_ids": [4077, 2], # eos_token added + }, + ) + + def test_tokenize_row_with_truncation(self): + # Define the input features + features = {"prompt": "The sky is", "chosen": " blue", "rejected": " green"} + + # Call the method with truncation + result = DPOTrainer.tokenize_row( + features=features, + processing_class=self.tokenizer, + max_prompt_length=2, + max_completion_length=1, + add_special_tokens=False, + ) + + # Assert the correct output with truncation applied + self.assertEqual( + result, + { + "prompt_input_ids": [6766, 318], # truncated to the last 2 tokens + "chosen_input_ids": [4171], # truncated to 1 token + "rejected_input_ids": [4077], # truncated to 1 token + }, + ) + + def test_tokenize_row_with_special_tokens(self): + # Define the input features + features = {"prompt": "The sky is", "chosen": " blue", "rejected": " green"} + + # Call the method with special tokens + result = DPOTrainer.tokenize_row( + features=features, + processing_class=self.tokenizer, + max_prompt_length=None, + max_completion_length=None, + add_special_tokens=True, + ) + + # Assert the correct output with special tokens added + self.assertEqual( + result, + { + "prompt_input_ids": [0, 464, 6766, 318, 2], # bos_token and eos_token added + "chosen_input_ids": [4171, 2], # eos_token added + "rejected_input_ids": [4077, 2], # eos_token added + }, + ) + + def test_tokenize_row_with_truncation_and_special_tokens(self): + # Define the input features + features = {"prompt": "The sky is", "chosen": " blue", "rejected": " green"} + + # Call the method with both truncation and special tokens + result = DPOTrainer.tokenize_row( + features=features, + processing_class=self.tokenizer, + max_prompt_length=4, + max_completion_length=1, + add_special_tokens=True, + ) + + # Assert the correct output with both truncation and special tokens + self.assertEqual( + result, + { + "prompt_input_ids": [464, 6766, 318, 2], # truncated to 4 tokens with bos_token and eos_token + "chosen_input_ids": [4171], # truncated to 1 token + "rejected_input_ids": [4077], # truncated to 1 token + }, + ) + + +class DPOTrainerTester(unittest.TestCase): + def setUp(self): + self.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab" + self.model = AutoModelForCausalLM.from_pretrained(self.model_id) + self.ref_model = AutoModelForCausalLM.from_pretrained(self.model_id) + self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) + self.tokenizer.pad_token = self.tokenizer.eos_token + + # get t5 as seq2seq example: + model_id = "trl-internal-testing/T5ForConditionalGeneration-correct-vocab-calibrated" + self.t5_model = AutoModelForSeq2SeqLM.from_pretrained(model_id) + self.t5_ref_model = AutoModelForSeq2SeqLM.from_pretrained(model_id) + self.t5_tokenizer = AutoTokenizer.from_pretrained(model_id) + + @parameterized.expand( + [ + ["gpt2", "sigmoid", True], + ["t5", "hinge", False], + ["gpt2", "ipo", False], + ["t5", "ipo", True], + ["gpt2", "aot_pair", True], + ["t5", "aot_pair", False], + ["gpt2", "aot", True], + ["t5", "aot", False], + ["gpt2", "bco_pair", False], + ["t5", "bco_pair", True], + ["gpt2", "sppo_hard", False], + ["t5", "sppo_hard", True], + ["gpt2", "nca_pair", False], + ["t5", "nca_pair", True], + ["gpt2", "robust", True], + ["gpt2", "exo_pair", False], + ["t5", "exo_pair", True], + ["gpt2", "apo_zero", True], + ["t5", "apo_down", False], + ["gpt2", "discopop", False], + ] + ) + def test_dpo_trainer(self, name, loss_type, pre_compute): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=1, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + loss_type=loss_type, + precompute_ref_log_probs=pre_compute, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + + if name == "gpt2": + model = self.model + ref_model = self.ref_model + tokenizer = self.tokenizer + elif name == "t5": + model = self.t5_model + ref_model = self.t5_ref_model + tokenizer = self.t5_tokenizer + + trainer = DPOTrainer( + model=model, + ref_model=ref_model, + args=training_args, + processing_class=tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + ) + + previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) + + # check the params have changed + for n, param in previous_trainable_params.items(): + new_param = trainer.model.get_parameter(n) + # check the params have changed - ignore 0 biases + if param.sum() != 0: + self.assertFalse(torch.allclose(param, new_param, rtol=1e-12, atol=1e-12)) + + def test_dpo_trainer_with_weighting(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=1, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + loss_type="sigmoid", + precompute_ref_log_probs=False, + use_weighting=True, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + + trainer = DPOTrainer( + model=self.model, + ref_model=self.ref_model, + args=training_args, + tokenizer=self.tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + ) + + previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) + + # check the params have changed + for n, param in previous_trainable_params.items(): + new_param = trainer.model.get_parameter(n) + # check the params have changed - ignore 0 biases + if param.sum() != 0: + self.assertFalse(torch.allclose(param, new_param, rtol=1e-12, atol=1e-12)) + + @parameterized.expand( + [ + [None, "Test when rpo_alpha is set to None"], + [0.5, "Test when rpo_alpha is set to 0.5"], + ] + ) + def test_dpo_trainer_without_providing_ref_model(self, rpo_alpha, _): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=4, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + precompute_ref_log_probs=True, + rpo_alpha=rpo_alpha, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + + trainer = DPOTrainer( + model=self.model, + ref_model=None, + args=training_args, + processing_class=self.tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + ) + + previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) + + # check the params have changed + for n, param in previous_trainable_params.items(): + new_param = trainer.model.get_parameter(n) + # check the params have changed - ignore 0 biases + if param.sum() != 0: + self.assertFalse(torch.equal(param, new_param)) + + def test_dpo_trainer_with_ref_model_is_model(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + + with self.assertRaises(ValueError): + DPOTrainer( + model=self.model, + ref_model=self.model, # ref_model can't be the same as model + args=training_args, + processing_class=self.tokenizer, + train_dataset=dummy_dataset["train"], + ) + + @require_peft + def test_dpo_trainer_without_providing_ref_model_with_lora(self): + from peft import LoraConfig + + lora_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + ) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=4, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + precompute_ref_log_probs=True, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + + trainer = DPOTrainer( + model=self.model, + ref_model=None, + args=training_args, + processing_class=self.tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + peft_config=lora_config, + ) + + previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) + + # check the params have changed + for n, param in previous_trainable_params.items(): + if "lora" in n: + new_param = trainer.model.get_parameter(n) + # check the params have changed - ignore 0 biases + if param.sum() != 0: + self.assertFalse(torch.equal(param, new_param)) + + def test_dpo_trainer_padding_token_is_none(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=1, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + + tokenizer = AutoTokenizer.from_pretrained(self.model_id) + tokenizer.pad_token = None + + with self.assertRaisesRegex( + ValueError, + expected_regex=r"Can't find `pad_token_id` in the `processing_class`. " + r"Explicitly set `tokenizer.pad_token` \(e.g. `tokenizer.pad_token = tokenizer.eos_token`\) " + r"before instantiating the trainer.", + ): + trainer = DPOTrainer( + model=self.model, + ref_model=None, + args=training_args, + processing_class=tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + ) + + trainer.train() + + def test_dpo_trainer_w_dataset_num_proc(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=1, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + dataset_num_proc=5, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + + tokenizer = AutoTokenizer.from_pretrained(self.model_id) + tokenizer.pad_token = None + + with self.assertRaisesRegex( + ValueError, + expected_regex=r"Can't find `pad_token_id` in the `processing_class`. " + r"Explicitly set `tokenizer.pad_token` \(e.g. `tokenizer.pad_token = tokenizer.eos_token`\) " + r"before instantiating the trainer.", + ): + trainer = DPOTrainer( + model=self.model, + ref_model=None, + args=training_args, + processing_class=tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + ) + + trainer.train() + + def test_tr_dpo_trainer(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=4, + learning_rate=9e-1, + eval_strategy="steps", + precompute_ref_log_probs=False, + sync_ref_model=True, + ref_model_mixup_alpha=0.5, + ref_model_sync_steps=1, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + + trainer = DPOTrainer( + model=self.model, + ref_model=self.ref_model, + args=training_args, + processing_class=self.tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + ) + + # params of the ref model as its the same as the model + previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) + + # check the params have changed + for n, param in previous_trainable_params.items(): + new_param = trainer.ref_model.get_parameter(n) + # check the ref model's params have changed - ignore 0 biases + if param.sum() != 0: + self.assertFalse(torch.equal(param, new_param)) + + @require_no_wandb + def test_dpo_trainer_generate_during_eval_no_wandb(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=1, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + generate_during_eval=True, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + + with self.assertRaisesRegex( + ValueError, + expected_regex="`generate_during_eval=True` requires Weights and Biases to be installed." + " Please install `wandb` to resolve.", + ): + DPOTrainer( + model=self.model, + ref_model=None, + args=training_args, + processing_class=self.tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + ) + + @require_peft + def test_dpo_lora_save(self): + from peft import LoraConfig, get_peft_model + + lora_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + ) + + # lora model + model = AutoModelForCausalLM.from_pretrained(self.model_id) + model_peft = get_peft_model(model, lora_config) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=4, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + precompute_ref_log_probs=True, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + + # dpo train lora model with a lora config + trainer = DPOTrainer( + model=model_peft, + ref_model=None, + args=training_args, + processing_class=self.tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + peft_config=lora_config, + ) + + # train the model + trainer.train() + + # save peft adapter + trainer.save_model() + + try: + AutoModelForCausalLM.from_pretrained(tmp_dir) + except OSError: + self.fail("Loading the saved peft adapter failed") + + @require_peft + @require_torch_gpu_if_bnb_not_multi_backend_enabled + def test_dpo_lora_bf16_autocast_llama(self): + # Note this test only works on compute capability > 7 GPU devices + from peft import LoraConfig + + model_id = "trl-internal-testing/tiny-random-LlamaForCausalLM" + tokenizer = AutoTokenizer.from_pretrained(model_id) + + lora_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + ) + + # lora model + model = AutoModelForCausalLM.from_pretrained(model_id, load_in_4bit=True) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=4, + learning_rate=9e-1, + eval_strategy="steps", + bf16=True, + beta=0.1, + generate_during_eval=True, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + + # dpo train lora model with a lora config + trainer = DPOTrainer( + model=model, + ref_model=None, + args=training_args, + processing_class=tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + peft_config=lora_config, + ) + + # train the model + trainer.train() + + # save peft adapter + trainer.save_model() + + @parameterized.expand( + [ + ["gpt2", "sigmoid", False, False], + ["gpt2", "sigmoid", False, True], + ["gpt2", "sigmoid", True, False], + ["gpt2", "sigmoid", True, True], + ["gpt2", "ipo", False, False], + ["gpt2", "ipo", False, True], + ["gpt2", "ipo", True, False], + ["gpt2", "ipo", True, True], + ["gpt2", "aot_pair", False, False], + ["gpt2", "aot_pair", False, True], + ["gpt2", "aot_pair", True, False], + ["gpt2", "aot_pair", True, True], + ["gpt2", "aot", False, False], + ["gpt2", "aot", False, True], + ["gpt2", "aot", True, False], + ["gpt2", "aot", True, True], + ["gpt2", "bco_pair", False, False], + ["gpt2", "bco_pair", False, True], + ["gpt2", "bco_pair", True, False], + ["gpt2", "bco_pair", True, True], + ["gpt2", "robust", False, False], + ["gpt2", "robust", False, True], + ["gpt2", "robust", True, False], + ["gpt2", "robust", True, True], + ] + ) + @require_bitsandbytes + @require_peft + @unittest.skip("You need a GPU with bf16 support in order to run these tests") + def test_dpo_lora_bf16_autocast(self, name, loss_type, pre_compute, gen_during_eval): + # Note this test only works on compute capability > 7 GPU devices + from peft import LoraConfig + + lora_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + ) + + # lora model + model = AutoModelForCausalLM.from_pretrained(self.model_id, load_in_4bit=True) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=4, + learning_rate=9e-1, + eval_strategy="steps", + bf16=True, + beta=0.1, + generate_during_eval=gen_during_eval, + loss_type=loss_type, + precompute_ref_log_probs=pre_compute, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + + # dpo train lora model with a lora config + trainer = DPOTrainer( + model=model, + ref_model=None, + args=training_args, + processing_class=self.tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + peft_config=lora_config, + ) + + # train the model + trainer.train() + + # save peft adapter + trainer.save_model() + + @require_peft + def test_dpo_lora_tags(self): + from peft import LoraConfig + + model_id = "trl-internal-testing/tiny-random-LlamaForCausalLM" + tokenizer = AutoTokenizer.from_pretrained(model_id) + + lora_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + ) + + # lora model + model = AutoModelForCausalLM.from_pretrained(model_id) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=4, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + + # dpo train lora model with a lora config + trainer = DPOTrainer( + model=model, + ref_model=None, + args=training_args, + processing_class=tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + peft_config=lora_config, + ) + + for tag in ["dpo", "trl"]: + self.assertIn(tag, trainer.model.model_tags) + + @require_peft + def test_dpo_tags(self): + model_id = "HuggingFaceM4/tiny-random-LlamaForCausalLM" + tokenizer = AutoTokenizer.from_pretrained(model_id) + + # lora model + model = AutoModelForCausalLM.from_pretrained(model_id) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=4, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + + # dpo train lora model with a lora config + trainer = DPOTrainer( + model=model, + ref_model=None, + args=training_args, + processing_class=tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + ) + + for tag in ["dpo", "trl"]: + self.assertIn(tag, trainer.model.model_tags) + + @require_peft + def test_dpo_lora_force_use_ref(self): + from peft import LoraConfig, get_peft_model + + lora_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + ) + + # lora model + model = AutoModelForCausalLM.from_pretrained(self.model_id) + model_peft = get_peft_model(model, lora_config) + + ref_model = AutoModelForCausalLM.from_pretrained(self.model_id) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=4, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + + with self.assertRaises(ValueError): + # passing a peft_model as model and ref_model should error out, + # unless you pass `force_use_ref_model` + trainer = DPOTrainer( + model=model_peft, + ref_model=ref_model, + args=training_args, + processing_class=self.tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + peft_config=lora_config, + ) + + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=4, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + force_use_ref_model=True, + report_to="none", + ) + + trainer = DPOTrainer( + model=model_peft, + ref_model=ref_model, + args=training_args, + processing_class=self.tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + peft_config=lora_config, + ) + + # train the model + trainer.train() + + def test_dpo_trainer_torch_dtype(self): + # See https://github.com/huggingface/trl/issues/1751 + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=1, + model_init_kwargs={"torch_dtype": "float16"}, + ref_model_init_kwargs={"torch_dtype": "float16"}, + report_to="none", + ) + + trainer = DPOTrainer( + model=self.model_id, + ref_model=self.model_id, + processing_class=self.tokenizer, + args=training_args, + train_dataset=dummy_dataset["train"], + ) + self.assertEqual(trainer.model.config.torch_dtype, torch.float16) + self.assertEqual(trainer.ref_model.config.torch_dtype, torch.float16) + + # Now test when `torch_dtype` is provided but is wrong to either the model or the ref_model + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=1, + model_init_kwargs={"torch_dtype": -1}, + report_to="none", + ) + + with self.assertRaises(ValueError) as context: + _ = DPOTrainer( + model=self.model_id, + processing_class=self.tokenizer, + args=training_args, + train_dataset=dummy_dataset["train"], + ) + + self.assertIn( + "Invalid `torch_dtype` passed to the DPOConfig. Expected a string with either `torch.dtype` or 'auto', but got -1.", + str(context.exception), + ) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=1, + ref_model_init_kwargs={"torch_dtype": -1}, + report_to="none", + ) + + with self.assertRaises(ValueError) as context: + _ = DPOTrainer( + model=self.model_id, + ref_model=self.model_id, + processing_class=self.tokenizer, + args=training_args, + train_dataset=dummy_dataset["train"], + ) + + self.assertIn( + "Invalid `torch_dtype` passed to the DPOConfig. Expected a string with either `torch.dtype` or 'auto', but got -1.", + str(context.exception), + ) + + def test_dpo_loss_alpha_div_f(self): + model_id = "trl-internal-testing/tiny-random-LlamaForCausalLM" + tokenizer = AutoTokenizer.from_pretrained(model_id) + + # lora model + model = AutoModelForCausalLM.from_pretrained(model_id) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=4, + learning_rate=9e-1, + eval_strategy="steps", + f_divergence_type=FDivergenceType.ALPHA_DIVERGENCE.value, + f_alpha_divergence_coef=0.5, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + + # dpo train lora model with a lora config + trainer = DPOTrainer( + model=model, + ref_model=None, + args=training_args, + processing_class=tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + ) + + # Fake chosen and rejected log probs + policy_chosen_logps = torch.FloatTensor([410.0, 0.1]) + policy_rejected_logps = torch.FloatTensor([810.5, 0.2]) + reference_chosen_logps = torch.FloatTensor([-610.0, -0.1]) + reference_rejected_logps = torch.FloatTensor([110.6, 0.5]) + losses, _, _ = trainer.dpo_loss( + policy_chosen_logps, policy_rejected_logps, reference_chosen_logps, reference_rejected_logps + ) + self.assertTrue(torch.isfinite(losses).cpu().numpy().all()) + + def test_dpo_loss_js_div_f(self): + model_id = "trl-internal-testing/tiny-random-LlamaForCausalLM" + tokenizer = AutoTokenizer.from_pretrained(model_id) + + # lora model + model = AutoModelForCausalLM.from_pretrained(model_id) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=4, + learning_rate=9e-1, + eval_strategy="steps", + f_divergence_type=FDivergenceType.JS_DIVERGENCE.value, + f_alpha_divergence_coef=0.5, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + + # dpo train lora model with a lora config + trainer = DPOTrainer( + model=model, + ref_model=None, + args=training_args, + processing_class=tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + ) + + # Fake chosen and rejected log probs + policy_chosen_logps = torch.FloatTensor([410.0, 0.1]) + policy_rejected_logps = torch.FloatTensor([95.5, 0.2]) + reference_chosen_logps = torch.FloatTensor([-610.0, -0.1]) + reference_rejected_logps = torch.FloatTensor([5.5, 0.5]) + losses, _, _ = trainer.dpo_loss( + policy_chosen_logps, policy_rejected_logps, reference_chosen_logps, reference_rejected_logps + ) + self.assertTrue(torch.isfinite(losses).cpu().numpy().all()) + + def test_dpo_trainer_use_num_logits_to_keep(self): + model_id = "trl-internal-testing/tiny-random-LlamaForCausalLM" + tokenizer = AutoTokenizer.from_pretrained(model_id) + + model = AutoModelForCausalLM.from_pretrained(model_id) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=1, + learning_rate=9e-1, + eval_strategy="steps", + beta=0.1, + use_num_logits_to_keep=True, + rpo_alpha=0.5, + report_to="none", + ) + + dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") + + # dpo train lora model with a lora config + trainer = DPOTrainer( + model=model, + ref_model=None, + args=training_args, + tokenizer=tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + ) + + training_args.use_num_logits_to_keep = False + trainer2 = DPOTrainer( + model=model, + ref_model=None, + args=training_args, + tokenizer=tokenizer, + train_dataset=dummy_dataset["train"], + eval_dataset=dummy_dataset["test"], + ) + + # Fake batch + prompt_input_ids = torch.randint(1, 1000, (2, 10)) + chosen_input_ids = torch.randint(1, 1000, (2, 5)) + rejected_input_ids = torch.randint(1, 1000, (2, 7)) + prompt_attention_mask = torch.ones_like(prompt_input_ids) + chosen_attention_mask = torch.ones_like(chosen_input_ids) + rejected_attention_mask = torch.ones_like(rejected_input_ids) + + batch = { + "prompt_input_ids": prompt_input_ids.to(model.device), + "chosen_input_ids": chosen_input_ids.to(model.device), + "rejected_input_ids": rejected_input_ids.to(model.device), + "prompt_attention_mask": prompt_attention_mask.to(model.device), + "chosen_attention_mask": chosen_attention_mask.to(model.device), + "rejected_attention_mask": rejected_attention_mask.to(model.device), + } + + output = trainer.concatenated_forward(model, batch) + output2 = trainer2.concatenated_forward(model, batch) + + np.testing.assert_allclose(output["nll_loss"].item(), output2["nll_loss"].item(), atol=1e-5) + np.testing.assert_allclose( + output["mean_chosen_logits"].item(), output2["mean_chosen_logits"].item(), atol=1e-5 + ) + np.testing.assert_allclose( + output["mean_rejected_logits"].item(), output2["mean_rejected_logits"].item(), atol=1e-5 + ) + + for i in range(output["chosen_logps"].shape[0]): + np.testing.assert_allclose( + output["chosen_logps"][i].item(), output2["chosen_logps"][i].item(), atol=1e-5 + ) + np.testing.assert_allclose( + output["rejected_logps"][i].item(), output2["rejected_logps"][i].item(), atol=1e-5 + ) + + trainer.train() + + +@require_vision +class DPOVisionTrainerTester(unittest.TestCase): + @parameterized.expand( + [ + ["trl-internal-testing/tiny-random-idefics2"], + ["trl-internal-testing/tiny-random-paligemma"], + ["trl-internal-testing/tiny-random-llava-1.5"], + ] + ) + def test_vdpo_trainer(self, model_id): + # fmt: off + dataset_dict = { + "prompt": [ + [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "Describe the image in great detail."}]}], + [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "Is this bus in the USA?"}]}], + [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "Give a thorough description of the image."}]}], + [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "Who are the people in the image?"}]}], + [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "What is written?"}]}], + ], + "chosen": [ + [{"role": "assistant", "content": [{"type": "text", "text": "The image features a modern, multi-colored train."}]}], + [{"role": "assistant", "content": [{"type": "text", "text": "Yes, it can be assumed that this bus is in the USA."}]}], + [{"role": "assistant", "content": [{"type": "text", "text": "The image features a forest path."}]}], + [{"role": "assistant", "content": [{"type": "text", "text": "There are two individuals, possibly girls or women."}]}], + [{"role": "assistant", "content": [{"type": "text", "text": '"ccpb".'}]}], + ], + "rejected": [ + [{"role": "assistant", "content": [{"type": "text", "text": "The image features a modern, colorful train."}]}], + [{"role": "assistant", "content": [{"type": "text", "text": "No, it's not in the USA."}]}], + [{"role": "assistant", "content": [{"type": "text", "text": "The image features a forest path surrounded by trees."}]}], + [{"role": "assistant", "content": [{"type": "text", "text": "In the image, there are two individuals."}]}], + [{"role": "assistant", "content": [{"type": "text", "text": '"ccpb".'}]}], + ], + "images": [ + [Image.fromarray(np.random.randint(0, 255, (92, 33, 3), dtype=np.uint8))], + [Image.fromarray(np.random.randint(0, 255, (64, 48, 3), dtype=np.uint8))], + [Image.fromarray(np.random.randint(0, 255, (80, 152, 3), dtype=np.uint8))], + [Image.fromarray(np.random.randint(0, 255, (57, 24, 3), dtype=np.uint8))], + [Image.fromarray(np.random.randint(0, 255, (102, 48, 3), dtype=np.uint8))], + ], + } + # fmt: on + dataset = Dataset.from_dict(dataset_dict) + dataset = dataset.cast_column("images", features.Sequence(features.Image())) + + # Instantiate the model and processor + model = AutoModelForVision2Seq.from_pretrained(model_id) + ref_model = AutoModelForVision2Seq.from_pretrained(model_id) + processor = AutoProcessor.from_pretrained(model_id) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = DPOConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_length=512, + max_prompt_length=512, + remove_unused_columns=False, + report_to="none", + ) + trainer = DPOTrainer( + model=model, + ref_model=ref_model, + args=training_args, + processing_class=processor, + train_dataset=dataset, + eval_dataset=dataset, + ) + + # Save the initial weights, so we can check if they have changed after training + previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) + + # Check that the trainable params have changed + for n, param in previous_trainable_params.items(): + new_param = trainer.model.get_parameter(n) + if param.sum() != 0: # ignore 0 biases + if model_id == "trl-internal-testing/tiny-random-llava-1.5" and ( + n.startswith("vision_tower.vision_model.encoder.layers.3") + or n == "vision_tower.vision_model.post_layernorm.weight" + ): + # For some reason, these params are not updated. This is probably not related to TRL, but to + # the model itself. We should investigate this further, but for now we just skip these params. + continue + self.assertFalse(torch.allclose(param, new_param, rtol=1e-12, atol=1e-12)) + + +if __name__ == "__main__": + unittest.main() diff --git a/testbed/huggingface__trl/tests/test_environments.py b/testbed/huggingface__trl/tests/test_environments.py new file mode 100644 index 0000000000000000000000000000000000000000..d2ee42f0403319e77dd1073b542303746cf7fe5f --- /dev/null +++ b/testbed/huggingface__trl/tests/test_environments.py @@ -0,0 +1,278 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +from unittest.mock import patch + +import torch +from transformers import AutoTokenizer + +from trl import AutoModelForCausalLMWithValueHead, TextEnvironment, TextHistory + + +class DummyTool: + def __call__(self, text): + return text + + +def dummy_generate(histories): + for i in range(len(histories)): + histories[i].append_segment("test", torch.tensor([1, 2, 3]), system=False) + return histories + + +class TextHistoryTest(unittest.TestCase): + def test_text_history_init(self): + text = "Hello there!" + tokens = torch.tensor([1, 2, 3]) + + history = TextHistory(text, tokens) + self.assertEqual(history.text, text) + self.assertTrue(torch.equal(history.tokens, tokens)) + self.assertTrue(torch.equal(history.token_masks, torch.zeros_like(tokens))) + + history = TextHistory(text, tokens, system=False) + self.assertTrue(torch.equal(history.token_masks, torch.ones_like(tokens))) + + def test_text_history_append_segment(self): + text = "Hello there!" + tokens = torch.tensor([1, 2, 3]) + + history = TextHistory(text, tokens) + history.append_segment("General Kenobi!", torch.tensor([4, 5, 6]), system=False) + self.assertEqual(history.text, (text + "General Kenobi!")) + self.assertTrue(torch.equal(history.tokens, torch.tensor([1, 2, 3, 4, 5, 6]))) + self.assertTrue(torch.equal(history.token_masks, torch.tensor([0, 0, 0, 1, 1, 1]))) + + history.append_segment("You are a bold one!", torch.tensor([7, 8, 9])) + self.assertEqual(history.text, ((text + "General Kenobi!") + "You are a bold one!")) + self.assertTrue(torch.equal(history.tokens, torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9]))) + self.assertTrue(torch.equal(history.token_masks, torch.tensor([0, 0, 0, 1, 1, 1, 0, 0, 0]))) + + def test_text_history_complete(self): + text = "Hello there!" + tokens = torch.tensor([1, 2, 3]) + history = TextHistory(text, tokens) + history.complete() + self.assertTrue(history.completed) + self.assertFalse(history.truncated) + + history.complete(truncated=True) + self.assertTrue(history.completed) + self.assertTrue(history.truncated) + + def test_text_history_last_segment(self): + text = "Hello there!" + tokens = torch.tensor([1, 2, 3]) + history = TextHistory(text, tokens) + history.append_segment("General Kenobi!", torch.tensor([4, 5, 6])) + history.append_segment("You are a bold one!", torch.tensor([7, 8, 9])) + self.assertEqual(history.last_text_segment, "You are a bold one!") + + def test_text_history_split_query_response(self): + text = "Hello there!" + tokens = torch.tensor([1, 2, 3]) + history = TextHistory(text, tokens) + history.append_segment("General Kenobi!", torch.tensor([4, 5, 6]), system=False) + history.append_segment("You are a bold one!", torch.tensor([7, 8, 9]), system=True) + query, response, mask = history.split_query_response_tokens() + + self.assertTrue(torch.equal(query, torch.tensor([1, 2, 3]))) + self.assertTrue(torch.equal(response, torch.tensor([4, 5, 6, 7, 8, 9]))) + self.assertTrue(torch.equal(mask, torch.tensor([1, 1, 1, 0, 0, 0]))) + + +class TextEnvironmentTester(unittest.TestCase): + def setUp(self): + # model_id + self.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab" + + # get models and tokenizer + self.gpt2_model = AutoModelForCausalLMWithValueHead.from_pretrained(self.model_id) + self.gpt2_tokenizer = AutoTokenizer.from_pretrained(self.model_id) + self.gpt2_tokenizer.pad_token = self.gpt2_tokenizer.eos_token + + def test_text_environment_setup(self): + env = TextEnvironment( + self.gpt2_model, + self.gpt2_tokenizer, + tools=[DummyTool()], + reward_fn=lambda x: torch.tensor(1), + prompt="I am a prompt!\n", + ) + self.assertEqual(env.prompt, "I am a prompt!\n") + self.assertListEqual(list(env.tools.keys()), ["DummyTool"]) + self.assertIsInstance(env.tools["DummyTool"], DummyTool) + self.assertEqual(env.reward_fn("Hello there!"), 1) + + def test_text_environment_generate(self): + generation_kwargs = {"do_sample": False, "max_new_tokens": 4, "pad_token_id": self.gpt2_tokenizer.eos_token_id} + env = TextEnvironment( + self.gpt2_model, + self.gpt2_tokenizer, + tools=[DummyTool()], + reward_fn=lambda x: torch.tensor(1), + prompt="I am a prompt!\n", + generation_kwargs=generation_kwargs, + ) + + input_texts = ["this is a test", "this is another, longer test"] + + model_inputs = [self.gpt2_tokenizer(txt, return_tensors="pt").input_ids.squeeze() for txt in input_texts] + + generations_batched = env._generate_batched(model_inputs, batch_size=2) + generations_batched = self.gpt2_tokenizer.batch_decode(generations_batched) + + generations_single = [env._generate_batched([inputs], batch_size=1)[0] for inputs in model_inputs] + generations_single = self.gpt2_tokenizer.batch_decode(generations_single) + + self.assertEqual(generations_single, generations_batched) + + def test_text_environment_tool_call_parsing(self): + string_valid = "Something something Hello there!" + string_invalid_request = "Something something Hello there!" + string_invalid_call = "Something something Hello there!" + string_invalid_tool = "Something something |Tool2|Hello there!" + string_invalid_random = "<>abcdefghijklm<>nopqrstuvwxyz<>" + + env = TextEnvironment( + self.gpt2_model, + self.gpt2_tokenizer, + tools=[DummyTool()], + reward_fn=lambda x: torch.tensor(1), + prompt="I am a prompt!\n", + ) + tool, response = env.parse_tool_call(string_valid) + self.assertEqual(tool, "Tool1") + self.assertEqual(response, "Hello there!") + + tool, response = env.parse_tool_call(string_invalid_request) + self.assertIsNone(tool) + self.assertIsNone(response) + + tool, response = env.parse_tool_call(string_invalid_call) + self.assertIsNone(tool) + self.assertIsNone(response) + + tool, response = env.parse_tool_call(string_invalid_tool) + self.assertIsNone(tool) + self.assertIsNone(response) + + tool, response = env.parse_tool_call(string_invalid_random) + self.assertIsNone(tool) + self.assertIsNone(response) + + def test_text_environment_tool_truncation(self): + env = TextEnvironment( + self.gpt2_model, + self.gpt2_tokenizer, + tools={"dummy": lambda x: "a" * 1000}, + reward_fn=lambda x: torch.tensor(1), + prompt="I am a prompt!\n", + ) + + env.max_tool_response = 100 + history = env.step(TextHistory("Hello there!", torch.tensor([1, 2, 3]))) + self.assertEqual((len(history.last_text_segment) - len(env.response_token)), 100) + + env.max_tool_response = 500 + history = env.step(TextHistory("Hello there!", torch.tensor([1, 2, 3]))) + self.assertEqual((len(history.last_text_segment) - len(env.response_token)), 500) + + env.max_tool_response = 1001 + history = env.step(TextHistory("Hello there!", torch.tensor([1, 2, 3]))) + self.assertEqual((len(history.last_text_segment) - len(env.response_token)), 1000) + + env.max_tool_response = 2000 + history = env.step(TextHistory("Hello there!", torch.tensor([1, 2, 3]))) + self.assertEqual((len(history.last_text_segment) - len(env.response_token)), 1000) + + @patch.object(TextEnvironment, "generate", side_effect=dummy_generate) + def test_text_environment_max_calls(self, mock_generate): + env = TextEnvironment( + self.gpt2_model, + self.gpt2_tokenizer, + tools={"DummyTool": DummyTool()}, + reward_fn=lambda x: [torch.tensor(1) for _ in x], + prompt="I am a prompt!\n", + ) + + env.max_turns = 1 + _, _, _, _, histories = env.run(["test"]) + self.assertEqual( + histories[0].text, + ("I am a prompt!\n" + "test") + (1 * "testtest"), + ) + + env.max_turns = 2 + _, _, _, _, histories = env.run(["test"]) + self.assertEqual( + histories[0].text, + ("I am a prompt!\n" + "test") + (2 * "testtest"), + ) + + env.max_turns = 4 + _, _, _, _, histories = env.run(["test"]) + self.assertEqual( + histories[0].text, + ("I am a prompt!\n" + "test") + (4 * "testtest"), + ) + + def test_text_environment_compute_rewards(self): + env = TextEnvironment( + self.gpt2_model, + self.gpt2_tokenizer, + tools={"DummyTool": DummyTool()}, + reward_fn=lambda x: [torch.tensor(i) for i, _ in enumerate(x)], + prompt="I am a prompt!\n", + ) + + histories = [TextHistory("test", torch.tensor([1, 2, 3])) for _ in range(8)] + histories = env.compute_reward(histories) + + for i in range(8): + self.assertEqual(histories[i].reward, i) + + @patch.object(TextEnvironment, "generate", side_effect=dummy_generate) + def test_text_environment_run(self, mock_generate): + env = TextEnvironment( + self.gpt2_model, + self.gpt2_tokenizer, + tools={"DummyTool": DummyTool()}, + reward_fn=lambda x: [torch.tensor(i) for i, _ in enumerate(x)], + prompt="I am a prompt!\n", + max_turns=2, + ) + task_1 = "Hello there!" + task_2 = "Hello there! General Kenobi!" + + query, response, response_mask, reward, histories = env.run([task_1, task_2]) + self.assertEqual(len(query[0]), 9) + self.assertEqual(len(query[1]), 12) + self.assertEqual(len(response[0]), 14) + self.assertEqual(len(response[1]), 14) + self.assertEqual(response_mask[0].sum(), (2 * 3)) + # mocked generate always adds 3 toknes + self.assertEqual(response_mask[1].sum(), (2 * 3)) + # mocked generate always adds 3 toknes + self.assertEqual(reward[1], 1) + self.assertEqual( + histories[0].text, + ("I am a prompt!\n" + "Hello there!") + (2 * "testtest"), + ) + self.assertEqual( + histories[1].text, + ("I am a prompt!\n" + "Hello there! General Kenobi!") + + (2 * "testtest"), + ) diff --git a/testbed/huggingface__trl/tests/test_iterative_sft_trainer.py b/testbed/huggingface__trl/tests/test_iterative_sft_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..6248a2b90a66af36c74dda9ca7296e558792bf01 --- /dev/null +++ b/testbed/huggingface__trl/tests/test_iterative_sft_trainer.py @@ -0,0 +1,116 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import tempfile +import unittest +from functools import partial + +import torch +from datasets import Dataset +from parameterized import parameterized +from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, TrainingArguments + +from trl import IterativeSFTTrainer + + +class IterativeTrainerTester(unittest.TestCase): + def setUp(self): + self.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab" + self.model = AutoModelForCausalLM.from_pretrained(self.model_id) + self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) + self.tokenizer.pad_token = self.tokenizer.eos_token + + # get t5 as seq2seq example: + model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration-correct-vocab-calibrated" + self.t5_model = AutoModelForSeq2SeqLM.from_pretrained(model_id) + self.t5_tokenizer = AutoTokenizer.from_pretrained(model_id) + + def _init_tensor_dummy_dataset(self): + dummy_dataset_dict = { + "input_ids": [ + torch.tensor([5303, 3621, 3666, 1438, 318]), + torch.tensor([3666, 1438, 318, 3666, 1438, 318]), + torch.tensor([5303, 3621, 3666, 1438, 318]), + ], + "attention_mask": [ + torch.tensor([1, 1, 1, 1, 1]), + torch.tensor([1, 1, 1, 1, 1, 1]), + torch.tensor([1, 1, 1, 1, 1]), + ], + "labels": [ + torch.tensor([5303, 3621, 3666, 1438, 318]), + torch.tensor([3666, 1438, 318, 3666, 1438, 318]), + torch.tensor([5303, 3621, 3666, 1438, 318]), + ], + } + + dummy_dataset = Dataset.from_dict(dummy_dataset_dict) + dummy_dataset.set_format("torch") + return dummy_dataset + + def _init_textual_dummy_dataset(self): + dummy_dataset_dict = { + "texts": ["Testing the IterativeSFTTrainer.", "This is a test of the IterativeSFTTrainer"], + "texts_labels": ["Testing the IterativeSFTTrainer.", "This is a test of the IterativeSFTTrainer"], + } + + dummy_dataset = Dataset.from_dict(dummy_dataset_dict) + dummy_dataset.set_format("torch") + return dummy_dataset + + @parameterized.expand( + [ + ["gpt2", "tensor"], + ["gpt2", "text"], + ["t5", "tensor"], + ["t5", "text"], + ] + ) + def test_iterative_step_from_tensor(self, model_name, input_name): + with tempfile.TemporaryDirectory() as tmp_dir: + # initialize dataset + if input_name == "tensor": + dummy_dataset = self._init_tensor_dummy_dataset() + inputs = { + "input_ids": dummy_dataset["input_ids"], + "attention_mask": dummy_dataset["attention_mask"], + "labels": dummy_dataset["labels"], + } + else: + dummy_dataset = self._init_textual_dummy_dataset() + inputs = { + "texts": dummy_dataset["texts"], + "texts_labels": dummy_dataset["texts_labels"], + } + + if model_name == "gpt2": + model = self.model + tokenizer = self.tokenizer + else: + model = self.t5_model + tokenizer = self.t5_tokenizer + + training_args = TrainingArguments( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=2, + learning_rate=1e-3, + report_to="none", + ) + iterative_trainer = IterativeSFTTrainer(model=model, args=training_args, processing_class=tokenizer) + iterative_trainer.optimizer.zero_grad = partial(iterative_trainer.optimizer.zero_grad, set_to_none=False) + + iterative_trainer.step(**inputs) + + for param in iterative_trainer.model.parameters(): + self.assertIsNotNone(param.grad) diff --git a/testbed/huggingface__trl/tests/test_modeling_geometric_mixture_wrapper.py b/testbed/huggingface__trl/tests/test_modeling_geometric_mixture_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..227e1019da92e05dc0da206de3c7b31e19ba77bb --- /dev/null +++ b/testbed/huggingface__trl/tests/test_modeling_geometric_mixture_wrapper.py @@ -0,0 +1,65 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import unittest + +import torch +from transformers import AutoModelForCausalLM, GenerationConfig + +from trl.models.modeling_base import GeometricMixtureWrapper, create_reference_model + + +class TestGeometricMixtureWrapper(unittest.TestCase): + def setUp(self): + self.model = AutoModelForCausalLM.from_pretrained("gpt2") + self.ref_model = create_reference_model(self.model) + self.generation_config = GenerationConfig.from_pretrained("gpt2") + self.mixture_coef = 0.5 + self.wrapper = GeometricMixtureWrapper( + self.model, self.ref_model, self.generation_config, mixture_coef=self.mixture_coef + ) + + def test_forward(self): + input_ids = torch.tensor([[1, 2, 3, 4, 5]]) + attention_mask = torch.ones_like(input_ids) + + output = self.wrapper(input_ids=input_ids, attention_mask=attention_mask) + + self.assertIsNotNone(output) + self.assertTrue(hasattr(output, "logits")) + self.assertEqual(output.logits.shape, (1, 5, self.model.config.vocab_size)) + + def test_mixture_coefficient(self): + input_ids = torch.tensor([[1, 2, 3, 4, 5]]) + attention_mask = torch.ones_like(input_ids) + + with torch.no_grad(): + model_output = self.model(input_ids=input_ids, attention_mask=attention_mask) + ref_model_output = self.ref_model(input_ids=input_ids, attention_mask=attention_mask) + wrapper_output = self.wrapper(input_ids=input_ids, attention_mask=attention_mask) + + expected_logits = torch.nn.functional.log_softmax( + self.mixture_coef * ref_model_output.logits + (1 - self.mixture_coef) * model_output.logits, dim=-1 + ) + + self.assertTrue(torch.allclose(wrapper_output.logits, expected_logits, atol=1e-5)) + + def test_prepare_inputs_for_generation(self): + input_ids = torch.tensor([[1, 2, 3, 4, 5]]) + attention_mask = torch.ones_like(input_ids) + + inputs = self.wrapper.prepare_inputs_for_generation(input_ids, attention_mask=attention_mask, use_cache=True) + + self.assertIn("input_ids", inputs) + self.assertIn("attention_mask", inputs) + self.assertFalse(inputs.get("use_cache", False)) diff --git a/testbed/huggingface__trl/tests/test_sft_trainer.py b/testbed/huggingface__trl/tests/test_sft_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..e615caf0431fe7271ad71089cb0511b962bf1162 --- /dev/null +++ b/testbed/huggingface__trl/tests/test_sft_trainer.py @@ -0,0 +1,1270 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import copy +import os +import tempfile +import unittest + +import numpy as np +import torch +from datasets import Dataset, Image, Sequence, load_dataset +from transformers import ( + AutoModelForCausalLM, + AutoProcessor, + AutoTokenizer, + LlavaForConditionalGeneration, + TrainingArguments, + is_vision_available, +) +from transformers.testing_utils import require_peft, require_vision +from transformers.utils import is_peft_available + +from trl import SFTConfig, SFTTrainer +from trl.trainer import ConstantLengthDataset, DataCollatorForCompletionOnlyLM + + +def formatting_prompts_func(example): + text = f"### Question: {example['question']}\n ### Answer: {example['answer']}" + return text + + +def formatting_prompts_func_batched(example): + output_text = [] + for i, question in enumerate(example["question"]): + text = f"### Question: {question}\n ### Answer: {example['answer'][i]}" + output_text.append(text) + return output_text + + +if is_peft_available(): + from peft import LoraConfig, PeftModel + +if is_vision_available(): + from PIL import Image as PILImage + + +class SFTTrainerTester(unittest.TestCase): + r""" """ + + def setUp(self): + self.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab" + self.model = AutoModelForCausalLM.from_pretrained(self.model_id) + self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) + self.tokenizer.pad_token = self.tokenizer.eos_token + self.dummy_dataset = Dataset.from_dict( + { + "question": [ + "Does llamas know how to code?", + "Does llamas know how to fly?", + "Does llamas know how to talk?", + "Does llamas know how to code?", + "Does llamas know how to fly?", + "Does llamas know how to talk?", + "Does llamas know how to swim?", + ], + "answer": [ + "Yes, llamas are very good at coding.", + "No, llamas can't fly.", + "Yes, llamas are very good at talking.", + "Yes, llamas are very good at coding.", + "No, llamas can't fly.", + "Yes, llamas are very good at talking.", + "No, llamas can't swim.", + ], + "text": [ + "### Question: Does llamas know how to code?\n ### Answer: Yes, llamas are very good at coding.", + "### Question: Does llamas know how to fly?\n ### Answer: No, llamas can't fly.", + "### Question: Does llamas know how to talk?\n ### Answer: Yes, llamas are very good at talking.", + "### Question: Does llamas know how to code?\n ### Answer: Yes, llamas are very good at coding.", + "### Question: Does llamas know how to fly?\n ### Answer: No, llamas can't fly.", + "### Question: Does llamas know how to talk?\n ### Answer: Yes, llamas are very good at talking.", + "### Question: Does llamas know how to swim?\n ### Answer: No, llamas can't swim.", + ], + } + ) + self.conversational_lm_dataset = load_dataset("trl-internal-testing/zen", "conversational_language_modeling") + self.standard_prompt_completion_dataset = load_dataset( + "trl-internal-testing/zen", "standard_prompt_completion" + ) + + if is_vision_available(): + self.dummy_vsft_instruction_dataset = Dataset.from_dict( + { + "messages": [ + [ + { + "role": "user", + "content": [{"type": "text", "text": "What is in this image?"}, {"type": "image"}], + }, + { + "role": "assistant", + "content": [{"type": "text", "text": "It is random noise."}], + }, + { + "role": "user", + "content": [{"type": "text", "text": "Oh ye, you are right, what is 1+1"}], + }, + { + "role": "assistant", + "content": [{"type": "text", "text": "2"}], + }, + ], + [ + { + "role": "user", + "content": [{"type": "text", "text": "What is in this image?"}, {"type": "image"}], + }, + { + "role": "assistant", + "content": [{"type": "text", "text": "It is random noise."}], + }, + ], + ], + "images": [ + [PILImage.fromarray((np.random.rand(40, 50, 3) * 255).astype("uint8")).convert("RGBA")], + [PILImage.fromarray((np.random.rand(50, 60, 3) * 255).astype("uint8")).convert("RGBA")], + ], + } + ) + self.dummy_vsft_instruction_dataset.cast_column("images", Sequence(Image())) + self.dummy_vsft_instruction_dataset = self.dummy_vsft_instruction_dataset.cast_column( + "images", Sequence(Image()) + ) + + self.train_dataset = ConstantLengthDataset( + self.tokenizer, + self.dummy_dataset, + formatting_func=formatting_prompts_func, + seq_length=16, + num_of_sequences=16, + ) + + self.eval_dataset = ConstantLengthDataset( + self.tokenizer, + self.dummy_dataset, + formatting_func=formatting_prompts_func, + seq_length=16, + num_of_sequences=16, + ) + + def test_constant_length_dataset(self): + formatted_dataset = ConstantLengthDataset( + self.tokenizer, + self.dummy_dataset, + formatting_func=formatting_prompts_func, + ) + + self.assertEqual(len(formatted_dataset), len(self.dummy_dataset)) + self.assertGreater(len(formatted_dataset), 0) + + for example in formatted_dataset: + self.assertIn("input_ids", example) + self.assertIn("labels", example) + + self.assertEqual(len(example["input_ids"]), formatted_dataset.seq_length) + self.assertEqual(len(example["labels"]), formatted_dataset.seq_length) + + decoded_text = self.tokenizer.decode(example["input_ids"]) + self.assertTrue(("Question" in decoded_text) and ("Answer" in decoded_text)) + + def test_sft_trainer_backward_compatibility(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = TrainingArguments( + output_dir=tmp_dir, + eval_strategy="steps", + max_steps=4, + eval_steps=2, + save_steps=2, + per_device_train_batch_size=2, + hub_token="not_a_real_token", + report_to="none", + ) + + trainer = SFTTrainer( + model=self.model_id, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + formatting_func=formatting_prompts_func, + ) + + self.assertEqual(trainer.args.hub_token, training_args.hub_token) + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) + self.assertIsNotNone(trainer.state.log_history[0]["eval_loss"]) + + self.assertIn("model.safetensors", os.listdir(tmp_dir + "/checkpoint-2")) + + def test_sft_trainer(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + eval_strategy="steps", + max_steps=4, + eval_steps=2, + save_steps=2, + per_device_train_batch_size=2, + packing=True, + report_to="none", + ) + + trainer = SFTTrainer( + model=self.model_id, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + ) + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) + self.assertIsNotNone(trainer.state.log_history[0]["eval_loss"]) + + self.assertIn("model.safetensors", os.listdir(tmp_dir + "/checkpoint-2")) + + def test_sft_trainer_uncorrect_data(self): + with tempfile.TemporaryDirectory() as tmp_dir: + # Shoud work as SFTTrainer natively supports conversational lm dataset + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + max_steps=2, + eval_steps=1, + save_steps=1, + per_device_train_batch_size=2, + max_seq_length=32, # make sure there is at least 1 packed sequence + num_of_sequences=32, + packing=True, + report_to="none", + ) + _ = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.conversational_lm_dataset["train"], + ) + + # Same, but without packing + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + max_steps=2, + eval_steps=1, + save_steps=1, + per_device_train_batch_size=2, + packing=False, + report_to="none", + ) + _ = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.conversational_lm_dataset["train"], + ) + + # Same, but with packing with `max_seq_length` + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + max_steps=2, + eval_steps=1, + save_steps=1, + per_device_train_batch_size=2, + max_seq_length=16, # make sure there is at least 1 packed sequence + packing=True, + report_to="none", + ) + _ = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.standard_prompt_completion_dataset["train"], + ) + + # Same but with prompt completion dataset + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + max_steps=2, + eval_steps=1, + save_steps=1, + per_device_train_batch_size=2, + packing=False, + report_to="none", + ) + _ = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.standard_prompt_completion_dataset["train"], + ) + + # Should work as dummy dataset are supported with a formatting function + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + max_steps=2, + eval_steps=1, + save_steps=1, + per_device_train_batch_size=2, + max_seq_length=32, # make sure there is at least 1 packed sequence + packing=True, + report_to="none", + ) + _ = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.dummy_dataset, + formatting_func=formatting_prompts_func, + ) + + # This should not work because not enough data for one sample + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + max_steps=2, + eval_steps=1, + save_steps=1, + per_device_train_batch_size=2, + max_seq_length=1024, # make sure there is NOT at least 1 packed sequence + packing=True, + report_to="none", + ) + with self.assertRaises(ValueError): + _ = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.dummy_dataset, + formatting_func=formatting_prompts_func, + ) + + # This should not work as well + with self.assertRaises(ValueError): + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + max_steps=2, + eval_steps=1, + save_steps=1, + per_device_train_batch_size=2, + packing=False, + report_to="none", + ) + _ = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.dummy_dataset, + formatting_func=formatting_prompts_func, + ) + + # but this should work + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + max_steps=2, + eval_steps=1, + save_steps=1, + per_device_train_batch_size=2, + packing=False, + report_to="none", + ) + _ = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.dummy_dataset, + formatting_func=formatting_prompts_func_batched, + ) + + def test_sft_trainer_with_model_num_train_epochs(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + eval_strategy="steps", + max_steps=2, + eval_steps=1, + save_steps=1, + num_train_epochs=2, + per_device_train_batch_size=2, + packing=True, + report_to="none", + ) + trainer = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + ) + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) + self.assertIsNotNone(trainer.state.log_history[0]["eval_loss"]) + + self.assertIn("model.safetensors", os.listdir(tmp_dir + "/checkpoint-2")) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + max_steps=2, + save_steps=1, + num_train_epochs=2, + per_device_train_batch_size=2, + max_seq_length=16, + num_of_sequences=16, + packing=True, + report_to="none", + ) + trainer = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.dummy_dataset, + ) + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) + + self.assertIn("model.safetensors", os.listdir(tmp_dir + "/checkpoint-2")) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + max_steps=2, + save_steps=1, + num_train_epochs=2, + per_device_train_batch_size=2, + max_seq_length=16, + report_to="none", + ) + trainer = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.dummy_dataset, + ) + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) + + self.assertIn("model.safetensors", os.listdir(tmp_dir + "/checkpoint-1")) + + def test_sft_trainer_with_model(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + eval_strategy="steps", + max_steps=2, + eval_steps=1, + save_steps=1, + per_device_train_batch_size=2, + packing=True, + report_to="none", + ) + trainer = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + ) + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) + self.assertIsNotNone(trainer.state.log_history[0]["eval_loss"]) + + self.assertIn("model.safetensors", os.listdir(tmp_dir + "/checkpoint-2")) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + max_steps=2, + save_steps=1, + per_device_train_batch_size=2, + max_seq_length=16, + num_of_sequences=16, + packing=True, + report_to="none", + ) + trainer = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.dummy_dataset, + ) + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) + + self.assertIn("model.safetensors", os.listdir(tmp_dir + "/checkpoint-2")) + + # with formatting_func + packed + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + max_steps=2, + save_steps=1, + per_device_train_batch_size=2, + max_seq_length=16, + num_of_sequences=16, + packing=True, + report_to="none", + ) + trainer = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.dummy_dataset, + formatting_func=formatting_prompts_func, + ) + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) + + self.assertIn("model.safetensors", os.listdir(tmp_dir + "/checkpoint-2")) + + # with formatting_func + packed + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + max_steps=2, + save_steps=1, + per_device_train_batch_size=2, + max_seq_length=16, + report_to="none", + ) + trainer = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.dummy_dataset, + formatting_func=formatting_prompts_func_batched, + ) + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) + + self.assertIn("model.safetensors", os.listdir(tmp_dir + "/checkpoint-2")) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + max_steps=2, + save_steps=1, + per_device_train_batch_size=2, + max_seq_length=16, + report_to="none", + ) + trainer = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.dummy_dataset, + ) + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) + + self.assertIn("model.safetensors", os.listdir(tmp_dir + "/checkpoint-1")) + + def test_sft_trainer_with_multiple_eval_datasets(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + eval_strategy="steps", + max_steps=1, + eval_steps=1, + save_steps=1, + per_device_train_batch_size=2, + packing=True, + report_to="none", + ) + + trainer = SFTTrainer( + model=self.model_id, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset={ + "data1": self.eval_dataset, + "data2": self.eval_dataset, + }, + ) + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) + self.assertIsNotNone(trainer.state.log_history[0]["eval_data1_loss"]) + self.assertIsNotNone(trainer.state.log_history[1]["eval_data2_loss"]) + + self.assertIn("model.safetensors", os.listdir(tmp_dir + "/checkpoint-1")) + + def test_data_collator_completion_lm(self): + response_template = "### Response:\n" + data_collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=self.tokenizer, mlm=False) + + text = """\n\n### Instructions:\nHello all this should be masked\n\n### Response:\nI have not been masked correctly.""" + encoded_text = self.tokenizer(text) + + examples = [encoded_text] + + batch = data_collator(examples) + labels = batch["labels"] + last_pad_idx = np.where(labels == -100)[1][-1] + result_text = self.tokenizer.decode(batch["input_ids"][0, last_pad_idx + 1 :]) + self.assertEqual(result_text, "I have not been masked correctly.") + + def test_data_collator_completion_lm_with_multiple_text(self): + tokenizer = copy.deepcopy(self.tokenizer) + tokenizer.padding_side = "left" + + response_template = "### Response:\n" + data_collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=tokenizer, mlm=False) + + text1 = """\n\n### Instructions:\nHello all this should be masked\n\n### Response:\nI have not been masked correctly.""" + text2 = """\n\n### Instructions:\nThis is another longer text that should also be masked. This text is significantly longer than the previous one.\n\n### Response:\nI have not been masked correctly.""" + + encoded_text1 = tokenizer(text1) + encoded_text2 = tokenizer(text2) + + examples = [encoded_text1, encoded_text2] + + batch = data_collator(examples) + + for i in range(2): + labels = batch["labels"][i] + last_pad_idx = np.where(labels == -100)[0][-1] + result_text = tokenizer.decode(batch["input_ids"][i, last_pad_idx + 1 :]) + self.assertEqual(result_text, "I have not been masked correctly.") + + def test_data_collator_chat_completion_lm(self): + instruction_template = "### Human:" + assistant_template = "### Assistant:" + data_collator = DataCollatorForCompletionOnlyLM( + response_template=assistant_template, + instruction_template=instruction_template, + tokenizer=self.tokenizer, + mlm=False, + ) + + text = """### Human: Hello all this should be masked.### Assistant: I should not be masked.### Human: All this should be masked too.### Assistant: I should not be masked too.""" + encoded_text = self.tokenizer(text) + + examples = [encoded_text] + + batch = data_collator(examples) + labels = batch["labels"] + non_masked_tokens = batch["input_ids"][labels != -100] + result_text = self.tokenizer.decode(non_masked_tokens) + self.assertEqual(result_text, " I should not be masked. I should not be masked too.") + + def test_data_collator_chat_completion_lm_with_multiple_text(self): + tokenizer = copy.deepcopy(self.tokenizer) + tokenizer.padding_side = "left" + + instruction_template = "### Human:" + assistant_template = "### Assistant:" + data_collator = DataCollatorForCompletionOnlyLM( + response_template=assistant_template, + instruction_template=instruction_template, + tokenizer=tokenizer, + mlm=False, + ) + + text1 = """### Human: Hello all this should be masked.### Assistant: I should not be masked.""" + text2 = """### Human: Hello all this should be masked.### Assistant: I should not be masked.### Human: All this should be masked too.### Assistant: I should not be masked too.""" + encoded_text1 = tokenizer(text1) + encoded_text2 = tokenizer(text2) + + examples = [encoded_text1, encoded_text2] + + batch = data_collator(examples) + labels = batch["labels"] + input_ids = batch["input_ids"] + + non_masked_tokens1 = input_ids[0][labels[0] != -100] + result_text1 = tokenizer.decode(non_masked_tokens1) + self.assertEqual(result_text1, " I should not be masked.") + + non_masked_tokens2 = input_ids[1][labels[1] != -100] + result_text2 = tokenizer.decode(non_masked_tokens2) + self.assertEqual(result_text2, " I should not be masked. I should not be masked too.") + + def test_sft_trainer_infinite_with_model(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + eval_strategy="steps", + max_steps=5, + eval_steps=1, + save_steps=1, + per_device_train_batch_size=2, + packing=True, + max_seq_length=500, + report_to="none", + ) + trainer = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + ) + + self.assertTrue(trainer.train_dataset.infinite) + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) + self.assertIsNotNone(trainer.state.log_history[0]["eval_loss"]) + + # make sure the trainer did 5 steps + self.assertIn("model.safetensors", os.listdir(tmp_dir + "/checkpoint-5")) + + def test_sft_trainer_infinite_with_model_epochs(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + num_train_epochs=1, + per_device_train_batch_size=2, + save_strategy="epoch", + packing=True, + max_seq_length=500, + report_to="none", + ) + trainer = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + ) + + self.assertFalse(trainer.train_dataset.infinite) + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) + + # make sure the trainer did 5 steps + self.assertIn("model.safetensors", os.listdir(tmp_dir + "/checkpoint-4")) + + def test_sft_trainer_with_model_neftune(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + eval_strategy="steps", + max_steps=2, + eval_steps=1, + save_steps=1, + per_device_train_batch_size=2, + neftune_noise_alpha=5, + packing=True, + report_to="none", + ) + trainer = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + ) + + trainer.model = trainer._activate_neftune(trainer.model) + + device = trainer.model.get_input_embeddings().weight.device + trainer.model.train() + + torch.random.manual_seed(42) + embeds_neftune = trainer.model.get_input_embeddings()(torch.LongTensor([[1, 0, 1]]).to(device)) + + torch.random.manual_seed(24) + embeds_neftune_2 = trainer.model.get_input_embeddings()(torch.LongTensor([[1, 0, 1]]).to(device)) + + self.assertFalse(torch.allclose(embeds_neftune, embeds_neftune_2)) + self.assertGreater(len(trainer.model.get_input_embeddings()._forward_hooks), 0) + + trainer.neftune_hook_handle.remove() + + trainer.train() + + # Make sure forward pass works fine + _ = trainer.model(torch.LongTensor([[1, 0, 1]]).to(device)) + self.assertEqual(len(trainer.model.get_input_embeddings()._forward_hooks), 0) + + @require_peft + def test_peft_sft_trainer_str(self): + with tempfile.TemporaryDirectory() as tmp_dir: + peft_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + ) + + training_args = SFTConfig( + packing=True, + output_dir=tmp_dir, + report_to="none", + ) + + _ = SFTTrainer( + model=self.model_id, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + peft_config=peft_config, + ) + + @require_peft + def test_peft_sft_trainer(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + eval_strategy="steps", + max_steps=4, + eval_steps=2, + save_steps=2, + per_device_train_batch_size=2, + packing=True, + report_to="none", + ) + + peft_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + ) + + trainer = SFTTrainer( + model=self.model_id, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + peft_config=peft_config, + ) + + self.assertTrue(isinstance(trainer.model, PeftModel)) + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) + self.assertIsNotNone(trainer.state.log_history[0]["eval_loss"]) + + self.assertIn("adapter_model.safetensors", os.listdir(tmp_dir + "/checkpoint-2")) + self.assertIn("adapter_config.json", os.listdir(tmp_dir + "/checkpoint-2")) + self.assertNotIn("model.safetensors", os.listdir(tmp_dir + "/checkpoint-2")) + + @require_peft + def test_peft_sft_trainer_gc(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + eval_strategy="steps", + max_steps=4, + eval_steps=2, + save_steps=2, + per_device_train_batch_size=2, + gradient_checkpointing=True, + packing=True, + report_to="none", + ) + + peft_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + ) + + trainer = SFTTrainer( + model=self.model_id, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + peft_config=peft_config, + ) + + self.assertIsInstance(trainer.model, PeftModel) + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) + self.assertIsNotNone(trainer.state.log_history[0]["eval_loss"]) + + self.assertIn("adapter_model.safetensors", os.listdir(tmp_dir + "/checkpoint-2")) + self.assertIn("adapter_config.json", os.listdir(tmp_dir + "/checkpoint-2")) + self.assertNotIn("model.safetensors", os.listdir(tmp_dir + "/checkpoint-2")) + + @require_peft + def test_peft_sft_trainer_neftune(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + eval_strategy="steps", + max_steps=4, + eval_steps=2, + save_steps=2, + per_device_train_batch_size=2, + neftune_noise_alpha=5, + packing=True, + report_to="none", + ) + + peft_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + ) + + trainer = SFTTrainer( + model=self.model_id, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + peft_config=peft_config, + ) + + trainer.model = trainer._activate_neftune(trainer.model) + + self.assertIsInstance(trainer.model, PeftModel) + + device = trainer.model.get_input_embeddings().weight.device + trainer.model.train() + + torch.random.manual_seed(42) + embeds_neftune = trainer.model.get_input_embeddings()(torch.LongTensor([[1, 0, 1]]).to(device)) + + torch.random.manual_seed(24) + embeds_neftune_2 = trainer.model.get_input_embeddings()(torch.LongTensor([[1, 0, 1]]).to(device)) + + self.assertFalse(torch.allclose(embeds_neftune, embeds_neftune_2)) + self.assertGreater(len(trainer.model.get_input_embeddings()._forward_hooks), 0) + + trainer.neftune_hook_handle.remove() + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) + self.assertIsNotNone(trainer.state.log_history[0]["eval_loss"]) + + self.assertIn("adapter_model.safetensors", os.listdir(tmp_dir + "/checkpoint-2")) + self.assertIn("adapter_config.json", os.listdir(tmp_dir + "/checkpoint-2")) + self.assertNotIn("model.safetensors", os.listdir(tmp_dir + "/checkpoint-2")) + + # Make sure forward pass works fine to check if embeddings forward is not broken. + _ = trainer.model(torch.LongTensor([[1, 0, 1]]).to(device)) + self.assertEqual(len(trainer.model.get_input_embeddings()._forward_hooks), 0) + + @require_peft + def test_peft_sft_trainer_tag(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + eval_strategy="steps", + max_steps=4, + eval_steps=2, + save_steps=2, + per_device_train_batch_size=2, + gradient_checkpointing=True, + packing=True, + report_to="none", + ) + + peft_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + ) + + trainer = SFTTrainer( + model=self.model_id, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + peft_config=peft_config, + ) + + for tag in ["sft", "trl"]: + self.assertIn(tag, trainer.model.model_tags) + + @require_peft + def test_sft_trainer_tag(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + eval_strategy="steps", + max_steps=4, + eval_steps=2, + save_steps=2, + per_device_train_batch_size=2, + gradient_checkpointing=True, + packing=True, + report_to="none", + ) + + trainer = SFTTrainer( + model=self.model_id, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + ) + + for tag in ["sft", "trl"]: + self.assertIn(tag, trainer.model.model_tags) + + def test_sft_trainer_only_train_packing(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + eval_strategy="steps", + max_steps=4, + eval_steps=2, + save_steps=2, + per_device_train_batch_size=2, + gradient_checkpointing=True, + packing=True, + max_seq_length=16, # make sure there is at least 1 packed sequence + eval_packing=False, + report_to="none", + ) + + trainer = SFTTrainer( + model=self.model_id, + args=training_args, + train_dataset=self.conversational_lm_dataset["train"], + eval_dataset=self.conversational_lm_dataset["test"], + ) + + self.assertEqual(len(trainer.train_dataset["input_ids"]), 21) + self.assertEqual(len(trainer.eval_dataset["input_ids"]), len(self.conversational_lm_dataset["test"])) + + def test_sft_trainer_eval_packing(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + eval_strategy="steps", + max_steps=4, + eval_steps=2, + save_steps=2, + per_device_train_batch_size=2, + gradient_checkpointing=True, + max_seq_length=16, # make sure there is at least 1 packed sequence + packing=True, + report_to="none", + ) + trainer = SFTTrainer( + model=self.model_id, + args=training_args, + train_dataset=self.conversational_lm_dataset["train"], + eval_dataset=self.conversational_lm_dataset["test"], + ) + + self.assertEqual(len(trainer.train_dataset["input_ids"]), 21) + self.assertEqual(len(trainer.eval_dataset["input_ids"]), 2) + + def test_sft_trainer_no_packing(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + eval_strategy="steps", + max_steps=4, + eval_steps=2, + save_steps=2, + per_device_train_batch_size=2, + gradient_checkpointing=True, + max_seq_length=16, # make sure there is at least 1 packed sequence + packing=False, + report_to="none", + ) + trainer = SFTTrainer( + model=self.model_id, + args=training_args, + train_dataset=self.conversational_lm_dataset["train"], + eval_dataset=self.conversational_lm_dataset["test"], + ) + + self.assertEqual(len(trainer.train_dataset["input_ids"]), len(self.conversational_lm_dataset["train"])) + self.assertEqual(len(trainer.eval_dataset["input_ids"]), len(self.conversational_lm_dataset["test"])) + + @require_vision + def test_sft_trainer_skip_prepare_dataset(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + eval_strategy="steps", + max_steps=4, + eval_steps=2, + save_steps=2, + per_device_train_batch_size=2, + gradient_checkpointing=True, + remove_unused_columns=False, + dataset_kwargs={"skip_prepare_dataset": True}, + report_to="none", + ) + + trainer = SFTTrainer( + model=self.model_id, + args=training_args, + train_dataset=self.dummy_vsft_instruction_dataset, + eval_dataset=self.dummy_vsft_instruction_dataset, + ) + self.assertEqual(trainer.train_dataset.features, self.dummy_vsft_instruction_dataset.features) + self.assertEqual(trainer.eval_dataset.features, self.dummy_vsft_instruction_dataset.features) + + def test_sft_trainer_skip_prepare_dataset_with_no_packing(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + max_steps=4, + eval_steps=2, + save_steps=2, + per_device_train_batch_size=2, + gradient_checkpointing=True, + remove_unused_columns=False, + packing=False, + dataset_kwargs={"skip_prepare_dataset": True}, + report_to="none", + ) + + trainer = SFTTrainer( + model=self.model_id, + args=training_args, + train_dataset=self.dummy_dataset, + ) + self.assertEqual(trainer.train_dataset.features, self.dummy_dataset.features) + + @require_vision + def test_sft_trainer_llava(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + dataloader_drop_last=True, + eval_strategy="steps", + max_steps=4, + eval_steps=2, + save_steps=2, + per_device_train_batch_size=2, + per_device_eval_batch_size=2, + remove_unused_columns=False, + dataset_kwargs={"skip_prepare_dataset": True}, + report_to="none", + ) + tiny_llava = LlavaForConditionalGeneration.from_pretrained( + "trl-internal-testing/tiny-random-LlavaForConditionalGeneration" + ) + processor = AutoProcessor.from_pretrained("trl-internal-testing/tiny-random-LlavaForConditionalGeneration") + + processor.chat_template = """{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. {% for message in messages %}{% if message['role'] == 'user' %}USER: {% else %}ASSISTANT: {% endif %}{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}{% endif %}{% endfor %}{% if message['role'] == 'user' %} {% else %}{{eos_token}}{% endif %}{% endfor %}{% if add_generation_prompt %}ASSISTANT: {% endif %}""" + + def collate_fn(examples): + # Get the texts and images, and apply the chat template + texts = [processor.apply_chat_template(example["messages"], tokenize=False) for example in examples] + images = [example["images"][0] for example in examples] + + # Tokenize the texts and process the images + batch = processor(texts, images, return_tensors="pt", padding=True) + + # The labels are the input_ids, and we mask the padding tokens in the loss computation + labels = batch["input_ids"].clone() + labels[labels == processor.tokenizer.pad_token_id] = -100 + batch["labels"] = labels + + return batch + + trainer = SFTTrainer( + model=tiny_llava, + args=training_args, + data_collator=collate_fn, + train_dataset=self.dummy_vsft_instruction_dataset, + eval_dataset=self.dummy_vsft_instruction_dataset, + ) + + trainer.train() + + self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) + self.assertIsNotNone(trainer.state.log_history[0]["eval_loss"]) + + self.assertIn("model.safetensors", os.listdir(tmp_dir + "/checkpoint-2")) + + def test_sft_trainer_torch_dtype(self): + # See https://github.com/huggingface/trl/issues/1751 + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + eval_strategy="steps", + max_steps=4, + eval_steps=2, + save_steps=2, + per_device_train_batch_size=2, + model_init_kwargs={"torch_dtype": torch.float16}, + report_to="none", + ) + trainer = SFTTrainer( + model=self.model_id, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + formatting_func=formatting_prompts_func, + ) + self.assertEqual(trainer.model.config.torch_dtype, torch.float16) + + # Now test when `torch_dtype` is provided but is wrong + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = SFTConfig( + output_dir=tmp_dir, + eval_strategy="steps", + max_steps=4, + eval_steps=2, + save_steps=2, + per_device_train_batch_size=2, + model_init_kwargs={"torch_dtype": -1}, + report_to="none", + ) + with self.assertRaises(ValueError) as context: + _ = SFTTrainer( + model=self.model_id, + args=training_args, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + ) + + self.assertIn( + "Invalid `torch_dtype` passed to the SFTConfig. Expected a string with either `torch.dtype` or 'auto', but got -1.", + str(context.exception), + )