ZTWHHH committed on
Commit
1a81ff5
·
verified ·
1 Parent(s): c84a673

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. parrot/lib/libbz2.so +3 -0
  3. parrot/lib/libz.so +3 -0
  4. parrot/lib/python3.10/idlelib/Icons/idle_16.gif +3 -0
  5. parrot/lib/python3.10/idlelib/Icons/tk.gif +3 -0
  6. parrot/lib/python3.10/site-packages/einops/__pycache__/__init__.cpython-310.pyc +0 -0
  7. parrot/lib/python3.10/site-packages/einops/__pycache__/_torch_specific.cpython-310.pyc +0 -0
  8. parrot/lib/python3.10/site-packages/einops/__pycache__/einops.cpython-310.pyc +0 -0
  9. parrot/lib/python3.10/site-packages/einops/__pycache__/packing.cpython-310.pyc +0 -0
  10. parrot/lib/python3.10/site-packages/einops/layers/__pycache__/__init__.cpython-310.pyc +0 -0
  11. parrot/lib/python3.10/site-packages/einops/layers/__pycache__/gluon.cpython-310.pyc +0 -0
  12. parrot/lib/python3.10/site-packages/einops/layers/__pycache__/oneflow.cpython-310.pyc +0 -0
  13. parrot/lib/python3.10/site-packages/jsonschema-4.23.0.dist-info/INSTALLER +1 -0
  14. parrot/lib/python3.10/site-packages/jsonschema-4.23.0.dist-info/METADATA +176 -0
  15. parrot/lib/python3.10/site-packages/jsonschema-4.23.0.dist-info/RECORD +77 -0
  16. parrot/lib/python3.10/site-packages/jsonschema-4.23.0.dist-info/REQUESTED +0 -0
  17. parrot/lib/python3.10/site-packages/jsonschema-4.23.0.dist-info/WHEEL +4 -0
  18. parrot/lib/python3.10/site-packages/jsonschema-4.23.0.dist-info/entry_points.txt +2 -0
  19. parrot/lib/python3.10/site-packages/jsonschema-4.23.0.dist-info/licenses/COPYING +19 -0
  20. parrot/lib/python3.10/site-packages/timm/__init__.py +4 -0
  21. parrot/lib/python3.10/site-packages/timm/layers/activations_jit.py +90 -0
  22. parrot/lib/python3.10/site-packages/timm/layers/bottleneck_attn.py +157 -0
  23. parrot/lib/python3.10/site-packages/timm/layers/classifier.py +208 -0
  24. parrot/lib/python3.10/site-packages/timm/layers/create_norm.py +57 -0
  25. parrot/lib/python3.10/site-packages/timm/layers/drop.py +169 -0
  26. parrot/lib/python3.10/site-packages/timm/layers/global_context.py +67 -0
  27. parrot/lib/python3.10/site-packages/timm/layers/halo_attn.py +233 -0
  28. parrot/lib/python3.10/site-packages/timm/layers/interpolate.py +68 -0
  29. parrot/lib/python3.10/site-packages/timm/layers/mixed_conv2d.py +51 -0
  30. parrot/lib/python3.10/site-packages/timm/layers/padding.py +79 -0
  31. parrot/lib/python3.10/site-packages/timm/layers/pos_embed_sincos.py +444 -0
  32. parrot/lib/python3.10/site-packages/timm/layers/split_batchnorm.py +75 -0
  33. parrot/lib/python3.10/site-packages/timm/layers/trace_utils.py +13 -0
  34. parrot/lib/python3.10/site-packages/timm/optim/__init__.py +17 -0
  35. parrot/lib/python3.10/site-packages/timm/optim/__pycache__/__init__.cpython-310.pyc +0 -0
  36. parrot/lib/python3.10/site-packages/timm/optim/__pycache__/adabelief.cpython-310.pyc +0 -0
  37. parrot/lib/python3.10/site-packages/timm/optim/__pycache__/adafactor.cpython-310.pyc +0 -0
  38. parrot/lib/python3.10/site-packages/timm/optim/__pycache__/adamp.cpython-310.pyc +0 -0
  39. parrot/lib/python3.10/site-packages/timm/optim/__pycache__/lars.cpython-310.pyc +0 -0
  40. parrot/lib/python3.10/site-packages/timm/optim/__pycache__/lion.cpython-310.pyc +0 -0
  41. parrot/lib/python3.10/site-packages/timm/optim/__pycache__/lookahead.cpython-310.pyc +0 -0
  42. parrot/lib/python3.10/site-packages/timm/optim/__pycache__/madgrad.cpython-310.pyc +0 -0
  43. parrot/lib/python3.10/site-packages/timm/optim/__pycache__/nadam.cpython-310.pyc +0 -0
  44. parrot/lib/python3.10/site-packages/timm/optim/__pycache__/nadamw.cpython-310.pyc +0 -0
  45. parrot/lib/python3.10/site-packages/timm/optim/__pycache__/nvnovograd.cpython-310.pyc +0 -0
  46. parrot/lib/python3.10/site-packages/timm/optim/__pycache__/optim_factory.cpython-310.pyc +0 -0
  47. parrot/lib/python3.10/site-packages/timm/optim/__pycache__/rmsprop_tf.cpython-310.pyc +0 -0
  48. parrot/lib/python3.10/site-packages/timm/optim/__pycache__/sgdp.cpython-310.pyc +0 -0
  49. parrot/lib/python3.10/site-packages/timm/optim/adabelief.py +201 -0
  50. parrot/lib/python3.10/site-packages/timm/optim/adafactor.py +167 -0
.gitattributes CHANGED
@@ -73,3 +73,5 @@ parrot/bin/unxz filter=lfs diff=lfs merge=lfs -text
73
  parrot/lib/libncurses.so.6 filter=lfs diff=lfs merge=lfs -text
74
  parrot/bin/xzcat filter=lfs diff=lfs merge=lfs -text
75
  parrot/lib/libncurses++.a filter=lfs diff=lfs merge=lfs -text
 
 
 
73
  parrot/lib/libncurses.so.6 filter=lfs diff=lfs merge=lfs -text
74
  parrot/bin/xzcat filter=lfs diff=lfs merge=lfs -text
75
  parrot/lib/libncurses++.a filter=lfs diff=lfs merge=lfs -text
76
+ parrot/lib/libbz2.so filter=lfs diff=lfs merge=lfs -text
77
+ parrot/lib/libz.so filter=lfs diff=lfs merge=lfs -text
parrot/lib/libbz2.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4979469ae49ac144f62202f75bbdd69b17197aedb879d633337c8cf7e4aba301
3
+ size 229016
parrot/lib/libz.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0b0e682a9dc7fd4895a6783288f851b793dc89633f28714027974fa4d66f3914
3
+ size 124744
parrot/lib/python3.10/idlelib/Icons/idle_16.gif ADDED

Git LFS Details

  • SHA256: fe3af292b38660a8a58b1a8b4fa4240aa190602e7e9a700ea0536b3181fc968e
  • Pointer size: 128 Bytes
  • Size of remote file: 634 Bytes
parrot/lib/python3.10/idlelib/Icons/tk.gif ADDED

Git LFS Details

  • SHA256: 7f16cb2e322891dbd9101302c09ffda0c2a3a72d053bb8c0927d507414c59cad
  • Pointer size: 127 Bytes
  • Size of remote file: 72 Bytes
parrot/lib/python3.10/site-packages/einops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (695 Bytes). View file
 
parrot/lib/python3.10/site-packages/einops/__pycache__/_torch_specific.cpython-310.pyc ADDED
Binary file (3.85 kB). View file
 
parrot/lib/python3.10/site-packages/einops/__pycache__/einops.cpython-310.pyc ADDED
Binary file (25.3 kB). View file
 
parrot/lib/python3.10/site-packages/einops/__pycache__/packing.cpython-310.pyc ADDED
Binary file (6.56 kB). View file
 
parrot/lib/python3.10/site-packages/einops/layers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.36 kB). View file
 
parrot/lib/python3.10/site-packages/einops/layers/__pycache__/gluon.cpython-310.pyc ADDED
Binary file (2.26 kB). View file
 
parrot/lib/python3.10/site-packages/einops/layers/__pycache__/oneflow.cpython-310.pyc ADDED
Binary file (2.16 kB). View file
 
parrot/lib/python3.10/site-packages/jsonschema-4.23.0.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
parrot/lib/python3.10/site-packages/jsonschema-4.23.0.dist-info/METADATA ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.3
2
+ Name: jsonschema
3
+ Version: 4.23.0
4
+ Summary: An implementation of JSON Schema validation for Python
5
+ Project-URL: Homepage, https://github.com/python-jsonschema/jsonschema
6
+ Project-URL: Documentation, https://python-jsonschema.readthedocs.io/
7
+ Project-URL: Issues, https://github.com/python-jsonschema/jsonschema/issues/
8
+ Project-URL: Funding, https://github.com/sponsors/Julian
9
+ Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-jsonschema?utm_source=pypi-jsonschema&utm_medium=referral&utm_campaign=pypi-link
10
+ Project-URL: Changelog, https://github.com/python-jsonschema/jsonschema/blob/main/CHANGELOG.rst
11
+ Project-URL: Source, https://github.com/python-jsonschema/jsonschema
12
+ Author-email: Julian Berman <Julian+jsonschema@GrayVines.com>
13
+ License: MIT
14
+ License-File: COPYING
15
+ Keywords: data validation,json,json schema,jsonschema,validation
16
+ Classifier: Development Status :: 5 - Production/Stable
17
+ Classifier: Intended Audience :: Developers
18
+ Classifier: License :: OSI Approved :: MIT License
19
+ Classifier: Operating System :: OS Independent
20
+ Classifier: Programming Language :: Python
21
+ Classifier: Programming Language :: Python :: 3.8
22
+ Classifier: Programming Language :: Python :: 3.9
23
+ Classifier: Programming Language :: Python :: 3.10
24
+ Classifier: Programming Language :: Python :: 3.11
25
+ Classifier: Programming Language :: Python :: 3.12
26
+ Classifier: Programming Language :: Python :: 3.13
27
+ Classifier: Programming Language :: Python :: Implementation :: CPython
28
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
29
+ Classifier: Topic :: File Formats :: JSON
30
+ Classifier: Topic :: File Formats :: JSON :: JSON Schema
31
+ Requires-Python: >=3.8
32
+ Requires-Dist: attrs>=22.2.0
33
+ Requires-Dist: importlib-resources>=1.4.0; python_version < '3.9'
34
+ Requires-Dist: jsonschema-specifications>=2023.03.6
35
+ Requires-Dist: pkgutil-resolve-name>=1.3.10; python_version < '3.9'
36
+ Requires-Dist: referencing>=0.28.4
37
+ Requires-Dist: rpds-py>=0.7.1
38
+ Provides-Extra: format
39
+ Requires-Dist: fqdn; extra == 'format'
40
+ Requires-Dist: idna; extra == 'format'
41
+ Requires-Dist: isoduration; extra == 'format'
42
+ Requires-Dist: jsonpointer>1.13; extra == 'format'
43
+ Requires-Dist: rfc3339-validator; extra == 'format'
44
+ Requires-Dist: rfc3987; extra == 'format'
45
+ Requires-Dist: uri-template; extra == 'format'
46
+ Requires-Dist: webcolors>=1.11; extra == 'format'
47
+ Provides-Extra: format-nongpl
48
+ Requires-Dist: fqdn; extra == 'format-nongpl'
49
+ Requires-Dist: idna; extra == 'format-nongpl'
50
+ Requires-Dist: isoduration; extra == 'format-nongpl'
51
+ Requires-Dist: jsonpointer>1.13; extra == 'format-nongpl'
52
+ Requires-Dist: rfc3339-validator; extra == 'format-nongpl'
53
+ Requires-Dist: rfc3986-validator>0.1.0; extra == 'format-nongpl'
54
+ Requires-Dist: uri-template; extra == 'format-nongpl'
55
+ Requires-Dist: webcolors>=24.6.0; extra == 'format-nongpl'
56
+ Description-Content-Type: text/x-rst
57
+
58
+ ==========
59
+ jsonschema
60
+ ==========
61
+
62
+ |PyPI| |Pythons| |CI| |ReadTheDocs| |Precommit| |Zenodo|
63
+
64
+ .. |PyPI| image:: https://img.shields.io/pypi/v/jsonschema.svg
65
+ :alt: PyPI version
66
+ :target: https://pypi.org/project/jsonschema/
67
+
68
+ .. |Pythons| image:: https://img.shields.io/pypi/pyversions/jsonschema.svg
69
+ :alt: Supported Python versions
70
+ :target: https://pypi.org/project/jsonschema/
71
+
72
+ .. |CI| image:: https://github.com/python-jsonschema/jsonschema/workflows/CI/badge.svg
73
+ :alt: Build status
74
+ :target: https://github.com/python-jsonschema/jsonschema/actions?query=workflow%3ACI
75
+
76
+ .. |ReadTheDocs| image:: https://readthedocs.org/projects/python-jsonschema/badge/?version=stable&style=flat
77
+ :alt: ReadTheDocs status
78
+ :target: https://python-jsonschema.readthedocs.io/en/stable/
79
+
80
+ .. |Precommit| image:: https://results.pre-commit.ci/badge/github/python-jsonschema/jsonschema/main.svg
81
+ :alt: pre-commit.ci status
82
+ :target: https://results.pre-commit.ci/latest/github/python-jsonschema/jsonschema/main
83
+
84
+ .. |Zenodo| image:: https://zenodo.org/badge/3072629.svg
85
+ :alt: Zenodo DOI
86
+ :target: https://zenodo.org/badge/latestdoi/3072629
87
+
88
+
89
+ ``jsonschema`` is an implementation of the `JSON Schema <https://json-schema.org>`_ specification for Python.
90
+
91
+ .. code:: python
92
+
93
+ >>> from jsonschema import validate
94
+
95
+ >>> # A sample schema, like what we'd get from json.load()
96
+ >>> schema = {
97
+ ... "type" : "object",
98
+ ... "properties" : {
99
+ ... "price" : {"type" : "number"},
100
+ ... "name" : {"type" : "string"},
101
+ ... },
102
+ ... }
103
+
104
+ >>> # If no exception is raised by validate(), the instance is valid.
105
+ >>> validate(instance={"name" : "Eggs", "price" : 34.99}, schema=schema)
106
+
107
+ >>> validate(
108
+ ... instance={"name" : "Eggs", "price" : "Invalid"}, schema=schema,
109
+ ... ) # doctest: +IGNORE_EXCEPTION_DETAIL
110
+ Traceback (most recent call last):
111
+ ...
112
+ ValidationError: 'Invalid' is not of type 'number'
113
+
114
+ It can also be used from the command line by installing `check-jsonschema <https://github.com/python-jsonschema/check-jsonschema>`_.
115
+
116
+ Features
117
+ --------
118
+
119
+ * Full support for `Draft 2020-12 <https://python-jsonschema.readthedocs.io/en/latest/api/jsonschema/validators/#jsonschema.validators.Draft202012Validator>`_, `Draft 2019-09 <https://python-jsonschema.readthedocs.io/en/latest/api/jsonschema/validators/#jsonschema.validators.Draft201909Validator>`_, `Draft 7 <https://python-jsonschema.readthedocs.io/en/latest/api/jsonschema/validators/#jsonschema.validators.Draft7Validator>`_, `Draft 6 <https://python-jsonschema.readthedocs.io/en/latest/api/jsonschema/validators/#jsonschema.validators.Draft6Validator>`_, `Draft 4 <https://python-jsonschema.readthedocs.io/en/latest/api/jsonschema/validators/#jsonschema.validators.Draft4Validator>`_ and `Draft 3 <https://python-jsonschema.readthedocs.io/en/latest/api/jsonschema/validators/#jsonschema.validators.Draft3Validator>`_
120
+
121
+ * `Lazy validation <https://python-jsonschema.readthedocs.io/en/latest/api/jsonschema/protocols/#jsonschema.protocols.Validator.iter_errors>`_ that can iteratively report *all* validation errors.
122
+
123
+ * `Programmatic querying <https://python-jsonschema.readthedocs.io/en/latest/errors/>`_ of which properties or items failed validation.
124
+
125
+
126
+ Installation
127
+ ------------
128
+
129
+ ``jsonschema`` is available on `PyPI <https://pypi.org/project/jsonschema/>`_. You can install using `pip <https://pip.pypa.io/en/stable/>`_:
130
+
131
+ .. code:: bash
132
+
133
+ $ pip install jsonschema
134
+
135
+
136
+ Extras
137
+ ======
138
+
139
+ Two extras are available when installing the package, both currently related to ``format`` validation:
140
+
141
+ * ``format``
142
+ * ``format-nongpl``
143
+
144
+ They can be used when installing in order to include additional dependencies, e.g.:
145
+
146
+ .. code:: bash
147
+
148
+ $ pip install jsonschema'[format]'
149
+
150
+ Be aware that the mere presence of these dependencies – or even the specification of ``format`` checks in a schema – do *not* activate format checks (as per the specification).
151
+ Please read the `format validation documentation <https://python-jsonschema.readthedocs.io/en/latest/validate/#validating-formats>`_ for further details.
152
+
153
+ About
154
+ -----
155
+
156
+ I'm Julian Berman.
157
+
158
+ ``jsonschema`` is on `GitHub <https://github.com/python-jsonschema/jsonschema>`_.
159
+
160
+ Get in touch, via GitHub or otherwise, if you've got something to contribute, it'd be most welcome!
161
+
162
+ You can also generally find me on Libera (nick: ``Julian``) in various channels, including ``#python``.
163
+
164
+ If you feel overwhelmingly grateful, you can also `sponsor me <https://github.com/sponsors/Julian/>`_.
165
+
166
+ And for companies who appreciate ``jsonschema`` and its continued support and growth, ``jsonschema`` is also now supportable via `TideLift <https://tidelift.com/subscription/pkg/pypi-jsonschema?utm_source=pypi-jsonschema&utm_medium=referral&utm_campaign=readme>`_.
167
+
168
+
169
+ Release Information
170
+ -------------------
171
+
172
+ v4.23.0
173
+ =======
174
+
175
+ * Do not reorder dictionaries (schemas, instances) that are printed as part of validation errors.
176
+ * Declare support for Py3.13
parrot/lib/python3.10/site-packages/jsonschema-4.23.0.dist-info/RECORD ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ../../../bin/jsonschema,sha256=ET9DZtsyLI1qBFCWG8TO9oRznNElzlWQBAszBzgnofg,225
2
+ jsonschema-4.23.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
3
+ jsonschema-4.23.0.dist-info/METADATA,sha256=Hd96gAfdO0v5RpFeT25qjyo7PvhASy56F4Jw3FUUTlo,7906
4
+ jsonschema-4.23.0.dist-info/RECORD,,
5
+ jsonschema-4.23.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ jsonschema-4.23.0.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
7
+ jsonschema-4.23.0.dist-info/entry_points.txt,sha256=vO7rX4Fs_xIVJy2pnAtKgTSxfpnozAVQ0DjCmpMxnWE,51
8
+ jsonschema-4.23.0.dist-info/licenses/COPYING,sha256=T5KgFaE8TRoEC-8BiqE0MLTxvHO0Gxa7hGw0Z2bedDk,1057
9
+ jsonschema/__init__.py,sha256=LkPwscySlJ9lTOp7ZB1M7jQ8mbG7-bYG41iBwbZ-o9s,3941
10
+ jsonschema/__main__.py,sha256=iLsZf2upUB3ilBKTlMnyK-HHt2Cnnfkwwxi_c6gLvSA,115
11
+ jsonschema/__pycache__/__init__.cpython-310.pyc,,
12
+ jsonschema/__pycache__/__main__.cpython-310.pyc,,
13
+ jsonschema/__pycache__/_format.cpython-310.pyc,,
14
+ jsonschema/__pycache__/_keywords.cpython-310.pyc,,
15
+ jsonschema/__pycache__/_legacy_keywords.cpython-310.pyc,,
16
+ jsonschema/__pycache__/_types.cpython-310.pyc,,
17
+ jsonschema/__pycache__/_typing.cpython-310.pyc,,
18
+ jsonschema/__pycache__/_utils.cpython-310.pyc,,
19
+ jsonschema/__pycache__/cli.cpython-310.pyc,,
20
+ jsonschema/__pycache__/exceptions.cpython-310.pyc,,
21
+ jsonschema/__pycache__/protocols.cpython-310.pyc,,
22
+ jsonschema/__pycache__/validators.cpython-310.pyc,,
23
+ jsonschema/_format.py,sha256=F_MA52IkrhOIxDqD8x-01bH37mG5nh0kyNrWUSLtWb8,14591
24
+ jsonschema/_keywords.py,sha256=r8_DrqAfn6QLwQnmXEggveiSU-UaIL2p2nuPINelfFc,14949
25
+ jsonschema/_legacy_keywords.py,sha256=2tWuwRPWbYS7EAl8wBIC_rabGuv1J4dfYLqNEPpShhA,15191
26
+ jsonschema/_types.py,sha256=HQ5QD_oL85zF1FSW2v-5rvfYF0967HJdxSR88kzw2mY,5367
27
+ jsonschema/_typing.py,sha256=NZhPhkBOn9INYZk8G69rDeuRamztgXCMLh10z9cfT6g,610
28
+ jsonschema/_utils.py,sha256=ODga3vrJ6K2wMGxerpgn4ipc9q7ZSqBsvwKU4embLEE,10642
29
+ jsonschema/benchmarks/__init__.py,sha256=A0sQrxDBVHSyQ-8ru3L11hMXf3q9gVuB9x_YgHb4R9M,70
30
+ jsonschema/benchmarks/__pycache__/__init__.cpython-310.pyc,,
31
+ jsonschema/benchmarks/__pycache__/const_vs_enum.cpython-310.pyc,,
32
+ jsonschema/benchmarks/__pycache__/contains.cpython-310.pyc,,
33
+ jsonschema/benchmarks/__pycache__/issue232.cpython-310.pyc,,
34
+ jsonschema/benchmarks/__pycache__/json_schema_test_suite.cpython-310.pyc,,
35
+ jsonschema/benchmarks/__pycache__/nested_schemas.cpython-310.pyc,,
36
+ jsonschema/benchmarks/__pycache__/subcomponents.cpython-310.pyc,,
37
+ jsonschema/benchmarks/__pycache__/unused_registry.cpython-310.pyc,,
38
+ jsonschema/benchmarks/__pycache__/useless_applicator_schemas.cpython-310.pyc,,
39
+ jsonschema/benchmarks/__pycache__/useless_keywords.cpython-310.pyc,,
40
+ jsonschema/benchmarks/__pycache__/validator_creation.cpython-310.pyc,,
41
+ jsonschema/benchmarks/const_vs_enum.py,sha256=DVFi3WDqBalZFOibnjpX1uTSr3Rxa2cPgFcowd7Ukrs,830
42
+ jsonschema/benchmarks/contains.py,sha256=gexQoUrCOwECofbt19BeosQZ7WFL6PDdkX49DWwBlOg,786
43
+ jsonschema/benchmarks/issue232.py,sha256=3LLYLIlBGQnVuyyo2iAv-xky5P6PRFHANx4-zIIQOoE,521
44
+ jsonschema/benchmarks/issue232/issue.json,sha256=eaPOZjMRu5u8RpKrsA9uk7ucPZS5tkKG4D_hkOTQ3Hk,117105
45
+ jsonschema/benchmarks/json_schema_test_suite.py,sha256=PvfabpUYcF4_7csYDTcTauED8rnFEGYbdY5RqTXD08s,320
46
+ jsonschema/benchmarks/nested_schemas.py,sha256=mo07dx-CIgmSOI62CNs4g5xu1FzHklLBpkQoDxWYcKs,1892
47
+ jsonschema/benchmarks/subcomponents.py,sha256=fEyiMzsWeK2pd7DEGCuuY-vzGunwhHczRBWEnBRLKIo,1113
48
+ jsonschema/benchmarks/unused_registry.py,sha256=hwRwONc9cefPtYzkoX_TYRO3GyUojriv0-YQaK3vnj0,940
49
+ jsonschema/benchmarks/useless_applicator_schemas.py,sha256=EVm5-EtOEFoLP_Vt2j4SrCwlx05NhPqNuZQ6LIMP1Dc,3342
50
+ jsonschema/benchmarks/useless_keywords.py,sha256=bj_zKr1oVctFlqyZaObCsYTgFjiiNgPzC0hr1Y868mE,867
51
+ jsonschema/benchmarks/validator_creation.py,sha256=UkUQlLAnussnr_KdCIdad6xx2pXxQLmYtsXoiirKeWQ,285
52
+ jsonschema/cli.py,sha256=SGy9JPg02mgXhNxugU8iXhYNivfSjBhKTNAgV90ty-M,8551
53
+ jsonschema/exceptions.py,sha256=RxE2T5xxgg_B6ttR8a3lCbZyh29RUtFe4oZKMoHPBAE,15035
54
+ jsonschema/protocols.py,sha256=7mpZxO1gfRNMCGXwldwsSN3nEugVfIVyKZ_HZgN1vSw,7174
55
+ jsonschema/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
56
+ jsonschema/tests/__pycache__/__init__.cpython-310.pyc,,
57
+ jsonschema/tests/__pycache__/_suite.cpython-310.pyc,,
58
+ jsonschema/tests/__pycache__/fuzz_validate.cpython-310.pyc,,
59
+ jsonschema/tests/__pycache__/test_cli.cpython-310.pyc,,
60
+ jsonschema/tests/__pycache__/test_deprecations.cpython-310.pyc,,
61
+ jsonschema/tests/__pycache__/test_exceptions.cpython-310.pyc,,
62
+ jsonschema/tests/__pycache__/test_format.cpython-310.pyc,,
63
+ jsonschema/tests/__pycache__/test_jsonschema_test_suite.cpython-310.pyc,,
64
+ jsonschema/tests/__pycache__/test_types.cpython-310.pyc,,
65
+ jsonschema/tests/__pycache__/test_utils.cpython-310.pyc,,
66
+ jsonschema/tests/__pycache__/test_validators.cpython-310.pyc,,
67
+ jsonschema/tests/_suite.py,sha256=QAfBj34zMbJQ5_JJ2ogpiTlw9hQ6Is43dvo_bpS0EdM,8156
68
+ jsonschema/tests/fuzz_validate.py,sha256=fUA7yTJIihaCwJplkUehZeyB84HcXEcqtY5oPJXIO7I,1114
69
+ jsonschema/tests/test_cli.py,sha256=uFMu2YbIfbSDCnykhLL4-VR3-jg1tvQLJn2Bliwp_Bw,28587
70
+ jsonschema/tests/test_deprecations.py,sha256=9VxOCfWzMG1Tg4OD8riU_Znd6HDOQZkepzVgxsdUdU8,15760
71
+ jsonschema/tests/test_exceptions.py,sha256=JgC-E1ZFZK2puVBp35WFRnG8CNOiSWLYtyLjh9IvFKI,22591
72
+ jsonschema/tests/test_format.py,sha256=eVm5SMaWF2lOPO28bPAwNvkiQvHCQKy-MnuAgEchfEc,3188
73
+ jsonschema/tests/test_jsonschema_test_suite.py,sha256=a2saPs2Cwwg0sdRdu-uJ8goSXLbqrS-pC48QJy0K4DE,8674
74
+ jsonschema/tests/test_types.py,sha256=cF51KTDmdsx06MrIc4fXKt0X9fIsVgw5uhT8CamVa8U,6977
75
+ jsonschema/tests/test_utils.py,sha256=sao74o1PyYMxBfqweokQN48CFSS6yhJk5FkCfMJ5PsI,4163
76
+ jsonschema/tests/test_validators.py,sha256=eiaigsZMzHYYsniQ1UPygaS56a1d-_7-9NC4wVXAhzs,87975
77
+ jsonschema/validators.py,sha256=H31FwHdyB7LP5eunxdBrZ9E57hpvozfnRlZaOYy45jU,47045
parrot/lib/python3.10/site-packages/jsonschema-4.23.0.dist-info/REQUESTED ADDED
File without changes
parrot/lib/python3.10/site-packages/jsonschema-4.23.0.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.25.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
parrot/lib/python3.10/site-packages/jsonschema-4.23.0.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ [console_scripts]
2
+ jsonschema = jsonschema.cli:main
parrot/lib/python3.10/site-packages/jsonschema-4.23.0.dist-info/licenses/COPYING ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (c) 2013 Julian Berman
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining a copy
4
+ of this software and associated documentation files (the "Software"), to deal
5
+ in the Software without restriction, including without limitation the rights
6
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7
+ copies of the Software, and to permit persons to whom the Software is
8
+ furnished to do so, subject to the following conditions:
9
+
10
+ The above copyright notice and this permission notice shall be included in
11
+ all copies or substantial portions of the Software.
12
+
13
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19
+ THE SOFTWARE.
parrot/lib/python3.10/site-packages/timm/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from .version import __version__
2
+ from .layers import is_scriptable, is_exportable, set_scriptable, set_exportable
3
+ from .models import create_model, list_models, list_pretrained, is_model, list_modules, model_entrypoint, \
4
+ is_model_pretrained, get_pretrained_cfg, get_pretrained_cfg_value
parrot/lib/python3.10/site-packages/timm/layers/activations_jit.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Activations
2
+
3
+ A collection of jit-scripted activations fn and modules with a common interface so that they can
4
+ easily be swapped. All have an `inplace` arg even if not used.
5
+
6
+ All jit scripted activations are lacking in-place variations on purpose, scripted kernel fusion does not
7
+ currently work across in-place op boundaries, thus performance is equal to or less than the non-scripted
8
+ versions if they contain in-place ops.
9
+
10
+ Hacked together by / Copyright 2020 Ross Wightman
11
+ """
12
+
13
+ import torch
14
+ from torch import nn as nn
15
+ from torch.nn import functional as F
16
+
17
+
18
+ @torch.jit.script
19
+ def swish_jit(x, inplace: bool = False):
20
+ """Swish - Described in: https://arxiv.org/abs/1710.05941
21
+ """
22
+ return x.mul(x.sigmoid())
23
+
24
+
25
+ @torch.jit.script
26
+ def mish_jit(x, _inplace: bool = False):
27
+ """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
28
+ """
29
+ return x.mul(F.softplus(x).tanh())
30
+
31
+
32
+ class SwishJit(nn.Module):
33
+ def __init__(self, inplace: bool = False):
34
+ super(SwishJit, self).__init__()
35
+
36
+ def forward(self, x):
37
+ return swish_jit(x)
38
+
39
+
40
+ class MishJit(nn.Module):
41
+ def __init__(self, inplace: bool = False):
42
+ super(MishJit, self).__init__()
43
+
44
+ def forward(self, x):
45
+ return mish_jit(x)
46
+
47
+
48
+ @torch.jit.script
49
+ def hard_sigmoid_jit(x, inplace: bool = False):
50
+ # return F.relu6(x + 3.) / 6.
51
+ return (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster?
52
+
53
+
54
+ class HardSigmoidJit(nn.Module):
55
+ def __init__(self, inplace: bool = False):
56
+ super(HardSigmoidJit, self).__init__()
57
+
58
+ def forward(self, x):
59
+ return hard_sigmoid_jit(x)
60
+
61
+
62
+ @torch.jit.script
63
+ def hard_swish_jit(x, inplace: bool = False):
64
+ # return x * (F.relu6(x + 3.) / 6)
65
+ return x * (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster?
66
+
67
+
68
+ class HardSwishJit(nn.Module):
69
+ def __init__(self, inplace: bool = False):
70
+ super(HardSwishJit, self).__init__()
71
+
72
+ def forward(self, x):
73
+ return hard_swish_jit(x)
74
+
75
+
76
+ @torch.jit.script
77
+ def hard_mish_jit(x, inplace: bool = False):
78
+ """ Hard Mish
79
+ Experimental, based on notes by Mish author Diganta Misra at
80
+ https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
81
+ """
82
+ return 0.5 * x * (x + 2).clamp(min=0, max=2)
83
+
84
+
85
+ class HardMishJit(nn.Module):
86
+ def __init__(self, inplace: bool = False):
87
+ super(HardMishJit, self).__init__()
88
+
89
+ def forward(self, x):
90
+ return hard_mish_jit(x)
parrot/lib/python3.10/site-packages/timm/layers/bottleneck_attn.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Bottleneck Self Attention (Bottleneck Transformers)
2
+
3
+ Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605
4
+
5
+ @misc{2101.11605,
6
+ Author = {Aravind Srinivas and Tsung-Yi Lin and Niki Parmar and Jonathon Shlens and Pieter Abbeel and Ashish Vaswani},
7
+ Title = {Bottleneck Transformers for Visual Recognition},
8
+ Year = {2021},
9
+ }
10
+
11
+ Based on ref gist at: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
12
+
13
+ This impl is a WIP but given that it is based on the ref gist likely not too far off.
14
+
15
+ Hacked together by / Copyright 2021 Ross Wightman
16
+ """
17
+ from typing import List
18
+
19
+ import torch
20
+ import torch.nn as nn
21
+ import torch.nn.functional as F
22
+
23
+ from .helpers import to_2tuple, make_divisible
24
+ from .weight_init import trunc_normal_
25
+ from .trace_utils import _assert
26
+
27
+
28
+ def rel_logits_1d(q, rel_k, permute_mask: List[int]):
29
+ """ Compute relative logits along one dimension
30
+
31
+ As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
32
+ Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
33
+
34
+ Args:
35
+ q: (batch, heads, height, width, dim)
36
+ rel_k: (2 * width - 1, dim)
37
+ permute_mask: permute output dim according to this
38
+ """
39
+ B, H, W, dim = q.shape
40
+ x = (q @ rel_k.transpose(-1, -2))
41
+ x = x.reshape(-1, W, 2 * W -1)
42
+
43
+ # pad to shift from relative to absolute indexing
44
+ x_pad = F.pad(x, [0, 1]).flatten(1)
45
+ x_pad = F.pad(x_pad, [0, W - 1])
46
+
47
+ # reshape and slice out the padded elements
48
+ x_pad = x_pad.reshape(-1, W + 1, 2 * W - 1)
49
+ x = x_pad[:, :W, W - 1:]
50
+
51
+ # reshape and tile
52
+ x = x.reshape(B, H, 1, W, W).expand(-1, -1, H, -1, -1)
53
+ return x.permute(permute_mask)
54
+
55
+
56
+ class PosEmbedRel(nn.Module):
57
+ """ Relative Position Embedding
58
+ As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
59
+ Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
60
+ """
61
+ def __init__(self, feat_size, dim_head, scale):
62
+ super().__init__()
63
+ self.height, self.width = to_2tuple(feat_size)
64
+ self.dim_head = dim_head
65
+ self.height_rel = nn.Parameter(torch.randn(self.height * 2 - 1, dim_head) * scale)
66
+ self.width_rel = nn.Parameter(torch.randn(self.width * 2 - 1, dim_head) * scale)
67
+
68
+ def forward(self, q):
69
+ B, HW, _ = q.shape
70
+
71
+ # relative logits in width dimension.
72
+ q = q.reshape(B, self.height, self.width, -1)
73
+ rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4))
74
+
75
+ # relative logits in height dimension.
76
+ q = q.transpose(1, 2)
77
+ rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2))
78
+
79
+ rel_logits = rel_logits_h + rel_logits_w
80
+ rel_logits = rel_logits.reshape(B, HW, HW)
81
+ return rel_logits
82
+
83
+
84
+ class BottleneckAttn(nn.Module):
85
+ """ Bottleneck Attention
86
+ Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605
87
+
88
+ The internal dimensions of the attention module are controlled by the interaction of several arguments.
89
+ * the output dimension of the module is specified by dim_out, which falls back to input dim if not set
90
+ * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim
91
+ * the query and key (qk) dimensions are determined by
92
+ * num_heads * dim_head if dim_head is not None
93
+ * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None
94
+ * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used
95
+
96
+ Args:
97
+ dim (int): input dimension to the module
98
+ dim_out (int): output dimension of the module, same as dim if not set
99
+ stride (int): output stride of the module, avg pool used if stride == 2 (default: 1).
100
+ num_heads (int): parallel attention heads (default: 4)
101
+ dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set
102
+ qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0)
103
+ qkv_bias (bool): add bias to q, k, and v projections
104
+ scale_pos_embed (bool): scale the position embedding as well as Q @ K
105
+ """
106
+ def __init__(
107
+ self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=None,
108
+ qk_ratio=1.0, qkv_bias=False, scale_pos_embed=False):
109
+ super().__init__()
110
+ assert feat_size is not None, 'A concrete feature size matching expected input (H, W) is required'
111
+ dim_out = dim_out or dim
112
+ assert dim_out % num_heads == 0
113
+ self.num_heads = num_heads
114
+ self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads
115
+ self.dim_head_v = dim_out // self.num_heads
116
+ self.dim_out_qk = num_heads * self.dim_head_qk
117
+ self.dim_out_v = num_heads * self.dim_head_v
118
+ self.scale = self.dim_head_qk ** -0.5
119
+ self.scale_pos_embed = scale_pos_embed
120
+
121
+ self.qkv = nn.Conv2d(dim, self.dim_out_qk * 2 + self.dim_out_v, 1, bias=qkv_bias)
122
+
123
+ # NOTE I'm only supporting relative pos embedding for now
124
+ self.pos_embed = PosEmbedRel(feat_size, dim_head=self.dim_head_qk, scale=self.scale)
125
+
126
+ self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity()
127
+
128
+ self.reset_parameters()
129
+
130
+ def reset_parameters(self):
131
+ trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in
132
+ trunc_normal_(self.pos_embed.height_rel, std=self.scale)
133
+ trunc_normal_(self.pos_embed.width_rel, std=self.scale)
134
+
135
+ def forward(self, x):
136
+ B, C, H, W = x.shape
137
+ _assert(H == self.pos_embed.height, '')
138
+ _assert(W == self.pos_embed.width, '')
139
+
140
+ x = self.qkv(x) # B, (2 * dim_head_qk + dim_head_v) * num_heads, H, W
141
+
142
+ # NOTE head vs channel split ordering in qkv projection was decided before I allowed qk to differ from v
143
+ # So, this is more verbose than if heads were before qkv splits, but throughput is not impacted.
144
+ q, k, v = torch.split(x, [self.dim_out_qk, self.dim_out_qk, self.dim_out_v], dim=1)
145
+ q = q.reshape(B * self.num_heads, self.dim_head_qk, -1).transpose(-1, -2)
146
+ k = k.reshape(B * self.num_heads, self.dim_head_qk, -1) # no transpose, for q @ k
147
+ v = v.reshape(B * self.num_heads, self.dim_head_v, -1).transpose(-1, -2)
148
+
149
+ if self.scale_pos_embed:
150
+ attn = (q @ k + self.pos_embed(q)) * self.scale # B * num_heads, H * W, H * W
151
+ else:
152
+ attn = (q @ k) * self.scale + self.pos_embed(q)
153
+ attn = attn.softmax(dim=-1)
154
+
155
+ out = (attn @ v).transpose(-1, -2).reshape(B, self.dim_out_v, H, W) # B, dim_out, H, W
156
+ out = self.pool(out)
157
+ return out
parrot/lib/python3.10/site-packages/timm/layers/classifier.py ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Classifier head and layer factory
2
+
3
+ Hacked together by / Copyright 2020 Ross Wightman
4
+ """
5
+ from collections import OrderedDict
6
+ from functools import partial
7
+ from typing import Optional, Union, Callable
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ from torch.nn import functional as F
12
+
13
+ from .adaptive_avgmax_pool import SelectAdaptivePool2d
14
+ from .create_act import get_act_layer
15
+ from .create_norm import get_norm_layer
16
+
17
+
18
+ def _create_pool(
19
+ num_features: int,
20
+ num_classes: int,
21
+ pool_type: str = 'avg',
22
+ use_conv: bool = False,
23
+ input_fmt: Optional[str] = None,
24
+ ):
25
+ flatten_in_pool = not use_conv # flatten when we use a Linear layer after pooling
26
+ if not pool_type:
27
+ assert num_classes == 0 or use_conv,\
28
+ 'Pooling can only be disabled if classifier is also removed or conv classifier is used'
29
+ flatten_in_pool = False # disable flattening if pooling is pass-through (no pooling)
30
+ global_pool = SelectAdaptivePool2d(
31
+ pool_type=pool_type,
32
+ flatten=flatten_in_pool,
33
+ input_fmt=input_fmt,
34
+ )
35
+ num_pooled_features = num_features * global_pool.feat_mult()
36
+ return global_pool, num_pooled_features
37
+
38
+
39
+ def _create_fc(num_features, num_classes, use_conv=False):
40
+ if num_classes <= 0:
41
+ fc = nn.Identity() # pass-through (no classifier)
42
+ elif use_conv:
43
+ fc = nn.Conv2d(num_features, num_classes, 1, bias=True)
44
+ else:
45
+ fc = nn.Linear(num_features, num_classes, bias=True)
46
+ return fc
47
+
48
+
49
+ def create_classifier(
50
+ num_features: int,
51
+ num_classes: int,
52
+ pool_type: str = 'avg',
53
+ use_conv: bool = False,
54
+ input_fmt: str = 'NCHW',
55
+ drop_rate: Optional[float] = None,
56
+ ):
57
+ global_pool, num_pooled_features = _create_pool(
58
+ num_features,
59
+ num_classes,
60
+ pool_type,
61
+ use_conv=use_conv,
62
+ input_fmt=input_fmt,
63
+ )
64
+ fc = _create_fc(
65
+ num_pooled_features,
66
+ num_classes,
67
+ use_conv=use_conv,
68
+ )
69
+ if drop_rate is not None:
70
+ dropout = nn.Dropout(drop_rate)
71
+ return global_pool, dropout, fc
72
+ return global_pool, fc
73
+
74
+
75
+ class ClassifierHead(nn.Module):
76
+ """Classifier head w/ configurable global pooling and dropout."""
77
+
78
+ def __init__(
79
+ self,
80
+ in_features: int,
81
+ num_classes: int,
82
+ pool_type: str = 'avg',
83
+ drop_rate: float = 0.,
84
+ use_conv: bool = False,
85
+ input_fmt: str = 'NCHW',
86
+ ):
87
+ """
88
+ Args:
89
+ in_features: The number of input features.
90
+ num_classes: The number of classes for the final classifier layer (output).
91
+ pool_type: Global pooling type, pooling disabled if empty string ('').
92
+ drop_rate: Pre-classifier dropout rate.
93
+ """
94
+ super(ClassifierHead, self).__init__()
95
+ self.in_features = in_features
96
+ self.use_conv = use_conv
97
+ self.input_fmt = input_fmt
98
+
99
+ global_pool, fc = create_classifier(
100
+ in_features,
101
+ num_classes,
102
+ pool_type,
103
+ use_conv=use_conv,
104
+ input_fmt=input_fmt,
105
+ )
106
+ self.global_pool = global_pool
107
+ self.drop = nn.Dropout(drop_rate)
108
+ self.fc = fc
109
+ self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity()
110
+
111
+ def reset(self, num_classes, pool_type=None):
112
+ if pool_type is not None and pool_type != self.global_pool.pool_type:
113
+ self.global_pool, self.fc = create_classifier(
114
+ self.in_features,
115
+ num_classes,
116
+ pool_type=pool_type,
117
+ use_conv=self.use_conv,
118
+ input_fmt=self.input_fmt,
119
+ )
120
+ self.flatten = nn.Flatten(1) if self.use_conv and pool_type else nn.Identity()
121
+ else:
122
+ num_pooled_features = self.in_features * self.global_pool.feat_mult()
123
+ self.fc = _create_fc(
124
+ num_pooled_features,
125
+ num_classes,
126
+ use_conv=self.use_conv,
127
+ )
128
+
129
+ def forward(self, x, pre_logits: bool = False):
130
+ x = self.global_pool(x)
131
+ x = self.drop(x)
132
+ if pre_logits:
133
+ return self.flatten(x)
134
+ x = self.fc(x)
135
+ return self.flatten(x)
136
+
137
+
138
+ class NormMlpClassifierHead(nn.Module):
139
+
140
+ def __init__(
141
+ self,
142
+ in_features: int,
143
+ num_classes: int,
144
+ hidden_size: Optional[int] = None,
145
+ pool_type: str = 'avg',
146
+ drop_rate: float = 0.,
147
+ norm_layer: Union[str, Callable] = 'layernorm2d',
148
+ act_layer: Union[str, Callable] = 'tanh',
149
+ ):
150
+ """
151
+ Args:
152
+ in_features: The number of input features.
153
+ num_classes: The number of classes for the final classifier layer (output).
154
+ hidden_size: The hidden size of the MLP (pre-logits FC layer) if not None.
155
+ pool_type: Global pooling type, pooling disabled if empty string ('').
156
+ drop_rate: Pre-classifier dropout rate.
157
+ norm_layer: Normalization layer type.
158
+ act_layer: MLP activation layer type (only used if hidden_size is not None).
159
+ """
160
+ super().__init__()
161
+ self.in_features = in_features
162
+ self.hidden_size = hidden_size
163
+ self.num_features = in_features
164
+ self.use_conv = not pool_type
165
+ norm_layer = get_norm_layer(norm_layer)
166
+ act_layer = get_act_layer(act_layer)
167
+ linear_layer = partial(nn.Conv2d, kernel_size=1) if self.use_conv else nn.Linear
168
+
169
+ self.global_pool = SelectAdaptivePool2d(pool_type=pool_type)
170
+ self.norm = norm_layer(in_features)
171
+ self.flatten = nn.Flatten(1) if pool_type else nn.Identity()
172
+ if hidden_size:
173
+ self.pre_logits = nn.Sequential(OrderedDict([
174
+ ('fc', linear_layer(in_features, hidden_size)),
175
+ ('act', act_layer()),
176
+ ]))
177
+ self.num_features = hidden_size
178
+ else:
179
+ self.pre_logits = nn.Identity()
180
+ self.drop = nn.Dropout(drop_rate)
181
+ self.fc = linear_layer(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
182
+
183
+ def reset(self, num_classes, global_pool=None):
184
+ if global_pool is not None:
185
+ self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
186
+ self.flatten = nn.Flatten(1) if global_pool else nn.Identity()
187
+ self.use_conv = self.global_pool.is_identity()
188
+ linear_layer = partial(nn.Conv2d, kernel_size=1) if self.use_conv else nn.Linear
189
+ if self.hidden_size:
190
+ if ((isinstance(self.pre_logits.fc, nn.Conv2d) and not self.use_conv) or
191
+ (isinstance(self.pre_logits.fc, nn.Linear) and self.use_conv)):
192
+ with torch.no_grad():
193
+ new_fc = linear_layer(self.in_features, self.hidden_size)
194
+ new_fc.weight.copy_(self.pre_logits.fc.weight.reshape(new_fc.weight.shape))
195
+ new_fc.bias.copy_(self.pre_logits.fc.bias)
196
+ self.pre_logits.fc = new_fc
197
+ self.fc = linear_layer(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
198
+
199
+ def forward(self, x, pre_logits: bool = False):
200
+ x = self.global_pool(x)
201
+ x = self.norm(x)
202
+ x = self.flatten(x)
203
+ x = self.pre_logits(x)
204
+ x = self.drop(x)
205
+ if pre_logits:
206
+ return x
207
+ x = self.fc(x)
208
+ return x
parrot/lib/python3.10/site-packages/timm/layers/create_norm.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Norm Layer Factory
2
+
3
+ Create norm modules by string (to mirror create_act and creat_norm-act fns)
4
+
5
+ Copyright 2022 Ross Wightman
6
+ """
7
+ import functools
8
+ import types
9
+ from typing import Type
10
+
11
+ import torch.nn as nn
12
+
13
+ from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, RmsNorm
14
+ from torchvision.ops.misc import FrozenBatchNorm2d
15
+
16
+ _NORM_MAP = dict(
17
+ batchnorm=nn.BatchNorm2d,
18
+ batchnorm2d=nn.BatchNorm2d,
19
+ batchnorm1d=nn.BatchNorm1d,
20
+ groupnorm=GroupNorm,
21
+ groupnorm1=GroupNorm1,
22
+ layernorm=LayerNorm,
23
+ layernorm2d=LayerNorm2d,
24
+ rmsnorm=RmsNorm,
25
+ frozenbatchnorm2d=FrozenBatchNorm2d,
26
+ )
27
+ _NORM_TYPES = {m for n, m in _NORM_MAP.items()}
28
+
29
+
30
+ def create_norm_layer(layer_name, num_features, **kwargs):
31
+ layer = get_norm_layer(layer_name)
32
+ layer_instance = layer(num_features, **kwargs)
33
+ return layer_instance
34
+
35
+
36
+ def get_norm_layer(norm_layer):
37
+ if norm_layer is None:
38
+ return None
39
+ assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial))
40
+ norm_kwargs = {}
41
+
42
+ # unbind partial fn, so args can be rebound later
43
+ if isinstance(norm_layer, functools.partial):
44
+ norm_kwargs.update(norm_layer.keywords)
45
+ norm_layer = norm_layer.func
46
+
47
+ if isinstance(norm_layer, str):
48
+ if not norm_layer:
49
+ return None
50
+ layer_name = norm_layer.replace('_', '')
51
+ norm_layer = _NORM_MAP[layer_name]
52
+ else:
53
+ norm_layer = norm_layer
54
+
55
+ if norm_kwargs:
56
+ norm_layer = functools.partial(norm_layer, **norm_kwargs) # bind/rebind args
57
+ return norm_layer
parrot/lib/python3.10/site-packages/timm/layers/drop.py ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ DropBlock, DropPath
2
+
3
+ PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers.
4
+
5
+ Papers:
6
+ DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)
7
+
8
+ Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)
9
+
10
+ Code:
11
+ DropBlock impl inspired by two Tensorflow impl that I liked:
12
+ - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
13
+ - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py
14
+
15
+ Hacked together by / Copyright 2020 Ross Wightman
16
+ """
17
+ import torch
18
+ import torch.nn as nn
19
+ import torch.nn.functional as F
20
+
21
+
22
+ def drop_block_2d(
23
+ x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0,
24
+ with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
25
+ """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
26
+
27
+ DropBlock with an experimental gaussian noise option. This layer has been tested on a few training
28
+ runs with success, but needs further validation and possibly optimization for lower runtime impact.
29
+ """
30
+ B, C, H, W = x.shape
31
+ total_size = W * H
32
+ clipped_block_size = min(block_size, min(W, H))
33
+ # seed_drop_rate, the gamma parameter
34
+ gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
35
+ (W - block_size + 1) * (H - block_size + 1))
36
+
37
+ # Forces the block to be inside the feature map.
38
+ w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device))
39
+ valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \
40
+ ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))
41
+ valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)
42
+
43
+ if batchwise:
44
+ # one mask for whole batch, quite a bit faster
45
+ uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
46
+ else:
47
+ uniform_noise = torch.rand_like(x)
48
+ block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
49
+ block_mask = -F.max_pool2d(
50
+ -block_mask,
51
+ kernel_size=clipped_block_size, # block_size,
52
+ stride=1,
53
+ padding=clipped_block_size // 2)
54
+
55
+ if with_noise:
56
+ normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
57
+ if inplace:
58
+ x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
59
+ else:
60
+ x = x * block_mask + normal_noise * (1 - block_mask)
61
+ else:
62
+ normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype)
63
+ if inplace:
64
+ x.mul_(block_mask * normalize_scale)
65
+ else:
66
+ x = x * block_mask * normalize_scale
67
+ return x
68
+
69
+
70
+ def drop_block_fast_2d(
71
+ x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7,
72
+ gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False):
73
+ """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
74
+
75
+ DropBlock with an experimental gaussian noise option. Simplied from above without concern for valid
76
+ block mask at edges.
77
+ """
78
+ B, C, H, W = x.shape
79
+ total_size = W * H
80
+ clipped_block_size = min(block_size, min(W, H))
81
+ gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
82
+ (W - block_size + 1) * (H - block_size + 1))
83
+
84
+ block_mask = torch.empty_like(x).bernoulli_(gamma)
85
+ block_mask = F.max_pool2d(
86
+ block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)
87
+
88
+ if with_noise:
89
+ normal_noise = torch.empty_like(x).normal_()
90
+ if inplace:
91
+ x.mul_(1. - block_mask).add_(normal_noise * block_mask)
92
+ else:
93
+ x = x * (1. - block_mask) + normal_noise * block_mask
94
+ else:
95
+ block_mask = 1 - block_mask
96
+ normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-6)).to(dtype=x.dtype)
97
+ if inplace:
98
+ x.mul_(block_mask * normalize_scale)
99
+ else:
100
+ x = x * block_mask * normalize_scale
101
+ return x
102
+
103
+
104
+ class DropBlock2d(nn.Module):
105
+ """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
106
+ """
107
+
108
+ def __init__(
109
+ self,
110
+ drop_prob: float = 0.1,
111
+ block_size: int = 7,
112
+ gamma_scale: float = 1.0,
113
+ with_noise: bool = False,
114
+ inplace: bool = False,
115
+ batchwise: bool = False,
116
+ fast: bool = True):
117
+ super(DropBlock2d, self).__init__()
118
+ self.drop_prob = drop_prob
119
+ self.gamma_scale = gamma_scale
120
+ self.block_size = block_size
121
+ self.with_noise = with_noise
122
+ self.inplace = inplace
123
+ self.batchwise = batchwise
124
+ self.fast = fast # FIXME finish comparisons of fast vs not
125
+
126
+ def forward(self, x):
127
+ if not self.training or not self.drop_prob:
128
+ return x
129
+ if self.fast:
130
+ return drop_block_fast_2d(
131
+ x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace)
132
+ else:
133
+ return drop_block_2d(
134
+ x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
135
+
136
+
137
+ def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True):
138
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
139
+
140
+ This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
141
+ the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
142
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
143
+ changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
144
+ 'survival rate' as the argument.
145
+
146
+ """
147
+ if drop_prob == 0. or not training:
148
+ return x
149
+ keep_prob = 1 - drop_prob
150
+ shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
151
+ random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
152
+ if keep_prob > 0.0 and scale_by_keep:
153
+ random_tensor.div_(keep_prob)
154
+ return x * random_tensor
155
+
156
+
157
+ class DropPath(nn.Module):
158
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
159
+ """
160
+ def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):
161
+ super(DropPath, self).__init__()
162
+ self.drop_prob = drop_prob
163
+ self.scale_by_keep = scale_by_keep
164
+
165
+ def forward(self, x):
166
+ return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
167
+
168
+ def extra_repr(self):
169
+ return f'drop_prob={round(self.drop_prob,3):0.3f}'
parrot/lib/python3.10/site-packages/timm/layers/global_context.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Global Context Attention Block
2
+
3
+ Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond`
4
+ - https://arxiv.org/abs/1904.11492
5
+
6
+ Official code consulted as reference: https://github.com/xvjiarui/GCNet
7
+
8
+ Hacked together by / Copyright 2021 Ross Wightman
9
+ """
10
+ from torch import nn as nn
11
+ import torch.nn.functional as F
12
+
13
+ from .create_act import create_act_layer, get_act_layer
14
+ from .helpers import make_divisible
15
+ from .mlp import ConvMlp
16
+ from .norm import LayerNorm2d
17
+
18
+
19
+ class GlobalContext(nn.Module):
20
+
21
+ def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False,
22
+ rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'):
23
+ super(GlobalContext, self).__init__()
24
+ act_layer = get_act_layer(act_layer)
25
+
26
+ self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None
27
+
28
+ if rd_channels is None:
29
+ rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
30
+ if fuse_add:
31
+ self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d)
32
+ else:
33
+ self.mlp_add = None
34
+ if fuse_scale:
35
+ self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d)
36
+ else:
37
+ self.mlp_scale = None
38
+
39
+ self.gate = create_act_layer(gate_layer)
40
+ self.init_last_zero = init_last_zero
41
+ self.reset_parameters()
42
+
43
+ def reset_parameters(self):
44
+ if self.conv_attn is not None:
45
+ nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu')
46
+ if self.mlp_add is not None:
47
+ nn.init.zeros_(self.mlp_add.fc2.weight)
48
+
49
+ def forward(self, x):
50
+ B, C, H, W = x.shape
51
+
52
+ if self.conv_attn is not None:
53
+ attn = self.conv_attn(x).reshape(B, 1, H * W) # (B, 1, H * W)
54
+ attn = F.softmax(attn, dim=-1).unsqueeze(3) # (B, 1, H * W, 1)
55
+ context = x.reshape(B, C, H * W).unsqueeze(1) @ attn
56
+ context = context.view(B, C, 1, 1)
57
+ else:
58
+ context = x.mean(dim=(2, 3), keepdim=True)
59
+
60
+ if self.mlp_scale is not None:
61
+ mlp_x = self.mlp_scale(context)
62
+ x = x * self.gate(mlp_x)
63
+ if self.mlp_add is not None:
64
+ mlp_x = self.mlp_add(context)
65
+ x = x + mlp_x
66
+
67
+ return x
parrot/lib/python3.10/site-packages/timm/layers/halo_attn.py ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Halo Self Attention
2
+
3
+ Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones`
4
+ - https://arxiv.org/abs/2103.12731
5
+
6
+ @misc{2103.12731,
7
+ Author = {Ashish Vaswani and Prajit Ramachandran and Aravind Srinivas and Niki Parmar and Blake Hechtman and
8
+ Jonathon Shlens},
9
+ Title = {Scaling Local Self-Attention for Parameter Efficient Visual Backbones},
10
+ Year = {2021},
11
+ }
12
+
13
+ Status:
14
+ This impl is a WIP, there is no official ref impl and some details in paper weren't clear to me.
15
+ The attention mechanism works but it's slow as implemented.
16
+
17
+ Hacked together by / Copyright 2021 Ross Wightman
18
+ """
19
+ from typing import List
20
+
21
+ import torch
22
+ from torch import nn
23
+ import torch.nn.functional as F
24
+
25
+ from .helpers import make_divisible
26
+ from .weight_init import trunc_normal_
27
+ from .trace_utils import _assert
28
+
29
+
30
+ def rel_logits_1d(q, rel_k, permute_mask: List[int]):
31
+ """ Compute relative logits along one dimension
32
+
33
+ As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
34
+ Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
35
+
36
+ Args:
37
+ q: (batch, height, width, dim)
38
+ rel_k: (2 * window - 1, dim)
39
+ permute_mask: permute output dim according to this
40
+ """
41
+ B, H, W, dim = q.shape
42
+ rel_size = rel_k.shape[0]
43
+ win_size = (rel_size + 1) // 2
44
+
45
+ x = (q @ rel_k.transpose(-1, -2))
46
+ x = x.reshape(-1, W, rel_size)
47
+
48
+ # pad to shift from relative to absolute indexing
49
+ x_pad = F.pad(x, [0, 1]).flatten(1)
50
+ x_pad = F.pad(x_pad, [0, rel_size - W])
51
+
52
+ # reshape and slice out the padded elements
53
+ x_pad = x_pad.reshape(-1, W + 1, rel_size)
54
+ x = x_pad[:, :W, win_size - 1:]
55
+
56
+ # reshape and tile
57
+ x = x.reshape(B, H, 1, W, win_size).expand(-1, -1, win_size, -1, -1)
58
+ return x.permute(permute_mask)
59
+
60
+
61
+ class PosEmbedRel(nn.Module):
62
+ """ Relative Position Embedding
63
+ As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
64
+ Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
65
+
66
+ """
67
+ def __init__(self, block_size, win_size, dim_head, scale):
68
+ """
69
+ Args:
70
+ block_size (int): block size
71
+ win_size (int): neighbourhood window size
72
+ dim_head (int): attention head dim
73
+ scale (float): scale factor (for init)
74
+ """
75
+ super().__init__()
76
+ self.block_size = block_size
77
+ self.dim_head = dim_head
78
+ self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale)
79
+ self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale)
80
+
81
+ def forward(self, q):
82
+ B, BB, HW, _ = q.shape
83
+
84
+ # relative logits in width dimension.
85
+ q = q.reshape(-1, self.block_size, self.block_size, self.dim_head)
86
+ rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4))
87
+
88
+ # relative logits in height dimension.
89
+ q = q.transpose(1, 2)
90
+ rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2))
91
+
92
+ rel_logits = rel_logits_h + rel_logits_w
93
+ rel_logits = rel_logits.reshape(B, BB, HW, -1)
94
+ return rel_logits
95
+
96
+
97
+ class HaloAttn(nn.Module):
98
+ """ Halo Attention
99
+
100
+ Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones`
101
+ - https://arxiv.org/abs/2103.12731
102
+
103
+ The internal dimensions of the attention module are controlled by the interaction of several arguments.
104
+ * the output dimension of the module is specified by dim_out, which falls back to input dim if not set
105
+ * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim
106
+ * the query and key (qk) dimensions are determined by
107
+ * num_heads * dim_head if dim_head is not None
108
+ * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None
109
+ * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used
110
+
111
+ Args:
112
+ dim (int): input dimension to the module
113
+ dim_out (int): output dimension of the module, same as dim if not set
114
+ feat_size (Tuple[int, int]): size of input feature_map (not used, for arg compat with bottle/lambda)
115
+ stride: output stride of the module, query downscaled if > 1 (default: 1).
116
+ num_heads: parallel attention heads (default: 8).
117
+ dim_head: dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set
118
+ block_size (int): size of blocks. (default: 8)
119
+ halo_size (int): size of halo overlap. (default: 3)
120
+ qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0)
121
+ qkv_bias (bool) : add bias to q, k, and v projections
122
+ avg_down (bool): use average pool downsample instead of strided query blocks
123
+ scale_pos_embed (bool): scale the position embedding as well as Q @ K
124
+ """
125
+ def __init__(
126
+ self, dim, dim_out=None, feat_size=None, stride=1, num_heads=8, dim_head=None, block_size=8, halo_size=3,
127
+ qk_ratio=1.0, qkv_bias=False, avg_down=False, scale_pos_embed=False):
128
+ super().__init__()
129
+ dim_out = dim_out or dim
130
+ assert dim_out % num_heads == 0
131
+ assert stride in (1, 2)
132
+ self.num_heads = num_heads
133
+ self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads
134
+ self.dim_head_v = dim_out // self.num_heads
135
+ self.dim_out_qk = num_heads * self.dim_head_qk
136
+ self.dim_out_v = num_heads * self.dim_head_v
137
+ self.scale = self.dim_head_qk ** -0.5
138
+ self.scale_pos_embed = scale_pos_embed
139
+ self.block_size = self.block_size_ds = block_size
140
+ self.halo_size = halo_size
141
+ self.win_size = block_size + halo_size * 2 # neighbourhood window size
142
+ self.block_stride = 1
143
+ use_avg_pool = False
144
+ if stride > 1:
145
+ use_avg_pool = avg_down or block_size % stride != 0
146
+ self.block_stride = 1 if use_avg_pool else stride
147
+ self.block_size_ds = self.block_size // self.block_stride
148
+
149
+ # FIXME not clear if this stride behaviour is what the paper intended
150
+ # Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving
151
+ # data in unfolded block form. I haven't wrapped my head around how that'd look.
152
+ self.q = nn.Conv2d(dim, self.dim_out_qk, 1, stride=self.block_stride, bias=qkv_bias)
153
+ self.kv = nn.Conv2d(dim, self.dim_out_qk + self.dim_out_v, 1, bias=qkv_bias)
154
+
155
+ self.pos_embed = PosEmbedRel(
156
+ block_size=self.block_size_ds, win_size=self.win_size, dim_head=self.dim_head_qk, scale=self.scale)
157
+
158
+ self.pool = nn.AvgPool2d(2, 2) if use_avg_pool else nn.Identity()
159
+
160
+ self.reset_parameters()
161
+
162
+ def reset_parameters(self):
163
+ std = self.q.weight.shape[1] ** -0.5 # fan-in
164
+ trunc_normal_(self.q.weight, std=std)
165
+ trunc_normal_(self.kv.weight, std=std)
166
+ trunc_normal_(self.pos_embed.height_rel, std=self.scale)
167
+ trunc_normal_(self.pos_embed.width_rel, std=self.scale)
168
+
169
+ def forward(self, x):
170
+ B, C, H, W = x.shape
171
+ _assert(H % self.block_size == 0, '')
172
+ _assert(W % self.block_size == 0, '')
173
+ num_h_blocks = H // self.block_size
174
+ num_w_blocks = W // self.block_size
175
+ num_blocks = num_h_blocks * num_w_blocks
176
+
177
+ q = self.q(x)
178
+ # unfold
179
+ q = q.reshape(
180
+ -1, self.dim_head_qk,
181
+ num_h_blocks, self.block_size_ds, num_w_blocks, self.block_size_ds).permute(0, 1, 3, 5, 2, 4)
182
+ # B, num_heads * dim_head * block_size ** 2, num_blocks
183
+ q = q.reshape(B * self.num_heads, self.dim_head_qk, -1, num_blocks).transpose(1, 3)
184
+ # B * num_heads, num_blocks, block_size ** 2, dim_head
185
+
186
+ kv = self.kv(x)
187
+ # Generate overlapping windows for kv. This approach is good for GPU and CPU. However, unfold() is not
188
+ # lowered for PyTorch XLA so it will be very slow. See code at bottom of file for XLA friendly approach.
189
+ # FIXME figure out how to switch impl between this and conv2d if XLA being used.
190
+ kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size])
191
+ kv = kv.unfold(2, self.win_size, self.block_size).unfold(3, self.win_size, self.block_size).reshape(
192
+ B * self.num_heads, self.dim_head_qk + self.dim_head_v, num_blocks, -1).permute(0, 2, 3, 1)
193
+ k, v = torch.split(kv, [self.dim_head_qk, self.dim_head_v], dim=-1)
194
+ # B * num_heads, num_blocks, win_size ** 2, dim_head_qk or dim_head_v
195
+
196
+ if self.scale_pos_embed:
197
+ attn = (q @ k.transpose(-1, -2) + self.pos_embed(q)) * self.scale
198
+ else:
199
+ attn = (q @ k.transpose(-1, -2)) * self.scale + self.pos_embed(q)
200
+ # B * num_heads, num_blocks, block_size ** 2, win_size ** 2
201
+ attn = attn.softmax(dim=-1)
202
+
203
+ out = (attn @ v).transpose(1, 3) # B * num_heads, dim_head_v, block_size ** 2, num_blocks
204
+ # fold
205
+ out = out.reshape(-1, self.block_size_ds, self.block_size_ds, num_h_blocks, num_w_blocks)
206
+ out = out.permute(0, 3, 1, 4, 2).contiguous().view(
207
+ B, self.dim_out_v, H // self.block_stride, W // self.block_stride)
208
+ # B, dim_out, H // block_stride, W // block_stride
209
+ out = self.pool(out)
210
+ return out
211
+
212
+
213
+ """ Three alternatives for overlapping windows.
214
+
215
+ `.unfold().unfold()` is same speed as stride tricks with similar clarity as F.unfold()
216
+
217
+ if is_xla:
218
+ # This code achieves haloing on PyTorch XLA with reasonable runtime trade-off, it is
219
+ # EXTREMELY slow for backward on a GPU though so I need a way of selecting based on environment.
220
+ WW = self.win_size ** 2
221
+ pw = torch.eye(WW, dtype=x.dtype, device=x.device).reshape(WW, 1, self.win_size, self.win_size)
222
+ kv = F.conv2d(kv.reshape(-1, 1, H, W), pw, stride=self.block_size, padding=self.halo_size)
223
+ elif self.stride_tricks:
224
+ kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]).contiguous()
225
+ kv = kv.as_strided((
226
+ B, self.dim_out_qk + self.dim_out_v, self.win_size, self.win_size, num_h_blocks, num_w_blocks),
227
+ stride=(kv.stride(0), kv.stride(1), kv.shape[-1], 1, self.block_size * kv.shape[-1], self.block_size))
228
+ else:
229
+ kv = F.unfold(kv, kernel_size=self.win_size, stride=self.block_size, padding=self.halo_size)
230
+
231
+ kv = kv.reshape(
232
+ B * self.num_heads, self.dim_head_qk + self.dim_head_v, -1, num_blocks).transpose(1, 3)
233
+ """
parrot/lib/python3.10/site-packages/timm/layers/interpolate.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Interpolation helpers for timm layers
2
+
3
+ RegularGridInterpolator from https://github.com/sbarratt/torch_interpolations
4
+ Copyright Shane Barratt, Apache 2.0 license
5
+ """
6
+ import torch
7
+ from itertools import product
8
+
9
+
10
+ class RegularGridInterpolator:
11
+ """ Interpolate data defined on a rectilinear grid with even or uneven spacing.
12
+ Produces similar results to scipy RegularGridInterpolator or interp2d
13
+ in 'linear' mode.
14
+
15
+ Taken from https://github.com/sbarratt/torch_interpolations
16
+ """
17
+
18
+ def __init__(self, points, values):
19
+ self.points = points
20
+ self.values = values
21
+
22
+ assert isinstance(self.points, tuple) or isinstance(self.points, list)
23
+ assert isinstance(self.values, torch.Tensor)
24
+
25
+ self.ms = list(self.values.shape)
26
+ self.n = len(self.points)
27
+
28
+ assert len(self.ms) == self.n
29
+
30
+ for i, p in enumerate(self.points):
31
+ assert isinstance(p, torch.Tensor)
32
+ assert p.shape[0] == self.values.shape[i]
33
+
34
+ def __call__(self, points_to_interp):
35
+ assert self.points is not None
36
+ assert self.values is not None
37
+
38
+ assert len(points_to_interp) == len(self.points)
39
+ K = points_to_interp[0].shape[0]
40
+ for x in points_to_interp:
41
+ assert x.shape[0] == K
42
+
43
+ idxs = []
44
+ dists = []
45
+ overalls = []
46
+ for p, x in zip(self.points, points_to_interp):
47
+ idx_right = torch.bucketize(x, p)
48
+ idx_right[idx_right >= p.shape[0]] = p.shape[0] - 1
49
+ idx_left = (idx_right - 1).clamp(0, p.shape[0] - 1)
50
+ dist_left = x - p[idx_left]
51
+ dist_right = p[idx_right] - x
52
+ dist_left[dist_left < 0] = 0.
53
+ dist_right[dist_right < 0] = 0.
54
+ both_zero = (dist_left == 0) & (dist_right == 0)
55
+ dist_left[both_zero] = dist_right[both_zero] = 1.
56
+
57
+ idxs.append((idx_left, idx_right))
58
+ dists.append((dist_left, dist_right))
59
+ overalls.append(dist_left + dist_right)
60
+
61
+ numerator = 0.
62
+ for indexer in product([0, 1], repeat=self.n):
63
+ as_s = [idx[onoff] for onoff, idx in zip(indexer, idxs)]
64
+ bs_s = [dist[1 - onoff] for onoff, dist in zip(indexer, dists)]
65
+ numerator += self.values[as_s] * \
66
+ torch.prod(torch.stack(bs_s), dim=0)
67
+ denominator = torch.prod(torch.stack(overalls), dim=0)
68
+ return numerator / denominator
parrot/lib/python3.10/site-packages/timm/layers/mixed_conv2d.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ PyTorch Mixed Convolution
2
+
3
+ Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595)
4
+
5
+ Hacked together by / Copyright 2020 Ross Wightman
6
+ """
7
+
8
+ import torch
9
+ from torch import nn as nn
10
+
11
+ from .conv2d_same import create_conv2d_pad
12
+
13
+
14
def _split_channels(num_chan, num_groups):
    """Split `num_chan` channels into `num_groups` near-equal groups.

    The first group absorbs any remainder so the splits always sum to
    `num_chan` (e.g. 10 channels over 3 groups -> [4, 3, 3]).
    """
    base = num_chan // num_groups
    splits = [base] * num_groups
    splits[0] += num_chan - base * num_groups
    return splits
18
+
19
+
20
class MixedConv2d(nn.ModuleDict):
    """ Mixed Grouped Convolution

    Applies a different kernel size to each split of the input channels, as in
    MDConv and GroupedConv from the MixNet impl:
    https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py
    """
    def __init__(self, in_channels, out_channels, kernel_size=3,
                 stride=1, padding='', dilation=1, depthwise=False, **kwargs):
        super(MixedConv2d, self).__init__()

        # a scalar kernel_size degenerates to a single plain conv
        kernel_sizes = kernel_size if isinstance(kernel_size, list) else [kernel_size]
        group_count = len(kernel_sizes)
        in_splits = _split_channels(in_channels, group_count)
        out_splits = _split_channels(out_channels, group_count)
        self.in_channels = sum(in_splits)
        self.out_channels = sum(out_splits)
        for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_sizes, in_splits, out_splits)):
            # depthwise => each split convolves its own channels independently
            conv_groups = in_ch if depthwise else 1
            # use add_module to keep key space clean
            self.add_module(
                str(idx),
                create_conv2d_pad(
                    in_ch, out_ch, k, stride=stride,
                    padding=padding, dilation=dilation, groups=conv_groups, **kwargs)
            )
        self.splits = in_splits

    def forward(self, x):
        # run each channel split through its own conv, then re-concatenate
        chunks = torch.split(x, self.splits, 1)
        outputs = [conv(chunk) for chunk, conv in zip(chunks, self.values())]
        return torch.cat(outputs, 1)
parrot/lib/python3.10/site-packages/timm/layers/padding.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Padding Helpers
2
+
3
+ Hacked together by / Copyright 2020 Ross Wightman
4
+ """
5
+ import math
6
+ from typing import List, Tuple
7
+
8
+ import torch
9
+ import torch.nn.functional as F
10
+
11
+
12
def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int:
    """Return symmetric padding for a convolution with the given geometry."""
    effective_kernel = dilation * (kernel_size - 1)
    return ((stride - 1) + effective_kernel) // 2
16
+
17
+
18
def get_same_padding(x: int, kernel_size: int, stride: int, dilation: int):
    """Total TensorFlow-like 'SAME' padding needed along one dim of size `x`.

    Accepts a tensor `x` for traceable/dynamic shapes; otherwise plain ints.
    """
    span = (kernel_size - 1) * dilation + 1
    if isinstance(x, torch.Tensor):
        return torch.clamp(((x / stride).ceil() - 1) * stride + span - x, min=0)
    return max((math.ceil(x / stride) - 1) * stride + span - x, 0)
24
+
25
+
26
def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_):
    """Whether 'SAME' padding for these args can be applied statically (stride 1, even effective pad)."""
    if stride != 1:
        return False
    return (dilation * (kernel_size - 1)) % 2 == 0
29
+
30
+
31
def pad_same_arg(
        input_size: List[int],
        kernel_size: List[int],
        stride: List[int],
        dilation: List[int] = (1, 1),
) -> List[int]:
    """Build an F.pad-style (left, right, top, bottom) argument for 'SAME' padding."""
    pad_h = get_same_padding(input_size[0], kernel_size[0], stride[0], dilation[0])
    pad_w = get_same_padding(input_size[1], kernel_size[1], stride[1], dilation[1])
    half_w = pad_w // 2
    half_h = pad_h // 2
    return [half_w, pad_w - half_w, half_h, pad_h - half_h]
42
+
43
+
44
def pad_same(
        x,
        kernel_size: List[int],
        stride: List[int],
        dilation: List[int] = (1, 1),
        value: float = 0,
):
    """Dynamically pad NCHW input `x` with TF 'SAME' semantics for a following conv."""
    ih, iw = x.size()[-2:]
    pad_h = get_same_padding(ih, kernel_size[0], stride[0], dilation[0])
    pad_w = get_same_padding(iw, kernel_size[1], stride[1], dilation[1])
    # F.pad order is (left, right, top, bottom); extra pixel goes on the right/bottom
    pad_arg = (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2)
    return F.pad(x, pad_arg, value=value)
57
+
58
+
59
def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[int, bool]:
    """Resolve a padding argument to a concrete value plus a `dynamic` flag.

    Args:
        padding: '' / 'same' / 'valid' or an explicit numeric padding value.
        kernel_size: conv kernel size used to derive symmetric padding.
        **kwargs: forwarded to get_padding / is_static_pad (stride, dilation).

    Returns:
        Tuple of (padding, dynamic). When `dynamic` is True the caller must
        apply runtime 'SAME' padding (see pad_same); otherwise `padding` is a
        static value. Non-string `padding` input is passed through unchanged
        (so it keeps the caller's type).
        NOTE: original annotation `Tuple[Tuple, bool]` was wrong — the string
        cases resolve to an int.
    """
    dynamic = False
    if isinstance(padding, str):
        # for any string padding, the padding will be calculated for you, one of three ways
        padding = padding.lower()
        if padding == 'same':
            # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
            if is_static_pad(kernel_size, **kwargs):
                # static case, no extra overhead
                padding = get_padding(kernel_size, **kwargs)
            else:
                # dynamic 'SAME' padding, has runtime/GPU memory overhead
                padding = 0
                dynamic = True
        elif padding == 'valid':
            # 'VALID' padding, same as padding=0
            padding = 0
        else:
            # Default to PyTorch style 'same'-ish symmetric padding
            padding = get_padding(kernel_size, **kwargs)
    return padding, dynamic
parrot/lib/python3.10/site-packages/timm/layers/pos_embed_sincos.py ADDED
@@ -0,0 +1,444 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Sin-cos, fourier, rotary position embedding modules and functions
2
+
3
+ Hacked together by / Copyright 2022 Ross Wightman
4
+ """
5
+ import math
6
+ from typing import List, Tuple, Optional, Union
7
+
8
+ import torch
9
+ from torch import nn as nn
10
+
11
+ from .trace_utils import _assert
12
+
13
+
14
def pixel_freq_bands(
        num_bands: int,
        max_freq: float = 224.,
        linear_bands: bool = True,
        dtype: torch.dtype = torch.float32,
        device: Optional[torch.device] = None,
):
    """Frequency bands (scaled by pi) for pixel-coordinate position encoding."""
    if linear_bands:
        bands = torch.linspace(1.0, max_freq / 2, num_bands, dtype=dtype, device=device)
    else:
        # log-spaced: 2^0 .. 2^(log2(max_freq) - 1)
        exponents = torch.linspace(0, math.log(max_freq, 2) - 1, num_bands, dtype=dtype, device=device)
        bands = 2 ** exponents
    return bands * torch.pi
26
+
27
+
28
def freq_bands(
        num_bands: int,
        temperature: float = 10000.,
        step: int = 2,
        dtype: torch.dtype = torch.float32,
        device: Optional[torch.device] = None,
) -> torch.Tensor:
    """Transformer-style inverse-frequency bands: temperature^(-i / num_bands)."""
    exponents = torch.arange(0, num_bands, step, dtype=dtype, device=device) / num_bands
    return 1. / (temperature ** exponents)
37
+
38
+
39
def build_sincos2d_pos_embed(
        feat_shape: List[int],
        dim: int = 64,
        temperature: float = 10000.,
        reverse_coord: bool = False,
        interleave_sin_cos: bool = False,
        dtype: torch.dtype = torch.float32,
        device: Optional[torch.device] = None
) -> torch.Tensor:
    """Build a fixed 2D sin-cos position embedding table.

    Args:
        feat_shape: spatial (H, W) shape of the feature map.
        dim: embedding dimension, must be divisible by 4.
        temperature: inverse-frequency temperature.
        reverse_coord: stack grid order W, H instead of H, W.
        interleave_sin_cos: sin, cos, sin, cos stack instead of sin, sin, cos, cos.
        dtype: output dtype.
        device: output device.

    Returns:
        Tensor of shape (prod(feat_shape), dim).
    """
    assert dim % 4 == 0, 'Embed dimension must be divisible by 4 for sin-cos 2D position embedding'
    pos_dim = dim // 4
    bands = freq_bands(pos_dim, temperature=temperature, step=1, dtype=dtype, device=device)

    if reverse_coord:
        feat_shape = feat_shape[::-1]  # stack W, H instead of H, W
    coords = [torch.arange(s, device=device, dtype=dtype) for s in feat_shape]
    grid = torch.stack(torch.meshgrid(coords)).flatten(1).transpose(0, 1)
    pos2 = grid.unsqueeze(-1) * bands.unsqueeze(0)
    # FIXME add support for unflattened spatial dim?

    # dim=2 interleaves sin/cos per band, dim=1 groups all sin then all cos
    stack_dim = 2 if interleave_sin_cos else 1
    return torch.stack([torch.sin(pos2), torch.cos(pos2)], dim=stack_dim).flatten(1)
76
+
77
+
78
def build_fourier_pos_embed(
        feat_shape: List[int],
        bands: Optional[torch.Tensor] = None,
        num_bands: int = 64,
        max_res: int = 224,
        temperature: float = 10000.,
        linear_bands: bool = False,
        include_grid: bool = False,
        in_pixels: bool = True,
        ref_feat_shape: Optional[List[int]] = None,
        dtype: torch.dtype = torch.float32,
        device: Optional[torch.device] = None,
) -> List[torch.Tensor]:
    """Build sin/cos (and optionally the coord grid) for a Fourier position embedding.

    Args:
        feat_shape: Feature shape for embedding.
        bands: Pre-calculated frequency bands (generated here when None).
        num_bands: Number of frequency bands (determines output dim).
        max_res: Maximum resolution for pixel based freq.
        temperature: Temperature for non-pixel freq.
        linear_bands: Linear band spacing for pixel based freq.
        include_grid: Include the spatial grid in output.
        in_pixels: Output in pixel freq (coords in [-1, 1]) vs integer indices.
        ref_feat_shape: Reference feature shape for resize / fine-tune.
        dtype: Output dtype.
        device: Output device.

    Returns:
        [pos_sin, pos_cos], or [grid, pos_sin, pos_cos] when include_grid.
    """
    if bands is None:
        if in_pixels:
            bands = pixel_freq_bands(
                num_bands,
                float(max_res),
                linear_bands=linear_bands,
                dtype=dtype,
                device=device,
            )
        else:
            bands = freq_bands(
                num_bands,
                temperature=temperature,
                step=1,
                dtype=dtype,
                device=device,
            )
    else:
        # inherit device/dtype from the provided bands when unspecified
        if device is None:
            device = bands.device
        if dtype is None:
            dtype = bands.dtype

    if in_pixels:
        coords = [torch.linspace(-1., 1., steps=s, device=device, dtype=dtype) for s in feat_shape]
    else:
        coords = [torch.arange(s, device=device, dtype=dtype) for s in feat_shape]

    if ref_feat_shape is not None:
        # eva's scheme for resizing rope embeddings (ref shape = pretrain)
        coords = [c / f * r for c, f, r in zip(coords, feat_shape, ref_feat_shape)]

    grid = torch.stack(torch.meshgrid(coords), dim=-1).unsqueeze(-1)
    pos = grid * bands
    pos_sin, pos_cos = pos.sin(), pos.cos()
    return [grid, pos_sin, pos_cos] if include_grid else [pos_sin, pos_cos]
148
+
149
+
150
class FourierEmbed(nn.Module):
    """Fourier position embedding concatenated onto input features.

    Caches pixel frequency bands as a (non-persistent) buffer; the sin/cos
    embedding is built per forward call for the input's spatial shape.
    """

    def __init__(
            self,
            max_res: int = 224,
            num_bands: int = 64,
            concat_grid=True,
            keep_spatial=False,
    ):
        super().__init__()
        self.max_res = max_res
        self.num_bands = num_bands
        self.concat_grid = concat_grid
        self.keep_spatial = keep_spatial
        # FIX: pixel_freq_bands signature is (num_bands, max_freq); the original
        # call passed (max_res, num_bands), swapping the two arguments so the
        # band count was max_res rather than num_bands.
        self.register_buffer(
            'bands',
            pixel_freq_bands(num_bands, float(max_res)),
            persistent=False,
        )

    def forward(self, x):
        B, C = x.shape[:2]
        feat_shape = x.shape[2:]
        emb = build_fourier_pos_embed(
            feat_shape,
            self.bands,
            include_grid=self.concat_grid,
            dtype=x.dtype,
            device=x.device,
        )
        emb = torch.cat(emb, dim=-1)
        emb = emb.transpose(-1, -2).flatten(len(feat_shape))
        batch_expand = (B,) + (-1,) * (x.ndim - 1)

        # FIXME support nD
        if self.keep_spatial:
            # append embedding as extra channels (NCHW)
            x = torch.cat([x, emb.unsqueeze(0).expand(batch_expand).permute(0, 3, 1, 2)], dim=1)
        else:
            # flatten spatial dims -> (B, H*W, C + emb_dim)
            x = torch.cat([x.permute(0, 2, 3, 1), emb.unsqueeze(0).expand(batch_expand)], dim=-1)
            x = x.reshape(B, feat_shape.numel(), -1)

        return x
192
+
193
+
194
def rot(x):
    """Rotate channel pairs: (x0, x1, x2, x3, ...) -> (-x1, x0, -x3, x2, ...)."""
    evens = x[..., ::2]
    odds = x[..., 1::2]
    return torch.stack([-odds, evens], -1).reshape(x.shape)
196
+
197
+
198
def apply_rot_embed(x: torch.Tensor, sin_emb, cos_emb):
    """Apply rotary embedding given separate sin/cos tables."""
    if sin_emb.ndim == 3:
        # per-batch tables; broadcast over dim 1 (e.g. heads)
        cos_emb = cos_emb.unsqueeze(1).expand_as(x)
        sin_emb = sin_emb.unsqueeze(1).expand_as(x)
    return x * cos_emb + rot(x) * sin_emb
202
+
203
+
204
def apply_rot_embed_list(x: List[torch.Tensor], sin_emb, cos_emb):
    """Apply rotary embedding to each tensor in a list (a lone tensor is wrapped)."""
    tensors = [x] if isinstance(x, torch.Tensor) else x
    return [t * cos_emb + rot(t) * sin_emb for t in tensors]
208
+
209
+
210
def apply_rot_embed_cat(x: torch.Tensor, emb):
    """Apply rotary embedding from one table with sin and cos concatenated on the last dim."""
    sin_emb, cos_emb = emb.tensor_split(2, -1)
    if sin_emb.ndim == 3:
        # per-batch tables; broadcast over dim 1 (e.g. heads)
        cos_emb = cos_emb.unsqueeze(1).expand_as(x)
        sin_emb = sin_emb.unsqueeze(1).expand_as(x)
    return x * cos_emb + rot(x) * sin_emb
215
+
216
+
217
def apply_keep_indices_nlc(x, pos_embed, keep_indices):
    """Gather the pos-embed rows kept after token dropout (NLC layout)."""
    batch = x.shape[0]
    expanded = pos_embed.unsqueeze(0).expand(batch, -1, -1)
    gather_idx = keep_indices.unsqueeze(-1).expand(-1, -1, expanded.shape[-1])
    return expanded.gather(1, gather_idx)
221
+
222
+
223
def build_rotary_pos_embed(
        feat_shape: List[int],
        bands: Optional[torch.Tensor] = None,
        dim: int = 64,
        max_res: int = 224,
        temperature: float = 10000.,
        linear_bands: bool = False,
        in_pixels: bool = True,
        ref_feat_shape: Optional[List[int]] = None,
        dtype: torch.dtype = torch.float32,
        device: Optional[torch.device] = None,
):
    """Build sin/cos tables for rotary position embedding over a spatial grid.

    Args:
        feat_shape: Spatial shape of the target tensor for embedding.
        bands: Optional pre-generated frequency bands.
        dim: Output dimension of embedding tensor.
        max_res: Maximum resolution for pixel mode.
        temperature: Temperature (inv freq) for non-pixel mode.
        linear_bands: Linearly (instead of log) spaced bands for pixel mode.
        in_pixels: Pixel vs language (inv freq) mode.
        ref_feat_shape: Reference feature shape for resize / fine-tune.
        dtype: Output dtype.
        device: Output device.

    Returns:
        Tuple (sin_emb, cos_emb), flattened over spatial positions with
        channels repeated pairwise to line up with rotated channel pairs.
    """
    sin_emb, cos_emb = build_fourier_pos_embed(
        feat_shape,
        bands=bands,
        num_bands=dim // 4,
        max_res=max_res,
        temperature=temperature,
        linear_bands=linear_bands,
        in_pixels=in_pixels,
        ref_feat_shape=ref_feat_shape,
        device=device,
        dtype=dtype,
    )
    # this would be much nicer as a .numel() call to torch.Size(), but torchscript sucks
    num_spatial_dim = 1
    for dim_size in feat_shape:
        num_spatial_dim *= dim_size
    sin_emb = sin_emb.reshape(num_spatial_dim, -1).repeat_interleave(2, -1)
    cos_emb = cos_emb.reshape(num_spatial_dim, -1).repeat_interleave(2, -1)
    return sin_emb, cos_emb
270
+
271
+
272
class RotaryEmbedding(nn.Module):
    """ Rotary position embedding

    NOTE: This is my initial attempt at impl rotary embedding for spatial use, it has not
    been well tested, and will likely change. It will be moved to its own file.

    The following impl/resources were referenced for this impl:
    * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py
    * https://blog.eleuther.ai/rotary-embeddings/
    """

    def __init__(
            self,
            dim,
            max_res=224,
            temperature=10000,
            in_pixels=True,
            linear_bands: bool = False,
            feat_shape: Optional[List[int]] = None,
            ref_feat_shape: Optional[List[int]] = None,
    ):
        super().__init__()
        self.dim = dim
        self.max_res = max_res
        self.temperature = temperature
        self.in_pixels = in_pixels
        self.feat_shape = feat_shape
        self.ref_feat_shape = ref_feat_shape

        if feat_shape is None:
            # only cache bands; sin/cos tables are rebuilt per call in get_embed()
            if in_pixels:
                bands = pixel_freq_bands(
                    dim // 4,
                    float(max_res),
                    linear_bands=linear_bands,
                )
            else:
                bands = freq_bands(
                    dim // 4,
                    temperature=temperature,
                    step=1,
                )
            # FIX: removed stray debug `print(bands)` left in the original
            self.register_buffer(
                'bands',
                bands,
                persistent=False,
            )
            self.pos_embed_sin = None
            self.pos_embed_cos = None
        else:
            # cache full sin/cos embeddings if shape provided up front
            emb_sin, emb_cos = build_rotary_pos_embed(
                feat_shape=feat_shape,
                dim=dim,
                max_res=max_res,
                linear_bands=linear_bands,
                in_pixels=in_pixels,
                ref_feat_shape=self.ref_feat_shape,
            )
            self.bands = None
            self.register_buffer(
                'pos_embed_sin',
                emb_sin,
                persistent=False,
            )
            self.register_buffer(
                'pos_embed_cos',
                emb_cos,
                persistent=False,
            )

    def get_embed(self, shape: Optional[List[int]] = None):
        """Return (sin, cos) tables; rebuilds from cached bands when no full table is cached."""
        if self.bands is not None:
            # rebuild embeddings every call, use if target shape changes
            assert shape is not None
            return build_rotary_pos_embed(
                shape,
                self.bands,
                in_pixels=self.in_pixels,
            )
        else:
            return self.pos_embed_sin, self.pos_embed_cos

    def forward(self, x):
        # assuming channel-first tensor where spatial dim are >= 2
        sin_emb, cos_emb = self.get_embed(x.shape[2:])
        return apply_rot_embed(x, sin_emb, cos_emb)
361
+
362
+
363
class RotaryEmbeddingCat(nn.Module):
    """ Rotary position embedding w/ concatenated sin & cos

    The following impl/resources were referenced for this impl:
    * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py
    * https://blog.eleuther.ai/rotary-embeddings/
    """

    def __init__(
            self,
            dim,
            max_res=224,
            temperature=10000,
            in_pixels=True,
            linear_bands: bool = False,
            feat_shape: Optional[List[int]] = None,
            ref_feat_shape: Optional[List[int]] = None,
    ):
        super().__init__()
        self.dim = dim
        self.max_res = max_res
        self.temperature = temperature
        self.in_pixels = in_pixels
        self.feat_shape = feat_shape
        self.ref_feat_shape = ref_feat_shape

        if feat_shape is None:
            # only cache bands; the embedding is rebuilt per call in get_embed()
            if in_pixels:
                cached_bands = pixel_freq_bands(
                    dim // 4,
                    float(max_res),
                    linear_bands=linear_bands,
                )
            else:
                cached_bands = freq_bands(
                    dim // 4,
                    temperature=temperature,
                    step=1,
                )
            self.register_buffer(
                'bands',
                cached_bands,
                persistent=False,
            )
            self.pos_embed = None
        else:
            # cache the full (sin | cos) embedding if shape is provided up front
            embeds = build_rotary_pos_embed(
                feat_shape=feat_shape,
                dim=dim,
                max_res=max_res,
                linear_bands=linear_bands,
                in_pixels=in_pixels,
                ref_feat_shape=self.ref_feat_shape,
            )
            self.bands = None
            self.register_buffer(
                'pos_embed',
                torch.cat(embeds, -1),
                persistent=False,
            )

    def get_embed(self, shape: Optional[List[int]] = None):
        """Return the concatenated (sin | cos) table for `shape` or the cached table."""
        if self.bands is not None and shape is not None:
            # rebuild embeddings every call, use if target shape changes
            embeds = build_rotary_pos_embed(
                shape,
                self.bands,
                in_pixels=self.in_pixels,
                ref_feat_shape=self.ref_feat_shape,
            )
            return torch.cat(embeds, -1)
        elif self.pos_embed is not None:
            return self.pos_embed
        else:
            assert False, "get_embed() requires pre-computed pos_embed or valid shape w/ pre-computed bands"

    def forward(self, x):
        # assuming channel-first tensor where spatial dim are >= 2
        pos_embed = self.get_embed(x.shape[2:])
        return apply_rot_embed_cat(x, pos_embed)
parrot/lib/python3.10/site-packages/timm/layers/split_batchnorm.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Split BatchNorm
2
+
3
+ A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through
4
+ a separate BN layer. The first split is passed through the parent BN layers with weight/bias
5
+ keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn'
6
+ namespace.
7
+
8
+ This allows easily removing the auxiliary BN layers after training to efficiently
9
+ achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2,
10
+ 'Disentangled Learning via An Auxiliary BN'
11
+
12
+ Hacked together by / Copyright 2020 Ross Wightman
13
+ """
14
+ import torch
15
+ import torch.nn as nn
16
+
17
+
18
class SplitBatchNorm2d(torch.nn.BatchNorm2d):
    """BatchNorm that routes each of `num_splits` batch chunks through its own BN.

    The first chunk uses the parent BN (so its weight/bias/stats keep the
    original key names); remaining chunks use BN layers under `.aux_bn`.
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
                 track_running_stats=True, num_splits=2):
        super().__init__(num_features, eps, momentum, affine, track_running_stats)
        assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)'
        self.num_splits = num_splits
        # one auxiliary BN per extra split
        self.aux_bn = nn.ModuleList([
            nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats)
            for _ in range(num_splits - 1)])

    def forward(self, input: torch.Tensor):
        if not self.training:  # aux BN only relevant while training
            return super().forward(input)
        split_size = input.shape[0] // self.num_splits
        assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits"
        chunks = input.split(split_size)
        outputs = [super().forward(chunks[0])]
        outputs += [bn(chunk) for bn, chunk in zip(self.aux_bn, chunks[1:])]
        return torch.cat(outputs, dim=0)
39
+
40
+
41
def convert_splitbn_model(module, num_splits=2):
    """
    Recursively replace every ``torch.nn.modules.batchnorm._BatchNorm`` in
    `module` with a `SplitBatchNorm2d` carrying `num_splits` parallel BN layers.

    Args:
        module (torch.nn.Module): input module
        num_splits: number of separate batchnorm layers to split input across
    Example::
        >>> # model is an instance of torch.nn.Module
        >>> model = timm.models.convert_splitbn_model(model, num_splits=2)
    """
    # instance norm subclasses _BatchNorm internally; leave it untouched
    if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm):
        return module
    mod = module
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        mod = SplitBatchNorm2d(
            module.num_features, module.eps, module.momentum, module.affine,
            module.track_running_stats, num_splits=num_splits)
        # parent BN takes over the original running stats and affine params
        mod.running_mean = module.running_mean
        mod.running_var = module.running_var
        mod.num_batches_tracked = module.num_batches_tracked
        if module.affine:
            mod.weight.data = module.weight.data.clone().detach()
            mod.bias.data = module.bias.data.clone().detach()
        for aux in mod.aux_bn:
            # each aux BN starts from an independent copy of the stats/params
            aux.running_mean = module.running_mean.clone()
            aux.running_var = module.running_var.clone()
            aux.num_batches_tracked = module.num_batches_tracked.clone()
            if module.affine:
                aux.weight.data = module.weight.data.clone().detach()
                aux.bias.data = module.bias.data.clone().detach()
    for name, child in module.named_children():
        mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits))
    del module
    return mod
parrot/lib/python3.10/site-packages/timm/layers/trace_utils.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# traceable assert: prefer torch._assert when the installed torch provides it
try:
    from torch import _assert
except ImportError:
    def _assert(condition: bool, message: str):
        """Fallback for older torch versions without torch._assert."""
        assert condition, message
6
+
7
+
8
def _float_to_int(x: float) -> int:
    """
    Symbolic tracing helper to substitute for inbuilt `int`.
    Hint: Inbuilt `int` can't accept an argument of type `Proxy`
    """
    truncated = int(x)
    return truncated
parrot/lib/python3.10/site-packages/timm/optim/__init__.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .adabelief import AdaBelief
2
+ from .adafactor import Adafactor
3
+ from .adahessian import Adahessian
4
+ from .adamp import AdamP
5
+ from .adamw import AdamW
6
+ from .adan import Adan
7
+ from .lamb import Lamb
8
+ from .lars import Lars
9
+ from .lookahead import Lookahead
10
+ from .madgrad import MADGRAD
11
+ from .nadam import Nadam
12
+ from .nvnovograd import NvNovoGrad
13
+ from .radam import RAdam
14
+ from .rmsprop_tf import RMSpropTF
15
+ from .sgdp import SGDP
16
+ from .lion import Lion
17
+ from .optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs
parrot/lib/python3.10/site-packages/timm/optim/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (880 Bytes). View file
 
parrot/lib/python3.10/site-packages/timm/optim/__pycache__/adabelief.cpython-310.pyc ADDED
Binary file (6.52 kB). View file
 
parrot/lib/python3.10/site-packages/timm/optim/__pycache__/adafactor.cpython-310.pyc ADDED
Binary file (5.42 kB). View file
 
parrot/lib/python3.10/site-packages/timm/optim/__pycache__/adamp.cpython-310.pyc ADDED
Binary file (3.14 kB). View file
 
parrot/lib/python3.10/site-packages/timm/optim/__pycache__/lars.cpython-310.pyc ADDED
Binary file (3.89 kB). View file
 
parrot/lib/python3.10/site-packages/timm/optim/__pycache__/lion.cpython-310.pyc ADDED
Binary file (5.11 kB). View file
 
parrot/lib/python3.10/site-packages/timm/optim/__pycache__/lookahead.cpython-310.pyc ADDED
Binary file (2.65 kB). View file
 
parrot/lib/python3.10/site-packages/timm/optim/__pycache__/madgrad.cpython-310.pyc ADDED
Binary file (4.95 kB). View file
 
parrot/lib/python3.10/site-packages/timm/optim/__pycache__/nadam.cpython-310.pyc ADDED
Binary file (3.24 kB). View file
 
parrot/lib/python3.10/site-packages/timm/optim/__pycache__/nadamw.cpython-310.pyc ADDED
Binary file (8.59 kB). View file
 
parrot/lib/python3.10/site-packages/timm/optim/__pycache__/nvnovograd.cpython-310.pyc ADDED
Binary file (3.8 kB). View file
 
parrot/lib/python3.10/site-packages/timm/optim/__pycache__/optim_factory.cpython-310.pyc ADDED
Binary file (10.9 kB). View file
 
parrot/lib/python3.10/site-packages/timm/optim/__pycache__/rmsprop_tf.cpython-310.pyc ADDED
Binary file (4.71 kB). View file
 
parrot/lib/python3.10/site-packages/timm/optim/__pycache__/sgdp.cpython-310.pyc ADDED
Binary file (2.01 kB). View file
 
parrot/lib/python3.10/site-packages/timm/optim/adabelief.py ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import torch
3
+ from torch.optim.optimizer import Optimizer
4
+
5
+
6
class AdaBelief(Optimizer):
    r"""Implements the AdaBelief optimizer (modified from Adam in PyTorch).

    AdaBelief adapts the step size by the "belief" in observed gradients: the
    second moment tracks the gradient's deviation from its own EMA
    (grad - exp_avg)^2 rather than the raw squared gradient.

    Reference: AdaBelief Optimizer, adapting stepsizes by the belief in observed
    gradients, NeurIPS 2020.
    For a complete table of recommended hyperparameters, see
    https://github.com/juntang-zhuang/Adabelief-Optimizer
    For example train/args for EfficientNet see these gists
    - link to train_scipt: https://gist.github.com/juntang-zhuang/0a501dd51c02278d952cf159bc233037
    - link to args.yaml: https://gist.github.com/juntang-zhuang/517ce3c27022b908bb93f78e4f786dc3

    Args:
        params: iterable of parameters to optimize or dicts defining parameter groups
        lr: learning rate (default: 1e-3)
        betas: coefficients for running averages of gradient and its
            "variance" (default: (0.9, 0.999))
        eps: term added to the denominator for numerical stability (default: 1e-16)
        weight_decay: weight decay / L2 penalty (default: 0)
        amsgrad: use the AMSGrad variant (default: False)
        decoupled_decay: apply decoupled (AdamW-style) weight decay (default: True)
        fixed_decay: with decoupled_decay, decay by `wd` instead of `lr * wd`
            (default: False)
        rectify: perform the RAdam-style rectified update (default: True)
        degenerated_to_sgd: fall back to an SGD step when the gradient variance
            estimate is not yet tractable (default: True)
    """

    def __init__(
            self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16, weight_decay=0, amsgrad=False,
            decoupled_decay=True, fixed_decay=False, rectify=True, degenerated_to_sgd=True):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))

        # groups that override betas need a private rectification buffer
        if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
            for param in params:
                if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):
                    param['buffer'] = [[None, None, None] for _ in range(10)]

        defaults = dict(
            lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad,
            degenerated_to_sgd=degenerated_to_sgd, decoupled_decay=decoupled_decay, rectify=rectify,
            fixed_decay=fixed_decay, buffer=[[None, None, None] for _ in range(10)])
        super(AdaBelief, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdaBelief, self).__setstate__(state)
        for group in self.param_groups:
            # older checkpoints may lack the amsgrad key
            group.setdefault('amsgrad', False)

    @torch.no_grad()
    def reset(self):
        """Reset per-parameter optimizer state (step count and moment EMAs)."""
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['step'] = 0
                state['exp_avg'] = torch.zeros_like(p)      # EMA of gradients
                state['exp_avg_var'] = torch.zeros_like(p)  # EMA of (grad - EMA)^2
                if group['amsgrad']:
                    # running max of the variance EMA
                    state['max_exp_avg_var'] = torch.zeros_like(p)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.dtype in {torch.float16, torch.bfloat16}:
                    # compute in fp32 for numerical stability
                    grad = grad.float()
                if grad.is_sparse:
                    raise RuntimeError(
                        'AdaBelief does not support sparse gradients, please consider SparseAdam instead')

                p_fp32 = p
                if p.dtype in {torch.float16, torch.bfloat16}:
                    p_fp32 = p_fp32.float()

                amsgrad = group['amsgrad']
                beta1, beta2 = group['betas']
                state = self.state[p]
                if len(state) == 0:
                    # lazy per-parameter state init on first use
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_fp32)
                    state['exp_avg_var'] = torch.zeros_like(p_fp32)
                    if amsgrad:
                        state['max_exp_avg_var'] = torch.zeros_like(p_fp32)

                # weight decay: decoupled (AdamW-style) or classic L2-on-grad
                if group['decoupled_decay']:
                    if not group['fixed_decay']:
                        p_fp32.mul_(1.0 - group['lr'] * group['weight_decay'])
                    else:
                        p_fp32.mul_(1.0 - group['weight_decay'])
                elif group['weight_decay'] != 0:
                    grad.add_(p_fp32, alpha=group['weight_decay'])

                exp_avg, exp_avg_var = state['exp_avg'], state['exp_avg_var']
                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']

                # first moment: EMA of grad; second moment: EMA of (grad - EMA)^2
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                grad_residual = grad - exp_avg
                exp_avg_var.mul_(beta2).addcmul_(grad_residual, grad_residual, value=1 - beta2)

                if amsgrad:
                    max_exp_avg_var = state['max_exp_avg_var']
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_var, exp_avg_var.add_(group['eps']), out=max_exp_avg_var)
                    # Use the max. for normalizing running avg. of gradient
                    denom = (max_exp_avg_var.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    # NOTE: add_ deliberately mutates exp_avg_var (eps accumulates), as upstream
                    denom = (exp_avg_var.add_(group['eps']).sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])

                if not group['rectify']:
                    # plain Adam-style update
                    step_size = group['lr'] / bias_correction1
                    p_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    # Rectified update, forked from RAdam; cache per-step factors
                    buffered = group['buffer'][int(state['step'] % 10)]
                    if state['step'] == buffered[0]:
                        num_sma, step_size = buffered[1], buffered[2]
                    else:
                        buffered[0] = state['step']
                        beta2_t = beta2 ** state['step']
                        num_sma_max = 2 / (1 - beta2) - 1
                        num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                        buffered[1] = num_sma
                        # more conservative since it's an approximated value
                        if num_sma >= 5:
                            step_size = math.sqrt(
                                (1 - beta2_t) *
                                (num_sma - 4) / (num_sma_max - 4) *
                                (num_sma - 2) / num_sma *
                                num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step'])
                        elif group['degenerated_to_sgd']:
                            step_size = 1.0 / (1 - beta1 ** state['step'])
                        else:
                            step_size = -1  # sentinel: skip the update entirely
                        buffered[2] = step_size

                    if num_sma >= 5:
                        denom = exp_avg_var.sqrt().add_(group['eps'])
                        p_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr'])
                    elif step_size > 0:
                        p_fp32.add_(exp_avg, alpha=-step_size * group['lr'])

                if p.dtype in {torch.float16, torch.bfloat16}:
                    # write the fp32 result back into the low-precision param
                    p.copy_(p_fp32)

        return loss
parrot/lib/python3.10/site-packages/timm/optim/adafactor.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Adafactor Optimizer
2
+
3
+ Lifted from https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py
4
+
5
+ Original header/copyright below.
6
+
7
+ """
8
+ # Copyright (c) Facebook, Inc. and its affiliates.
9
+ #
10
+ # This source code is licensed under the MIT license found in the
11
+ # LICENSE file in the root directory of this source tree.
12
+ import torch
13
+ import math
14
+
15
+
16
class Adafactor(torch.optim.Optimizer):
    """Implements the Adafactor algorithm.

    Based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`
    (see https://arxiv.org/abs/1804.04235)

    Note that this optimizer internally adjusts the learning rate depending on the
    *scale_parameter*, *relative_step* and *warmup_init* options.

    To use a manual (external) learning rate schedule you should set `scale_parameter=False` and
    `relative_step=False`.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups
        lr (float, optional): external learning rate (default: None). When None (or 0),
            a relative, step-count-dependent learning rate is computed internally.
        eps (float): regularization constant added to the squared gradient (default: 1e-30)
        eps_scale (float): regularization constant for the parameter scale (default: 1e-3)
        clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0)
        decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8)
        betas (tuple, optional): if given, betas[0] is used as beta1 to enable first-moment
            averaging; betas[1] is ignored (default: None)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True)
        warmup_init (bool): time-dependent learning rate computation depends on
            whether warm-up initialization is being used (default: False)
    """

    def __init__(self, params, lr=None, eps=1e-30, eps_scale=1e-3, clip_threshold=1.0,
                 decay_rate=-0.8, betas=None, weight_decay=0.0, scale_parameter=True, warmup_init=False):
        relative_step = not lr  # no external lr -> derive it from the step count
        if warmup_init and not relative_step:
            raise ValueError('warmup_init requires relative_step=True')

        beta1 = None if betas is None else betas[0]  # make it compat with standard betas arg
        defaults = dict(lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold, decay_rate=decay_rate,
                        beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter,
                        relative_step=relative_step, warmup_init=warmup_init)
        super().__init__(params, defaults)

    @staticmethod
    def _get_lr(param_group, param_state):
        # With relative_step on, recompute (and cache in the group) the effective lr
        # from the step count and, optionally, the parameter RMS.
        if param_group['relative_step']:
            min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2
            lr_t = min(min_step, 1.0 / math.sqrt(param_state['step']))
            param_scale = 1.0
            if param_group['scale_parameter']:
                param_scale = max(param_group['eps_scale'], param_state['RMS'])
            param_group['lr'] = lr_t * param_scale
        return param_group['lr']

    @staticmethod
    def _get_options(param_group, param_shape):
        # Factor the second moment only for params with rank >= 2; first-moment
        # averaging is enabled only when a beta1 was supplied.
        factored = len(param_shape) >= 2
        use_first_moment = param_group['beta1'] is not None
        return factored, use_first_moment

    @staticmethod
    def _rms(tensor):
        # Root mean square over all elements.
        return tensor.norm(2) / (tensor.numel() ** 0.5)

    def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):
        # Rank-1 reconstruction of the inverse-sqrt second-moment estimate from
        # its factored row/column statistics (Eq. 4 region of the paper).
        r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
        c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
        return torch.mul(r_factor, c_factor)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.dtype in {torch.float16, torch.bfloat16}:
                    grad = grad.float()  # accumulate statistics in fp32
                if grad.is_sparse:
                    raise RuntimeError('Adafactor does not support sparse gradients.')

                state = self.state[p]

                factored, use_first_moment = self._get_options(group, grad.shape)
                # State Initialization
                if len(state) == 0:
                    state['step'] = 0

                    if use_first_moment:
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(grad)
                    if factored:
                        # Row/column factored second-moment stats (sublinear memory)
                        state['exp_avg_sq_row'] = torch.zeros(grad.shape[:-1]).to(grad)
                        state['exp_avg_sq_col'] = torch.zeros(grad.shape[:-2] + grad.shape[-1:]).to(grad)
                    else:
                        state['exp_avg_sq'] = torch.zeros_like(grad)

                    state['RMS'] = 0
                else:
                    # Keep state tensors on the same device/dtype as the (possibly upcast) grad
                    if use_first_moment:
                        state['exp_avg'] = state['exp_avg'].to(grad)
                    if factored:
                        state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad)
                        state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad)
                    else:
                        state['exp_avg_sq'] = state['exp_avg_sq'].to(grad)

                p_fp32 = p
                if p.dtype in {torch.float16, torch.bfloat16}:
                    p_fp32 = p_fp32.float()  # update in fp32, copy back at the end

                state['step'] += 1
                state['RMS'] = self._rms(p_fp32)
                lr_t = self._get_lr(group, state)

                # Step-dependent decay for the second-moment running average
                beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])
                update = grad ** 2 + group['eps']
                if factored:
                    exp_avg_sq_row = state['exp_avg_sq_row']
                    exp_avg_sq_col = state['exp_avg_sq_col']

                    exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t)
                    exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t)

                    # Approximation of exponential moving average of square of gradient
                    update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
                    update.mul_(grad)
                else:
                    exp_avg_sq = state['exp_avg_sq']

                    exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t)
                    update = exp_avg_sq.rsqrt().mul_(grad)

                # Clip so the update's RMS does not exceed clip_threshold
                update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0))
                update.mul_(lr_t)

                if use_first_moment:
                    exp_avg = state['exp_avg']
                    exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1'])
                    update = exp_avg

                if group['weight_decay'] != 0:
                    # L2 penalty applied directly to the parameter, scaled by the effective lr
                    p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * lr_t)

                # in-place subtraction; avoids materializing a negated temp tensor
                p_fp32.add_(update, alpha=-1.0)
                if p.dtype in {torch.float16, torch.bfloat16}:
                    p.copy_(p_fp32)

        return loss