ZTWHHH commited on
Commit
f7307d9
·
verified ·
1 Parent(s): 9cbb62b

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. evalkit_internvl/lib/python3.10/site-packages/asttokens-2.4.1.dist-info/INSTALLER +1 -0
  3. evalkit_internvl/lib/python3.10/site-packages/asttokens-2.4.1.dist-info/LICENSE +201 -0
  4. evalkit_internvl/lib/python3.10/site-packages/asttokens-2.4.1.dist-info/METADATA +124 -0
  5. evalkit_internvl/lib/python3.10/site-packages/asttokens-2.4.1.dist-info/RECORD +22 -0
  6. evalkit_internvl/lib/python3.10/site-packages/asttokens-2.4.1.dist-info/REQUESTED +0 -0
  7. evalkit_internvl/lib/python3.10/site-packages/asttokens-2.4.1.dist-info/WHEEL +6 -0
  8. evalkit_internvl/lib/python3.10/site-packages/asttokens-2.4.1.dist-info/top_level.txt +1 -0
  9. evalkit_internvl/lib/python3.10/site-packages/cachetools-5.5.0.dist-info/INSTALLER +1 -0
  10. evalkit_internvl/lib/python3.10/site-packages/cachetools-5.5.0.dist-info/LICENSE +20 -0
  11. evalkit_internvl/lib/python3.10/site-packages/cachetools-5.5.0.dist-info/METADATA +150 -0
  12. evalkit_internvl/lib/python3.10/site-packages/cachetools-5.5.0.dist-info/RECORD +13 -0
  13. evalkit_internvl/lib/python3.10/site-packages/cachetools-5.5.0.dist-info/REQUESTED +0 -0
  14. evalkit_internvl/lib/python3.10/site-packages/cachetools-5.5.0.dist-info/WHEEL +5 -0
  15. evalkit_internvl/lib/python3.10/site-packages/cachetools-5.5.0.dist-info/top_level.txt +1 -0
  16. evalkit_internvl/lib/python3.10/site-packages/nvidia_nccl_cu11-2.19.3.dist-info/INSTALLER +1 -0
  17. evalkit_internvl/lib/python3.10/site-packages/nvidia_nccl_cu11-2.19.3.dist-info/License.txt +31 -0
  18. evalkit_internvl/lib/python3.10/site-packages/nvidia_nccl_cu11-2.19.3.dist-info/METADATA +35 -0
  19. evalkit_internvl/lib/python3.10/site-packages/nvidia_nccl_cu11-2.19.3.dist-info/RECORD +17 -0
  20. evalkit_internvl/lib/python3.10/site-packages/nvidia_nccl_cu11-2.19.3.dist-info/WHEEL +5 -0
  21. evalkit_internvl/lib/python3.10/site-packages/nvidia_nccl_cu11-2.19.3.dist-info/top_level.txt +1 -0
  22. evalkit_internvl/lib/python3.10/site-packages/rsa-4.9.dist-info/INSTALLER +1 -0
  23. evalkit_internvl/lib/python3.10/site-packages/rsa-4.9.dist-info/LICENSE +13 -0
  24. evalkit_internvl/lib/python3.10/site-packages/rsa-4.9.dist-info/METADATA +106 -0
  25. evalkit_internvl/lib/python3.10/site-packages/rsa-4.9.dist-info/RECORD +42 -0
  26. evalkit_internvl/lib/python3.10/site-packages/rsa-4.9.dist-info/REQUESTED +0 -0
  27. evalkit_internvl/lib/python3.10/site-packages/rsa-4.9.dist-info/WHEEL +4 -0
  28. evalkit_internvl/lib/python3.10/site-packages/rsa-4.9.dist-info/entry_points.txt +8 -0
  29. evalkit_internvl/lib/python3.10/site-packages/safetensors/__init__.py +9 -0
  30. evalkit_internvl/lib/python3.10/site-packages/safetensors/__init__.pyi +73 -0
  31. evalkit_internvl/lib/python3.10/site-packages/safetensors/__pycache__/flax.cpython-310.pyc +0 -0
  32. evalkit_internvl/lib/python3.10/site-packages/safetensors/__pycache__/numpy.cpython-310.pyc +0 -0
  33. evalkit_internvl/lib/python3.10/site-packages/safetensors/__pycache__/paddle.cpython-310.pyc +0 -0
  34. evalkit_internvl/lib/python3.10/site-packages/safetensors/__pycache__/tensorflow.cpython-310.pyc +0 -0
  35. evalkit_internvl/lib/python3.10/site-packages/safetensors/flax.py +138 -0
  36. evalkit_internvl/lib/python3.10/site-packages/safetensors/mlx.py +138 -0
  37. evalkit_internvl/lib/python3.10/site-packages/safetensors/numpy.py +176 -0
  38. evalkit_internvl/lib/python3.10/site-packages/safetensors/paddle.py +138 -0
  39. evalkit_internvl/lib/python3.10/site-packages/safetensors/py.typed +0 -0
  40. evalkit_internvl/lib/python3.10/site-packages/safetensors/tensorflow.py +137 -0
  41. evalkit_internvl/lib/python3.10/site-packages/safetensors/torch.py +503 -0
  42. evalkit_internvl/lib/python3.10/site-packages/transformers/__pycache__/tokenization_utils_base.cpython-310.pyc +3 -0
  43. evalkit_internvl/lib/python3.10/site-packages/transformers/kernels/mra/cuda_kernel.cu +383 -0
  44. evalkit_internvl/lib/python3.10/site-packages/transformers/kernels/mra/cuda_kernel.h +59 -0
  45. evalkit_internvl/lib/python3.10/site-packages/transformers/kernels/mra/cuda_launch.cu +154 -0
  46. evalkit_internvl/lib/python3.10/site-packages/transformers/kernels/mra/cuda_launch.h +39 -0
  47. evalkit_internvl/lib/python3.10/site-packages/transformers/kernels/mra/torch_extension.cpp +78 -0
  48. evalkit_internvl/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda.h +9 -0
  49. evalkit_internvl/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda_device.h +79 -0
  50. evalkit_internvl/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.cu +588 -0
.gitattributes CHANGED
@@ -1603,3 +1603,4 @@ evalkit_cambrian/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-bui
1603
  evalkit_internvl/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1604
  evalkit_internvl/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/modeling_speecht5.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1605
  evalkit_internvl/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/modeling_seamless_m4t_v2.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
1603
  evalkit_internvl/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1604
  evalkit_internvl/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/modeling_speecht5.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1605
  evalkit_internvl/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/modeling_seamless_m4t_v2.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1606
+ evalkit_internvl/lib/python3.10/site-packages/transformers/__pycache__/tokenization_utils_base.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
evalkit_internvl/lib/python3.10/site-packages/asttokens-2.4.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
evalkit_internvl/lib/python3.10/site-packages/asttokens-2.4.1.dist-info/LICENSE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "{}"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright {yyyy} {name of copyright owner}
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
evalkit_internvl/lib/python3.10/site-packages/asttokens-2.4.1.dist-info/METADATA ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: asttokens
3
+ Version: 2.4.1
4
+ Summary: Annotate AST trees with source code positions
5
+ Home-page: https://github.com/gristlabs/asttokens
6
+ Author: Dmitry Sagalovskiy, Grist Labs
7
+ Author-email: dmitry@getgrist.com
8
+ License: Apache 2.0
9
+ Keywords: code,ast,parse,tokenize,refactor
10
+ Classifier: Development Status :: 5 - Production/Stable
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
13
+ Classifier: Topic :: Software Development :: Code Generators
14
+ Classifier: Topic :: Software Development :: Compilers
15
+ Classifier: Topic :: Software Development :: Interpreters
16
+ Classifier: Topic :: Software Development :: Pre-processors
17
+ Classifier: Environment :: Console
18
+ Classifier: Operating System :: OS Independent
19
+ Classifier: Programming Language :: Python :: 2
20
+ Classifier: Programming Language :: Python :: 2.7
21
+ Classifier: Programming Language :: Python :: 3
22
+ Classifier: Programming Language :: Python :: 3.5
23
+ Classifier: Programming Language :: Python :: 3.6
24
+ Classifier: Programming Language :: Python :: 3.7
25
+ Classifier: Programming Language :: Python :: 3.8
26
+ Classifier: Programming Language :: Python :: 3.9
27
+ Classifier: Programming Language :: Python :: 3.10
28
+ Classifier: Programming Language :: Python :: 3.11
29
+ Classifier: Programming Language :: Python :: 3.12
30
+ Classifier: Programming Language :: Python :: Implementation :: CPython
31
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
32
+ License-File: LICENSE
33
+ Requires-Dist: six >=1.12.0
34
+ Requires-Dist: typing ; python_version < "3.5"
35
+ Provides-Extra: astroid
36
+ Requires-Dist: astroid <2,>=1 ; (python_version < "3") and extra == 'astroid'
37
+ Requires-Dist: astroid <4,>=2 ; (python_version >= "3") and extra == 'astroid'
38
+ Provides-Extra: test
39
+ Requires-Dist: pytest ; extra == 'test'
40
+ Requires-Dist: astroid <2,>=1 ; (python_version < "3") and extra == 'test'
41
+ Requires-Dist: astroid <4,>=2 ; (python_version >= "3") and extra == 'test'
42
+
43
+ ASTTokens
44
+ =========
45
+
46
+ .. image:: https://img.shields.io/pypi/v/asttokens.svg
47
+ :target: https://pypi.python.org/pypi/asttokens/
48
+ .. image:: https://img.shields.io/pypi/pyversions/asttokens.svg
49
+ :target: https://pypi.python.org/pypi/asttokens/
50
+ .. image:: https://github.com/gristlabs/asttokens/actions/workflows/build-and-test.yml/badge.svg
51
+ :target: https://github.com/gristlabs/asttokens/actions/workflows/build-and-test.yml
52
+ .. image:: https://readthedocs.org/projects/asttokens/badge/?version=latest
53
+ :target: http://asttokens.readthedocs.io/en/latest/index.html
54
+ .. image:: https://coveralls.io/repos/github/gristlabs/asttokens/badge.svg
55
+ :target: https://coveralls.io/github/gristlabs/asttokens
56
+
57
+ .. Start of user-guide
58
+
59
+ The ``asttokens`` module annotates Python abstract syntax trees (ASTs) with the positions of tokens
60
+ and text in the source code that generated them.
61
+
62
+ It makes it possible for tools that work with logical AST nodes to find the particular text that
63
+ resulted in those nodes, for example for automated refactoring or highlighting.
64
+
65
+ Installation
66
+ ------------
67
+ asttokens is available on PyPI: https://pypi.python.org/pypi/asttokens/::
68
+
69
+ pip install asttokens
70
+
71
+ The code is on GitHub: https://github.com/gristlabs/asttokens.
72
+
73
+ The API Reference is here: http://asttokens.readthedocs.io/en/latest/api-index.html.
74
+
75
+ Usage
76
+ -----
77
+ ASTTokens works with both Python2 and Python3.
78
+
79
+ ASTTokens can annotate both trees built by `ast <https://docs.python.org/2/library/ast.html>`_,
80
+ AND those built by `astroid <https://github.com/PyCQA/astroid>`_.
81
+
82
+ Here's an example:
83
+
84
+ .. code-block:: python
85
+
86
+ import asttokens, ast
87
+ source = "Robot('blue').walk(steps=10*n)"
88
+ atok = asttokens.ASTTokens(source, parse=True)
89
+
90
+ Once the tree has been marked, nodes get ``.first_token``, ``.last_token`` attributes, and
91
+ the ``ASTTokens`` object offers helpful methods:
92
+
93
+ .. code-block:: python
94
+
95
+ attr_node = next(n for n in ast.walk(atok.tree) if isinstance(n, ast.Attribute))
96
+ print(atok.get_text(attr_node))
97
+ start, end = attr_node.last_token.startpos, attr_node.last_token.endpos
98
+ print(atok.text[:start] + 'RUN' + atok.text[end:])
99
+
100
+ Which produces this output:
101
+
102
+ .. code-block:: text
103
+
104
+ Robot('blue').walk
105
+ Robot('blue').RUN(steps=10*n)
106
+
107
+ The ``ASTTokens`` object also offers methods to walk and search the list of tokens that make up
108
+ the code (or a particular AST node), which is more useful and powerful than dealing with the text
109
+ directly.
110
+
111
+
112
+ Contribute
113
+ ----------
114
+
115
+ To contribute:
116
+
117
+ 1. Fork this repository, and clone your fork.
118
+ 2. Install the package with test dependencies (ideally in a virtualenv) with::
119
+
120
+ pip install -e '.[test]'
121
+
122
+ 3. Run tests in your current interpreter with the command ``pytest`` or ``python -m pytest``.
123
+ 4. Run tests across all supported interpreters with the ``tox`` command. You will need to have the interpreters installed separately. We recommend ``pyenv`` for that. Use ``tox -p auto`` to run the tests in parallel.
124
+ 5. By default certain tests which take a very long time to run are skipped, but they are run on travis CI. To run them locally, set the environment variable ``ASTTOKENS_SLOW_TESTS``. For example run ``ASTTOKENS_SLOW_TESTS=1 tox`` to run the full suite of tests.
evalkit_internvl/lib/python3.10/site-packages/asttokens-2.4.1.dist-info/RECORD ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ asttokens-2.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ asttokens-2.4.1.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
3
+ asttokens-2.4.1.dist-info/METADATA,sha256=NVktxMNmzWSV0jf8-LgkKQZ2w7HmHI_4ZHcuLTg6y-A,5197
4
+ asttokens-2.4.1.dist-info/RECORD,,
5
+ asttokens-2.4.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ asttokens-2.4.1.dist-info/WHEEL,sha256=iYlv5fX357PQyRT2o6tw1bN-YcKFFHKqB_LwHO5wP-g,110
7
+ asttokens-2.4.1.dist-info/top_level.txt,sha256=nJDweSD7_NBhOlR3c8bkKJMKM-pxlAS8Kyh8GcCT2dk,10
8
+ asttokens/__init__.py,sha256=8eONA3X-9s93-v-2gEoz4649fDUpvzBthFB5Ld7dHAg,962
9
+ asttokens/__pycache__/__init__.cpython-310.pyc,,
10
+ asttokens/__pycache__/astroid_compat.cpython-310.pyc,,
11
+ asttokens/__pycache__/asttokens.cpython-310.pyc,,
12
+ asttokens/__pycache__/line_numbers.cpython-310.pyc,,
13
+ asttokens/__pycache__/mark_tokens.cpython-310.pyc,,
14
+ asttokens/__pycache__/util.cpython-310.pyc,,
15
+ asttokens/__pycache__/version.cpython-310.pyc,,
16
+ asttokens/astroid_compat.py,sha256=ilaVBRWcHpQ3ZLBSBs9usUwnLW3Orfn6sM89cMN8zNI,586
17
+ asttokens/asttokens.py,sha256=WIExmOOKNK4OMzCwgmFKK7pJSvp90a40zf27_Ht03W4,18867
18
+ asttokens/line_numbers.py,sha256=z3E38XvQaocXm_5MW8-jimFr-In5iMExFkmLPHBxenY,2842
19
+ asttokens/mark_tokens.py,sha256=Yw9sNJ8BgQ7BVohzKjCAuSowj2fT4tVrEnby9D4g0gA,22956
20
+ asttokens/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
21
+ asttokens/util.py,sha256=VzwdnLd_ZLc89mt6BBPGkDhdWKhNRfuFTJnFOVzC5_Q,17889
22
+ asttokens/version.py,sha256=LgDSW5laOqA_7i2VW0cZ9QumZREigUxs3ZCBzJ1EG0o,22
evalkit_internvl/lib/python3.10/site-packages/asttokens-2.4.1.dist-info/REQUESTED ADDED
File without changes
evalkit_internvl/lib/python3.10/site-packages/asttokens-2.4.1.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.41.2)
3
+ Root-Is-Purelib: true
4
+ Tag: py2-none-any
5
+ Tag: py3-none-any
6
+
evalkit_internvl/lib/python3.10/site-packages/asttokens-2.4.1.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ asttokens
evalkit_internvl/lib/python3.10/site-packages/cachetools-5.5.0.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
evalkit_internvl/lib/python3.10/site-packages/cachetools-5.5.0.dist-info/LICENSE ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ The MIT License (MIT)
2
+
3
+ Copyright (c) 2014-2024 Thomas Kemmer
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
6
+ this software and associated documentation files (the "Software"), to deal in
7
+ the Software without restriction, including without limitation the rights to
8
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
9
+ the Software, and to permit persons to whom the Software is furnished to do so,
10
+ subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
17
+ FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
18
+ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
evalkit_internvl/lib/python3.10/site-packages/cachetools-5.5.0.dist-info/METADATA ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: cachetools
3
+ Version: 5.5.0
4
+ Summary: Extensible memoizing collections and decorators
5
+ Home-page: https://github.com/tkem/cachetools/
6
+ Author: Thomas Kemmer
7
+ Author-email: tkemmer@computer.org
8
+ License: MIT
9
+ Classifier: Development Status :: 5 - Production/Stable
10
+ Classifier: Environment :: Other Environment
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: License :: OSI Approved :: MIT License
13
+ Classifier: Operating System :: OS Independent
14
+ Classifier: Programming Language :: Python
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Programming Language :: Python :: 3.7
17
+ Classifier: Programming Language :: Python :: 3.8
18
+ Classifier: Programming Language :: Python :: 3.9
19
+ Classifier: Programming Language :: Python :: 3.10
20
+ Classifier: Programming Language :: Python :: 3.11
21
+ Classifier: Programming Language :: Python :: 3.12
22
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
23
+ Requires-Python: >=3.7
24
+ License-File: LICENSE
25
+
26
+ cachetools
27
+ ========================================================================
28
+
29
+ .. image:: https://img.shields.io/pypi/v/cachetools
30
+ :target: https://pypi.org/project/cachetools/
31
+ :alt: Latest PyPI version
32
+
33
+ .. image:: https://img.shields.io/github/actions/workflow/status/tkem/cachetools/ci.yml
34
+ :target: https://github.com/tkem/cachetools/actions/workflows/ci.yml
35
+ :alt: CI build status
36
+
37
+ .. image:: https://img.shields.io/readthedocs/cachetools
38
+ :target: https://cachetools.readthedocs.io/
39
+ :alt: Documentation build status
40
+
41
+ .. image:: https://img.shields.io/codecov/c/github/tkem/cachetools/master.svg
42
+ :target: https://codecov.io/gh/tkem/cachetools
43
+ :alt: Test coverage
44
+
45
+ .. image:: https://img.shields.io/librariesio/sourcerank/pypi/cachetools
46
+ :target: https://libraries.io/pypi/cachetools
47
+ :alt: Libraries.io SourceRank
48
+
49
+ .. image:: https://img.shields.io/github/license/tkem/cachetools
50
+ :target: https://raw.github.com/tkem/cachetools/master/LICENSE
51
+ :alt: License
52
+
53
+ .. image:: https://img.shields.io/badge/code%20style-black-000000.svg
54
+ :target: https://github.com/psf/black
55
+ :alt: Code style: black
56
+
57
+
58
+ This module provides various memoizing collections and decorators,
59
+ including variants of the Python Standard Library's `@lru_cache`_
60
+ function decorator.
61
+
62
+ .. code-block:: python
63
+
64
+ from cachetools import cached, LRUCache, TTLCache
65
+
66
+ # speed up calculating Fibonacci numbers with dynamic programming
67
+ @cached(cache={})
68
+ def fib(n):
69
+ return n if n < 2 else fib(n - 1) + fib(n - 2)
70
+
71
+ # cache least recently used Python Enhancement Proposals
72
+ @cached(cache=LRUCache(maxsize=32))
73
+ def get_pep(num):
74
+ url = 'http://www.python.org/dev/peps/pep-%04d/' % num
75
+ with urllib.request.urlopen(url) as s:
76
+ return s.read()
77
+
78
+ # cache weather data for no longer than ten minutes
79
+ @cached(cache=TTLCache(maxsize=1024, ttl=600))
80
+ def get_weather(place):
81
+ return owm.weather_at_place(place).get_weather()
82
+
83
+ For the purpose of this module, a *cache* is a mutable_ mapping_ of a
84
+ fixed maximum size. When the cache is full, i.e. by adding another
85
+ item the cache would exceed its maximum size, the cache must choose
86
+ which item(s) to discard based on a suitable `cache algorithm`_.
87
+
88
+ This module provides multiple cache classes based on different cache
89
+ algorithms, as well as decorators for easily memoizing function and
90
+ method calls.
91
+
92
+
93
+ Installation
94
+ ------------------------------------------------------------------------
95
+
96
+ cachetools is available from PyPI_ and can be installed by running::
97
+
98
+ pip install cachetools
99
+
100
+ Typing stubs for this package are provided by typeshed_ and can be
101
+ installed by running::
102
+
103
+ pip install types-cachetools
104
+
105
+
106
+ Project Resources
107
+ ------------------------------------------------------------------------
108
+
109
+ - `Documentation`_
110
+ - `Issue tracker`_
111
+ - `Source code`_
112
+ - `Change log`_
113
+
114
+
115
+ Related Projects
116
+ ------------------------------------------------------------------------
117
+
118
+ - asyncache_: Helpers to use cachetools with async functions
119
+ - cacheing_: Pure Python Cacheing Library
120
+ - CacheToolsUtils_: Cachetools Utilities
121
+ - kids.cache_: Kids caching library
122
+ - shelved-cache_: Persistent cache for Python cachetools
123
+
124
+
125
+ License
126
+ ------------------------------------------------------------------------
127
+
128
+ Copyright (c) 2014-2024 Thomas Kemmer.
129
+
130
+ Licensed under the `MIT License`_.
131
+
132
+
133
+ .. _@lru_cache: https://docs.python.org/3/library/functools.html#functools.lru_cache
134
+ .. _mutable: https://docs.python.org/dev/glossary.html#term-mutable
135
+ .. _mapping: https://docs.python.org/dev/glossary.html#term-mapping
136
+ .. _cache algorithm: https://en.wikipedia.org/wiki/Cache_algorithms
137
+
138
+ .. _PyPI: https://pypi.org/project/cachetools/
139
+ .. _typeshed: https://github.com/python/typeshed/
140
+ .. _Documentation: https://cachetools.readthedocs.io/
141
+ .. _Issue tracker: https://github.com/tkem/cachetools/issues/
142
+ .. _Source code: https://github.com/tkem/cachetools/
143
+ .. _Change log: https://github.com/tkem/cachetools/blob/master/CHANGELOG.rst
144
+ .. _MIT License: https://raw.github.com/tkem/cachetools/master/LICENSE
145
+
146
+ .. _asyncache: https://pypi.org/project/asyncache/
147
+ .. _cacheing: https://github.com/breid48/cacheing
148
+ .. _CacheToolsUtils: https://pypi.org/project/CacheToolsUtils/
149
+ .. _kids.cache: https://pypi.org/project/kids.cache/
150
+ .. _shelved-cache: https://pypi.org/project/shelved-cache/
evalkit_internvl/lib/python3.10/site-packages/cachetools-5.5.0.dist-info/RECORD ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ cachetools-5.5.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ cachetools-5.5.0.dist-info/LICENSE,sha256=L00v8F8Fxdo4efQCkrdgAzLXddx-0yDUPdQvPNfZLJs,1085
3
+ cachetools-5.5.0.dist-info/METADATA,sha256=M3uxLfHUouQRjhEU0_g6gvWBGUQwHYZ3MtAtkCT6Rto,5328
4
+ cachetools-5.5.0.dist-info/RECORD,,
5
+ cachetools-5.5.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ cachetools-5.5.0.dist-info/WHEEL,sha256=HiCZjzuy6Dw0hdX5R3LCFPDmFS4BWl8H-8W39XfmgX4,91
7
+ cachetools-5.5.0.dist-info/top_level.txt,sha256=ai2FH78TGwoBcCgVfoqbzk5IQCtnDukdSs4zKuVPvDs,11
8
+ cachetools/__init__.py,sha256=IKVmVhoreKii0OUU1MKZIoq4_giSdsmBkBtQjMI_px4,25557
9
+ cachetools/__pycache__/__init__.cpython-310.pyc,,
10
+ cachetools/__pycache__/func.cpython-310.pyc,,
11
+ cachetools/__pycache__/keys.cpython-310.pyc,,
12
+ cachetools/func.py,sha256=aOVfSkuNWMRADpkHZGK7LeJ_VZ8wljzbRwIAliOuhAg,3719
13
+ cachetools/keys.py,sha256=AOgfoi-oioBOnEEk115_9qs0HKISrYnbcV4F0hyZ1yk,1777
evalkit_internvl/lib/python3.10/site-packages/cachetools-5.5.0.dist-info/REQUESTED ADDED
File without changes
evalkit_internvl/lib/python3.10/site-packages/cachetools-5.5.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (72.2.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
evalkit_internvl/lib/python3.10/site-packages/cachetools-5.5.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ cachetools
evalkit_internvl/lib/python3.10/site-packages/nvidia_nccl_cu11-2.19.3.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
evalkit_internvl/lib/python3.10/site-packages/nvidia_nccl_cu11-2.19.3.dist-info/License.txt ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
3
+
4
+ Redistribution and use in source and binary forms, with or without
5
+ modification, are permitted provided that the following conditions
6
+ are met:
7
+ * Redistributions of source code must retain the above copyright
8
+ notice, this list of conditions and the following disclaimer.
9
+ * Redistributions in binary form must reproduce the above copyright
10
+ notice, this list of conditions and the following disclaimer in the
11
+ documentation and/or other materials provided with the distribution.
12
+ * Neither the name of NVIDIA CORPORATION, Lawrence Berkeley National
13
+ Laboratory, the U.S. Department of Energy, nor the names of their
14
+ contributors may be used to endorse or promote products derived
15
+ from this software without specific prior written permission.
16
+
17
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
18
+ EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20
+ PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
21
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
22
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
24
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
25
+ OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
+
29
+ The U.S. Department of Energy funded the development of this software
30
+ under subcontract 7078610 with Lawrence Berkeley National Laboratory.
31
+
evalkit_internvl/lib/python3.10/site-packages/nvidia_nccl_cu11-2.19.3.dist-info/METADATA ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: nvidia-nccl-cu11
3
+ Version: 2.19.3
4
+ Summary: NVIDIA Collective Communication Library (NCCL) Runtime
5
+ Home-page: https://developer.nvidia.com/cuda-zone
6
+ Author: Nvidia CUDA Installer Team
7
+ Author-email: cuda_installer@nvidia.com
8
+ License: NVIDIA Proprietary Software
9
+ Keywords: cuda,nvidia,runtime,machine learning,deep learning
10
+ Classifier: Development Status :: 4 - Beta
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: Intended Audience :: Education
13
+ Classifier: Intended Audience :: Science/Research
14
+ Classifier: License :: Other/Proprietary License
15
+ Classifier: Natural Language :: English
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.5
18
+ Classifier: Programming Language :: Python :: 3.6
19
+ Classifier: Programming Language :: Python :: 3.7
20
+ Classifier: Programming Language :: Python :: 3.8
21
+ Classifier: Programming Language :: Python :: 3.9
22
+ Classifier: Programming Language :: Python :: 3.10
23
+ Classifier: Programming Language :: Python :: 3.11
24
+ Classifier: Programming Language :: Python :: 3 :: Only
25
+ Classifier: Topic :: Scientific/Engineering
26
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
27
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
28
+ Classifier: Topic :: Software Development
29
+ Classifier: Topic :: Software Development :: Libraries
30
+ Classifier: Operating System :: Microsoft :: Windows
31
+ Classifier: Operating System :: POSIX :: Linux
32
+ Requires-Python: >=3
33
+ License-File: License.txt
34
+
35
+ NCCL (pronounced "Nickel") is a stand-alone library of standard collective communication routines for GPUs, implementing all-reduce, all-gather, reduce, broadcast, and reduce-scatter. It has been optimized to achieve high bandwidth on any platform using PCIe, NVLink, NVswitch, as well as networking using InfiniBand Verbs or TCP/IP sockets.
evalkit_internvl/lib/python3.10/site-packages/nvidia_nccl_cu11-2.19.3.dist-info/RECORD ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ nvidia/__pycache__/__init__.cpython-310.pyc,,
3
+ nvidia/nccl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ nvidia/nccl/__pycache__/__init__.cpython-310.pyc,,
5
+ nvidia/nccl/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ nvidia/nccl/include/__pycache__/__init__.cpython-310.pyc,,
7
+ nvidia/nccl/include/nccl.h,sha256=r5ktDhEQdKl4Jo6fQzuUNAmhq6jm3NMiEHvvGF6wAQ0,18641
8
+ nvidia/nccl/include/nccl_net.h,sha256=MDno5IdD4TfRBCFA5Xzh5bOyrpgMyv3pps5zWmhsW0k,18463
9
+ nvidia/nccl/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
10
+ nvidia/nccl/lib/__pycache__/__init__.cpython-310.pyc,,
11
+ nvidia/nccl/lib/libnccl.so.2,sha256=ZIZ4O2rd6lNjYd8x0qFrRzObSibwswhZQ3r5cV8UKmo,176493424
12
+ nvidia_nccl_cu11-2.19.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
13
+ nvidia_nccl_cu11-2.19.3.dist-info/License.txt,sha256=92n6LTYyE_WZNm2kbiqNZQyG6q6EWuxNRLL1_QHU7Fk,1735
14
+ nvidia_nccl_cu11-2.19.3.dist-info/METADATA,sha256=yDTVrPtUix-jZOcrTGJAzSG53_1RnzNN5PbNokCmgfY,1834
15
+ nvidia_nccl_cu11-2.19.3.dist-info/RECORD,,
16
+ nvidia_nccl_cu11-2.19.3.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106
17
+ nvidia_nccl_cu11-2.19.3.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
evalkit_internvl/lib/python3.10/site-packages/nvidia_nccl_cu11-2.19.3.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.37.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-manylinux1_x86_64
5
+
evalkit_internvl/lib/python3.10/site-packages/nvidia_nccl_cu11-2.19.3.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ nvidia
evalkit_internvl/lib/python3.10/site-packages/rsa-4.9.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
evalkit_internvl/lib/python3.10/site-packages/rsa-4.9.dist-info/LICENSE ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ https://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
evalkit_internvl/lib/python3.10/site-packages/rsa-4.9.dist-info/METADATA ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: rsa
3
+ Version: 4.9
4
+ Summary: Pure-Python RSA implementation
5
+ Home-page: https://stuvel.eu/rsa
6
+ License: Apache-2.0
7
+ Author: Sybren A. Stüvel
8
+ Author-email: sybren@stuvel.eu
9
+ Requires-Python: >=3.6,<4
10
+ Classifier: Development Status :: 5 - Production/Stable
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: Intended Audience :: Education
13
+ Classifier: Intended Audience :: Information Technology
14
+ Classifier: License :: OSI Approved :: Apache Software License
15
+ Classifier: Operating System :: OS Independent
16
+ Classifier: Programming Language :: Python
17
+ Classifier: Programming Language :: Python :: 3
18
+ Classifier: Programming Language :: Python :: 3.10
19
+ Classifier: Programming Language :: Python :: 3.6
20
+ Classifier: Programming Language :: Python :: 3.7
21
+ Classifier: Programming Language :: Python :: 3.8
22
+ Classifier: Programming Language :: Python :: 3.9
23
+ Classifier: Programming Language :: Python :: Implementation :: CPython
24
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
25
+ Classifier: Topic :: Security :: Cryptography
26
+ Requires-Dist: pyasn1 (>=0.1.3)
27
+ Project-URL: Repository, https://github.com/sybrenstuvel/python-rsa
28
+ Description-Content-Type: text/markdown
29
+
30
+ # Pure Python RSA implementation
31
+
32
+ [![PyPI](https://img.shields.io/pypi/v/rsa.svg)](https://pypi.org/project/rsa/)
33
+ [![Build Status](https://travis-ci.org/sybrenstuvel/python-rsa.svg?branch=master)](https://travis-ci.org/sybrenstuvel/python-rsa)
34
+ [![Coverage Status](https://coveralls.io/repos/github/sybrenstuvel/python-rsa/badge.svg?branch=master)](https://coveralls.io/github/sybrenstuvel/python-rsa?branch=master)
35
+ [![Code Climate](https://api.codeclimate.com/v1/badges/a99a88d28ad37a79dbf6/maintainability)](https://codeclimate.com/github/codeclimate/codeclimate/maintainability)
36
+
37
+ [Python-RSA](https://stuvel.eu/rsa) is a pure-Python RSA implementation. It supports
38
+ encryption and decryption, signing and verifying signatures, and key
39
+ generation according to PKCS#1 version 1.5. It can be used as a Python
40
+ library as well as on the commandline. The code was mostly written by
41
+ Sybren A. Stüvel.
42
+
43
+ Documentation can be found at the [Python-RSA homepage](https://stuvel.eu/rsa). For all changes, check [the changelog](https://github.com/sybrenstuvel/python-rsa/blob/master/CHANGELOG.md).
44
+
45
+ Download and install using:
46
+
47
+ pip install rsa
48
+
49
+ or download it from the [Python Package Index](https://pypi.org/project/rsa/).
50
+
51
+ The source code is maintained at [GitHub](https://github.com/sybrenstuvel/python-rsa/) and is
52
+ licensed under the [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
53
+
54
+ ## Security
55
+
56
+ Because of how Python internally stores numbers, it is very hard (if not impossible) to make a pure-Python program secure against timing attacks. This library is no exception, so use it with care. See https://securitypitfalls.wordpress.com/2018/08/03/constant-time-compare-in-python/ for more info.
57
+
58
+ ## Setup of Development Environment
59
+
60
+ ```
61
+ python3 -m venv .venv
62
+ . ./.venv/bin/activate
63
+ pip install poetry
64
+ poetry install
65
+ ```
66
+
67
+ ## Publishing a New Release
68
+
69
+ Since this project is considered critical on the Python Package Index,
70
+ two-factor authentication is required. For uploading packages to PyPi, an API
71
+ key is required; username+password will not work.
72
+
73
+ First, generate an API token at https://pypi.org/manage/account/token/. Then,
74
+ use this token when publishing instead of your username and password.
75
+
76
+ As username, use `__token__`.
77
+ As password, use the token itself, including the `pypi-` prefix.
78
+
79
+ See https://pypi.org/help/#apitoken for help using API tokens to publish. This
80
+ is what I have in `~/.pypirc`:
81
+
82
+ ```
83
+ [distutils]
84
+ index-servers =
85
+ rsa
86
+
87
+ # Use `twine upload -r rsa` to upload with this token.
88
+ [rsa]
89
+ repository = https://upload.pypi.org/legacy/
90
+ username = __token__
91
+ password = pypi-token
92
+ ```
93
+
94
+ ```
95
+ . ./.venv/bin/activate
96
+ pip install twine
97
+
98
+ poetry build
99
+ twine check dist/rsa-4.9.tar.gz dist/rsa-4.9-*.whl
100
+ twine upload -r rsa dist/rsa-4.9.tar.gz dist/rsa-4.9-*.whl
101
+ ```
102
+
103
+ The `pip install twine` is necessary as Python-RSA requires Python >= 3.6, and
104
+ Twine requires at least version 3.7. This means Poetry refuses to add it as
105
+ dependency.
106
+
evalkit_internvl/lib/python3.10/site-packages/rsa-4.9.dist-info/RECORD ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ../../../bin/pyrsa-decrypt,sha256=S6LDCj0lGDDdZp5xk_EGzp87_NES_oaSKJSQYR0V_nM,234
2
+ ../../../bin/pyrsa-encrypt,sha256=rEta2Ny25qATspyHYwhLmeJzNMG3Cea-TWnAk_rAms4,234
3
+ ../../../bin/pyrsa-keygen,sha256=ZGsNTjZwSSIXo0M4kJ3W-yD_T7tqMGWLRw789QM08b8,232
4
+ ../../../bin/pyrsa-priv2pub,sha256=RdsdjkkUp-hUMjbEoFNFpHEp7XtLNGb1jI59AjflqKI,255
5
+ ../../../bin/pyrsa-sign,sha256=fip0vUeZqd8lwVJbI3SRUfwlu7mIOhKG7r7sAAUM5mM,228
6
+ ../../../bin/pyrsa-verify,sha256=nYf48kJi0gdoqAsx7Dak_1AQhPtXqNC6ERcEjfGLmMg,232
7
+ rsa-4.9.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
8
+ rsa-4.9.dist-info/LICENSE,sha256=Bz8ot9OJyP509gfhfCf4HqpazmntxDqITyP0G0HFxyY,577
9
+ rsa-4.9.dist-info/METADATA,sha256=-540qZBdoxQdUSuhxWlXTnY-oMNVz3EML49u9IfmmQ4,4173
10
+ rsa-4.9.dist-info/RECORD,,
11
+ rsa-4.9.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
12
+ rsa-4.9.dist-info/WHEEL,sha256=y3eDiaFVSNTPbgzfNn0nYn5tEn1cX6WrdetDlQM4xWw,83
13
+ rsa-4.9.dist-info/entry_points.txt,sha256=p0nVsezmPSjm5x4GDMD4a9Sshc9ukdfw1kkmOmpaAu0,201
14
+ rsa/__init__.py,sha256=5bc5rkBB8vxWEtVYwoMQxM8df3O1Ak2_zEXqnkK9oes,1605
15
+ rsa/__pycache__/__init__.cpython-310.pyc,,
16
+ rsa/__pycache__/asn1.cpython-310.pyc,,
17
+ rsa/__pycache__/cli.cpython-310.pyc,,
18
+ rsa/__pycache__/common.cpython-310.pyc,,
19
+ rsa/__pycache__/core.cpython-310.pyc,,
20
+ rsa/__pycache__/key.cpython-310.pyc,,
21
+ rsa/__pycache__/parallel.cpython-310.pyc,,
22
+ rsa/__pycache__/pem.cpython-310.pyc,,
23
+ rsa/__pycache__/pkcs1.cpython-310.pyc,,
24
+ rsa/__pycache__/pkcs1_v2.cpython-310.pyc,,
25
+ rsa/__pycache__/prime.cpython-310.pyc,,
26
+ rsa/__pycache__/randnum.cpython-310.pyc,,
27
+ rsa/__pycache__/transform.cpython-310.pyc,,
28
+ rsa/__pycache__/util.cpython-310.pyc,,
29
+ rsa/asn1.py,sha256=WL2bhDg-q7riT8P8cBMpydsh020i6Ejl6vcQIuA0VXA,1792
30
+ rsa/cli.py,sha256=DOE66cB0-0SjUhs-PX2gbxiSma5-CT1lEAdcCYrTXwE,10183
31
+ rsa/common.py,sha256=DAWwAuOSv1X67CBHzBvH-1wOsRe9np6eVsL_ZLrBWcg,4863
32
+ rsa/core.py,sha256=Rf33atg4-pI7U-mTdoosmn8gTeTyX5xP7yv0iqWyogc,1714
33
+ rsa/key.py,sha256=3_xv7B-AZZ5jIIz-vpnpfJtStS415e8fNr2iTYOu5CM,28285
34
+ rsa/parallel.py,sha256=NcL1QjNWJxH9zL2OAOYKgr-HbAeEEmdckdxC6KMhkmM,2405
35
+ rsa/pem.py,sha256=lzFulzgLHyqhimeo3T4GeBXuGRClfkTMYYZbgmYYmQk,4123
36
+ rsa/pkcs1.py,sha256=wN9SWn1_zFJvHDNLGPeGZxoDA5T7ipVy9DntNcCYBpU,16690
37
+ rsa/pkcs1_v2.py,sha256=d5A27EcOgbgJeikuLZkzANOzBQh4nVX-Bom5DUXgXHw,3549
38
+ rsa/prime.py,sha256=Kij81g-VneGw20Cq6LRaCVT3b9tX4gWIzkWV-3h4qMg,5304
39
+ rsa/py.typed,sha256=TfYjsEjlfDcVNGFibSYzbCf81u37bSXWmv4oTYf0zY8,64
40
+ rsa/randnum.py,sha256=AwhXEZAT6spbUUPjhwQXGXKOTlG8FPHOI3gmTAcQ0pk,2752
41
+ rsa/transform.py,sha256=i-nVC7JcPZkYz1W-d-qg0n0PQS17kKeXhfd9IkDehj4,2272
42
+ rsa/util.py,sha256=9PuWg2jQfV8FHdE9hpGHDCi2iGM8Z-r4tIQXRVFmqYY,3090
evalkit_internvl/lib/python3.10/site-packages/rsa-4.9.dist-info/REQUESTED ADDED
File without changes
evalkit_internvl/lib/python3.10/site-packages/rsa-4.9.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: poetry 1.0.7
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
evalkit_internvl/lib/python3.10/site-packages/rsa-4.9.dist-info/entry_points.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ [console_scripts]
2
+ pyrsa-decrypt=rsa.cli:decrypt
3
+ pyrsa-encrypt=rsa.cli:encrypt
4
+ pyrsa-keygen=rsa.cli:keygen
5
+ pyrsa-priv2pub=rsa.util:private_to_public
6
+ pyrsa-sign=rsa.cli:sign
7
+ pyrsa-verify=rsa.cli:verify
8
+
evalkit_internvl/lib/python3.10/site-packages/safetensors/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # Re-export this
2
+ from ._safetensors_rust import ( # noqa: F401
3
+ SafetensorError,
4
+ __version__,
5
+ deserialize,
6
+ safe_open,
7
+ serialize,
8
+ serialize_file,
9
+ )
evalkit_internvl/lib/python3.10/site-packages/safetensors/__init__.pyi ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Generated content DO NOT EDIT
2
+ @staticmethod
3
+ def deserialize(bytes):
4
+ """
5
+ Opens a safetensors lazily and returns tensors as asked
6
+
7
+ Args:
8
+ data (:obj:`bytes`):
9
+ The byte content of a file
10
+
11
+ Returns:
12
+ (:obj:`List[str, Dict[str, Dict[str, any]]]`):
13
+ The deserialized content is like:
14
+ [("tensor_name", {"shape": [2, 3], "dtype": "F32", "data": b"\0\0.." }), (...)]
15
+ """
16
+ pass
17
+
18
+ @staticmethod
19
+ def serialize(tensor_dict, metadata=None):
20
+ """
21
+ Serializes raw data.
22
+
23
+ Args:
24
+ tensor_dict (:obj:`Dict[str, Dict[Any]]`):
25
+ The tensor dict is like:
26
+ {"tensor_name": {"dtype": "F32", "shape": [2, 3], "data": b"\0\0"}}
27
+ metadata (:obj:`Dict[str, str]`, *optional*):
28
+ The optional purely text annotations
29
+
30
+ Returns:
31
+ (:obj:`bytes`):
32
+ The serialized content.
33
+ """
34
+ pass
35
+
36
+ @staticmethod
37
+ def serialize_file(tensor_dict, filename, metadata=None):
38
+ """
39
+ Serializes raw data.
40
+
41
+ Args:
42
+ tensor_dict (:obj:`Dict[str, Dict[Any]]`):
43
+ The tensor dict is like:
44
+ {"tensor_name": {"dtype": "F32", "shape": [2, 3], "data": b"\0\0"}}
45
+ filename (:obj:`str`):
46
+ The name of the file to write into.
47
+ metadata (:obj:`Dict[str, str]`, *optional*):
48
+ The optional purely text annotations
49
+
50
+ Returns:
51
+ (:obj:`bytes`):
52
+ The serialized content.
53
+ """
54
+ pass
55
+
56
+ class safe_open:
57
+ """
58
+ Opens a safetensors lazily and returns tensors as asked
59
+
60
+ Args:
61
+ filename (:obj:`str`):
62
+ The filename to open
63
+
64
+ framework (:obj:`str`):
65
+ The framework you want you tensors in. Supported values:
66
+ `pt`, `tf`, `flax`, `numpy`.
67
+
68
+ device (:obj:`str`, defaults to :obj:`"cpu"`):
69
+ The device on which you want the tensors.
70
+ """
71
+
72
+ def __init__(self, filename, framework, device="cpu"):
73
+ pass
evalkit_internvl/lib/python3.10/site-packages/safetensors/__pycache__/flax.cpython-310.pyc ADDED
Binary file (4.29 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/safetensors/__pycache__/numpy.cpython-310.pyc ADDED
Binary file (5.45 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/safetensors/__pycache__/paddle.cpython-310.pyc ADDED
Binary file (4.54 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/safetensors/__pycache__/tensorflow.cpython-310.pyc ADDED
Binary file (4.37 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/safetensors/flax.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Dict, Optional, Union
3
+
4
+ import numpy as np
5
+
6
+ import jax.numpy as jnp
7
+ from jax import Array
8
+ from safetensors import numpy, safe_open
9
+
10
+
11
+ def save(tensors: Dict[str, Array], metadata: Optional[Dict[str, str]] = None) -> bytes:
12
+ """
13
+ Saves a dictionary of tensors into raw bytes in safetensors format.
14
+
15
+ Args:
16
+ tensors (`Dict[str, Array]`):
17
+ The incoming tensors. Tensors need to be contiguous and dense.
18
+ metadata (`Dict[str, str]`, *optional*, defaults to `None`):
19
+ Optional text only metadata you might want to save in your header.
20
+ For instance it can be useful to specify more about the underlying
21
+ tensors. This is purely informative and does not affect tensor loading.
22
+
23
+ Returns:
24
+ `bytes`: The raw bytes representing the format
25
+
26
+ Example:
27
+
28
+ ```python
29
+ from safetensors.flax import save
30
+ from jax import numpy as jnp
31
+
32
+ tensors = {"embedding": jnp.zeros((512, 1024)), "attention": jnp.zeros((256, 256))}
33
+ byte_data = save(tensors)
34
+ ```
35
+ """
36
+ np_tensors = _jnp2np(tensors)
37
+ return numpy.save(np_tensors, metadata=metadata)
38
+
39
+
40
+ def save_file(
41
+ tensors: Dict[str, Array],
42
+ filename: Union[str, os.PathLike],
43
+ metadata: Optional[Dict[str, str]] = None,
44
+ ) -> None:
45
+ """
46
+ Saves a dictionary of tensors into raw bytes in safetensors format.
47
+
48
+ Args:
49
+ tensors (`Dict[str, Array]`):
50
+ The incoming tensors. Tensors need to be contiguous and dense.
51
+ filename (`str`, or `os.PathLike`)):
52
+ The filename we're saving into.
53
+ metadata (`Dict[str, str]`, *optional*, defaults to `None`):
54
+ Optional text only metadata you might want to save in your header.
55
+ For instance it can be useful to specify more about the underlying
56
+ tensors. This is purely informative and does not affect tensor loading.
57
+
58
+ Returns:
59
+ `None`
60
+
61
+ Example:
62
+
63
+ ```python
64
+ from safetensors.flax import save_file
65
+ from jax import numpy as jnp
66
+
67
+ tensors = {"embedding": jnp.zeros((512, 1024)), "attention": jnp.zeros((256, 256))}
68
+ save_file(tensors, "model.safetensors")
69
+ ```
70
+ """
71
+ np_tensors = _jnp2np(tensors)
72
+ return numpy.save_file(np_tensors, filename, metadata=metadata)
73
+
74
+
75
+ def load(data: bytes) -> Dict[str, Array]:
76
+ """
77
+ Loads a safetensors file into flax format from pure bytes.
78
+
79
+ Args:
80
+ data (`bytes`):
81
+ The content of a safetensors file
82
+
83
+ Returns:
84
+ `Dict[str, Array]`: dictionary that contains name as key, value as `Array` on cpu
85
+
86
+ Example:
87
+
88
+ ```python
89
+ from safetensors.flax import load
90
+
91
+ file_path = "./my_folder/bert.safetensors"
92
+ with open(file_path, "rb") as f:
93
+ data = f.read()
94
+
95
+ loaded = load(data)
96
+ ```
97
+ """
98
+ flat = numpy.load(data)
99
+ return _np2jnp(flat)
100
+
101
+
102
+ def load_file(filename: Union[str, os.PathLike]) -> Dict[str, Array]:
103
+ """
104
+ Loads a safetensors file into flax format.
105
+
106
+ Args:
107
+ filename (`str`, or `os.PathLike`)):
108
+ The name of the file which contains the tensors
109
+
110
+ Returns:
111
+ `Dict[str, Array]`: dictionary that contains name as key, value as `Array`
112
+
113
+ Example:
114
+
115
+ ```python
116
+ from safetensors.flax import load_file
117
+
118
+ file_path = "./my_folder/bert.safetensors"
119
+ loaded = load_file(file_path)
120
+ ```
121
+ """
122
+ result = {}
123
+ with safe_open(filename, framework="flax") as f:
124
+ for k in f.keys():
125
+ result[k] = f.get_tensor(k)
126
+ return result
127
+
128
+
129
+ def _np2jnp(numpy_dict: Dict[str, np.ndarray]) -> Dict[str, Array]:
130
+ for k, v in numpy_dict.items():
131
+ numpy_dict[k] = jnp.array(v)
132
+ return numpy_dict
133
+
134
+
135
+ def _jnp2np(jnp_dict: Dict[str, Array]) -> Dict[str, np.array]:
136
+ for k, v in jnp_dict.items():
137
+ jnp_dict[k] = np.asarray(v)
138
+ return jnp_dict
evalkit_internvl/lib/python3.10/site-packages/safetensors/mlx.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Dict, Optional, Union
3
+
4
+ import numpy as np
5
+
6
+ import mlx.core as mx
7
+ from safetensors import numpy, safe_open
8
+
9
+
10
+ def save(tensors: Dict[str, mx.array], metadata: Optional[Dict[str, str]] = None) -> bytes:
11
+ """
12
+ Saves a dictionary of tensors into raw bytes in safetensors format.
13
+
14
+ Args:
15
+ tensors (`Dict[str, mx.array]`):
16
+ The incoming tensors. Tensors need to be contiguous and dense.
17
+ metadata (`Dict[str, str]`, *optional*, defaults to `None`):
18
+ Optional text only metadata you might want to save in your header.
19
+ For instance it can be useful to specify more about the underlying
20
+ tensors. This is purely informative and does not affect tensor loading.
21
+
22
+ Returns:
23
+ `bytes`: The raw bytes representing the format
24
+
25
+ Example:
26
+
27
+ ```python
28
+ from safetensors.mlx import save
29
+ import mlx.core as mx
30
+
31
+ tensors = {"embedding": mx.zeros((512, 1024)), "attention": mx.zeros((256, 256))}
32
+ byte_data = save(tensors)
33
+ ```
34
+ """
35
+ np_tensors = _mx2np(tensors)
36
+ return numpy.save(np_tensors, metadata=metadata)
37
+
38
+
39
+ def save_file(
40
+ tensors: Dict[str, mx.array],
41
+ filename: Union[str, os.PathLike],
42
+ metadata: Optional[Dict[str, str]] = None,
43
+ ) -> None:
44
+ """
45
+ Saves a dictionary of tensors into raw bytes in safetensors format.
46
+
47
+ Args:
48
+ tensors (`Dict[str, mx.array]`):
49
+ The incoming tensors. Tensors need to be contiguous and dense.
50
+ filename (`str`, or `os.PathLike`)):
51
+ The filename we're saving into.
52
+ metadata (`Dict[str, str]`, *optional*, defaults to `None`):
53
+ Optional text only metadata you might want to save in your header.
54
+ For instance it can be useful to specify more about the underlying
55
+ tensors. This is purely informative and does not affect tensor loading.
56
+
57
+ Returns:
58
+ `None`
59
+
60
+ Example:
61
+
62
+ ```python
63
+ from safetensors.mlx import save_file
64
+ import mlx.core as mx
65
+
66
+ tensors = {"embedding": mx.zeros((512, 1024)), "attention": mx.zeros((256, 256))}
67
+ save_file(tensors, "model.safetensors")
68
+ ```
69
+ """
70
+ np_tensors = _mx2np(tensors)
71
+ return numpy.save_file(np_tensors, filename, metadata=metadata)
72
+
73
+
74
+ def load(data: bytes) -> Dict[str, mx.array]:
75
+ """
76
+ Loads a safetensors file into MLX format from pure bytes.
77
+
78
+ Args:
79
+ data (`bytes`):
80
+ The content of a safetensors file
81
+
82
+ Returns:
83
+ `Dict[str, mx.array]`: dictionary that contains name as key, value as `mx.array`
84
+
85
+ Example:
86
+
87
+ ```python
88
+ from safetensors.mlx import load
89
+
90
+ file_path = "./my_folder/bert.safetensors"
91
+ with open(file_path, "rb") as f:
92
+ data = f.read()
93
+
94
+ loaded = load(data)
95
+ ```
96
+ """
97
+ flat = numpy.load(data)
98
+ return _np2mx(flat)
99
+
100
+
101
+ def load_file(filename: Union[str, os.PathLike]) -> Dict[str, mx.array]:
102
+ """
103
+ Loads a safetensors file into MLX format.
104
+
105
+ Args:
106
+ filename (`str`, or `os.PathLike`)):
107
+ The name of the file which contains the tensors
108
+
109
+ Returns:
110
+ `Dict[str, mx.array]`: dictionary that contains name as key, value as `mx.array`
111
+
112
+ Example:
113
+
114
+ ```python
115
+ from safetensors.flax import load_file
116
+
117
+ file_path = "./my_folder/bert.safetensors"
118
+ loaded = load_file(file_path)
119
+ ```
120
+ """
121
+ result = {}
122
+ with safe_open(filename, framework="mlx") as f:
123
+ for k in f.keys():
124
+ result[k] = f.get_tensor(k)
125
+ return result
126
+
127
+
128
+ def _np2mx(numpy_dict: Dict[str, np.ndarray]) -> Dict[str, mx.array]:
129
+ for k, v in numpy_dict.items():
130
+ numpy_dict[k] = mx.array(v)
131
+ return numpy_dict
132
+
133
+
134
+ def _mx2np(mx_dict: Dict[str, mx.array]) -> Dict[str, np.array]:
135
+ new_dict = {}
136
+ for k, v in mx_dict.items():
137
+ new_dict[k] = np.asarray(v)
138
+ return new_dict
evalkit_internvl/lib/python3.10/site-packages/safetensors/numpy.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ from typing import Dict, Optional, Union
4
+
5
+ import numpy as np
6
+
7
+ from safetensors import deserialize, safe_open, serialize, serialize_file
8
+
9
+
10
+ def _tobytes(tensor: np.ndarray) -> bytes:
11
+ if not _is_little_endian(tensor):
12
+ tensor = tensor.byteswap(inplace=False)
13
+ return tensor.tobytes()
14
+
15
+
16
+ def save(tensor_dict: Dict[str, np.ndarray], metadata: Optional[Dict[str, str]] = None) -> bytes:
17
+ """
18
+ Saves a dictionary of tensors into raw bytes in safetensors format.
19
+
20
+ Args:
21
+ tensor_dict (`Dict[str, np.ndarray]`):
22
+ The incoming tensors. Tensors need to be contiguous and dense.
23
+ metadata (`Dict[str, str]`, *optional*, defaults to `None`):
24
+ Optional text only metadata you might want to save in your header.
25
+ For instance it can be useful to specify more about the underlying
26
+ tensors. This is purely informative and does not affect tensor loading.
27
+
28
+ Returns:
29
+ `bytes`: The raw bytes representing the format
30
+
31
+ Example:
32
+
33
+ ```python
34
+ from safetensors.numpy import save
35
+ import numpy as np
36
+
37
+ tensors = {"embedding": np.zeros((512, 1024)), "attention": np.zeros((256, 256))}
38
+ byte_data = save(tensors)
39
+ ```
40
+ """
41
+ flattened = {k: {"dtype": v.dtype.name, "shape": v.shape, "data": _tobytes(v)} for k, v in tensor_dict.items()}
42
+ serialized = serialize(flattened, metadata=metadata)
43
+ result = bytes(serialized)
44
+ return result
45
+
46
+
47
+ def save_file(
48
+ tensor_dict: Dict[str, np.ndarray], filename: Union[str, os.PathLike], metadata: Optional[Dict[str, str]] = None
49
+ ) -> None:
50
+ """
51
+ Saves a dictionary of tensors into raw bytes in safetensors format.
52
+
53
+ Args:
54
+ tensor_dict (`Dict[str, np.ndarray]`):
55
+ The incoming tensors. Tensors need to be contiguous and dense.
56
+ filename (`str`, or `os.PathLike`)):
57
+ The filename we're saving into.
58
+ metadata (`Dict[str, str]`, *optional*, defaults to `None`):
59
+ Optional text only metadata you might want to save in your header.
60
+ For instance it can be useful to specify more about the underlying
61
+ tensors. This is purely informative and does not affect tensor loading.
62
+
63
+ Returns:
64
+ `None`
65
+
66
+ Example:
67
+
68
+ ```python
69
+ from safetensors.numpy import save_file
70
+ import numpy as np
71
+
72
+ tensors = {"embedding": np.zeros((512, 1024)), "attention": np.zeros((256, 256))}
73
+ save_file(tensors, "model.safetensors")
74
+ ```
75
+ """
76
+ flattened = {k: {"dtype": v.dtype.name, "shape": v.shape, "data": _tobytes(v)} for k, v in tensor_dict.items()}
77
+ serialize_file(flattened, filename, metadata=metadata)
78
+
79
+
80
+ def load(data: bytes) -> Dict[str, np.ndarray]:
81
+ """
82
+ Loads a safetensors file into numpy format from pure bytes.
83
+
84
+ Args:
85
+ data (`bytes`):
86
+ The content of a safetensors file
87
+
88
+ Returns:
89
+ `Dict[str, np.ndarray]`: dictionary that contains name as key, value as `np.ndarray` on cpu
90
+
91
+ Example:
92
+
93
+ ```python
94
+ from safetensors.numpy import load
95
+
96
+ file_path = "./my_folder/bert.safetensors"
97
+ with open(file_path, "rb") as f:
98
+ data = f.read()
99
+
100
+ loaded = load(data)
101
+ ```
102
+ """
103
+ flat = deserialize(data)
104
+ return _view2np(flat)
105
+
106
+
107
+ def load_file(filename: Union[str, os.PathLike]) -> Dict[str, np.ndarray]:
108
+ """
109
+ Loads a safetensors file into numpy format.
110
+
111
+ Args:
112
+ filename (`str`, or `os.PathLike`)):
113
+ The name of the file which contains the tensors
114
+
115
+ Returns:
116
+ `Dict[str, np.ndarray]`: dictionary that contains name as key, value as `np.ndarray`
117
+
118
+ Example:
119
+
120
+ ```python
121
+ from safetensors.numpy import load_file
122
+
123
+ file_path = "./my_folder/bert.safetensors"
124
+ loaded = load_file(file_path)
125
+ ```
126
+ """
127
+ result = {}
128
+ with safe_open(filename, framework="np") as f:
129
+ for k in f.keys():
130
+ result[k] = f.get_tensor(k)
131
+ return result
132
+
133
+
134
+ _TYPES = {
135
+ "F64": np.float64,
136
+ "F32": np.float32,
137
+ "F16": np.float16,
138
+ "I64": np.int64,
139
+ "U64": np.uint64,
140
+ "I32": np.int32,
141
+ "U32": np.uint32,
142
+ "I16": np.int16,
143
+ "U16": np.uint16,
144
+ "I8": np.int8,
145
+ "U8": np.uint8,
146
+ "BOOL": bool,
147
+ }
148
+
149
+
150
+ def _getdtype(dtype_str: str) -> np.dtype:
151
+ return _TYPES[dtype_str]
152
+
153
+
154
+ def _view2np(safeview) -> Dict[str, np.ndarray]:
155
+ result = {}
156
+ for k, v in safeview:
157
+ dtype = _getdtype(v["dtype"])
158
+ arr = np.frombuffer(v["data"], dtype=dtype).reshape(v["shape"])
159
+ result[k] = arr
160
+ return result
161
+
162
+
163
+ def _is_little_endian(tensor: np.ndarray) -> bool:
164
+ byteorder = tensor.dtype.byteorder
165
+ if byteorder == "=":
166
+ if sys.byteorder == "little":
167
+ return True
168
+ else:
169
+ return False
170
+ elif byteorder == "|":
171
+ return True
172
+ elif byteorder == "<":
173
+ return True
174
+ elif byteorder == ">":
175
+ return False
176
+ raise ValueError(f"Unexpected byte order {byteorder}")
evalkit_internvl/lib/python3.10/site-packages/safetensors/paddle.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Dict, Optional, Union
3
+
4
+ import numpy as np
5
+
6
+ import paddle
7
+ from safetensors import numpy
8
+
9
+
10
def save(tensors: Dict[str, paddle.Tensor], metadata: Optional[Dict[str, str]] = None) -> bytes:
    """
    Serialize a dictionary of paddle tensors into raw bytes in safetensors format.

    Args:
        tensors (`Dict[str, paddle.Tensor]`):
            The tensors to serialize. They must be contiguous and dense.
        metadata (`Dict[str, str]`, *optional*, defaults to `None`):
            Free-form text metadata stored in the header. Purely informative;
            it does not affect tensor loading.

    Returns:
        `bytes`: The serialized safetensors payload.

    Example:

    ```python
    from safetensors.paddle import save
    import paddle

    tensors = {"embedding": paddle.zeros((512, 1024)), "attention": paddle.zeros((256, 256))}
    byte_data = save(tensors)
    ```
    """
    # Convert to numpy first and reuse the numpy serialization path.
    converted = _paddle2np(tensors)
    return numpy.save(converted, metadata=metadata)
37
+
38
+
39
def save_file(
    tensors: Dict[str, paddle.Tensor],
    filename: Union[str, os.PathLike],
    metadata: Optional[Dict[str, str]] = None,
) -> None:
    """
    Serialize a dictionary of paddle tensors into a safetensors file on disk.

    Args:
        tensors (`Dict[str, paddle.Tensor]`):
            The tensors to serialize. They must be contiguous and dense.
        filename (`str`, or `os.PathLike`):
            The destination file path.
        metadata (`Dict[str, str]`, *optional*, defaults to `None`):
            Free-form text metadata stored in the header. Purely informative;
            it does not affect tensor loading.

    Returns:
        `None`

    Example:

    ```python
    from safetensors.paddle import save_file
    import paddle

    tensors = {"embedding": paddle.zeros((512, 1024)), "attention": paddle.zeros((256, 256))}
    save_file(tensors, "model.safetensors")
    ```
    """
    # Convert to numpy first and reuse the numpy file-writing path.
    converted = _paddle2np(tensors)
    return numpy.save_file(converted, filename, metadata=metadata)
72
+
73
+
74
def load(data: bytes, device: str = "cpu") -> Dict[str, paddle.Tensor]:
    """
    Deserialize safetensors bytes into paddle tensors.

    Args:
        data (`bytes`):
            The content of a safetensors file.
        device (`str`, *optional*, defaults to `"cpu"`):
            The paddle device the tensors are placed on after loading.

    Returns:
        `Dict[str, paddle.Tensor]`: mapping from tensor name to `paddle.Tensor`.

    Example:

    ```python
    from safetensors.paddle import load

    file_path = "./my_folder/bert.safetensors"
    with open(file_path, "rb") as f:
        data = f.read()

    loaded = load(data)
    ```
    """
    # Parse via the numpy backend, then convert to paddle tensors.
    as_numpy = numpy.load(data)
    return _np2paddle(as_numpy, device)
99
+
100
+
101
def load_file(filename: Union[str, os.PathLike], device="cpu") -> Dict[str, paddle.Tensor]:
    """
    Load a safetensors file into paddle tensors.

    Args:
        filename (`str`, or `os.PathLike`):
            Path of the safetensors file to read.
        device (`Union[Dict[str, any], str]`, *optional*, defaults to `cpu`):
            The paddle device the tensors are placed on after loading;
            all regular paddle device locations are accepted.

    Returns:
        `Dict[str, paddle.Tensor]`: mapping from tensor name to `paddle.Tensor`.

    Example:

    ```python
    from safetensors.paddle import load_file

    file_path = "./my_folder/bert.safetensors"
    loaded = load_file(file_path)
    ```
    """
    # Read via the numpy backend, then convert to paddle tensors.
    as_numpy = numpy.load_file(filename)
    return _np2paddle(as_numpy, device)
127
+
128
+
129
def _np2paddle(numpy_dict: Dict[str, np.ndarray], device: str = "cpu") -> Dict[str, paddle.Tensor]:
    # Convert every numpy array into a paddle tensor placed on `device`.
    # Returns a fresh dict instead of rewriting `numpy_dict` in place (the
    # original mutated the caller's mapping); the visible callers (`load`,
    # `load_file`) only use the return value, so this is fully compatible.
    return {name: paddle.to_tensor(array, place=device) for name, array in numpy_dict.items()}
133
+
134
+
135
def _paddle2np(paddle_dict: Dict[str, paddle.Tensor]) -> Dict[str, np.array]:
    # Convert every paddle tensor into a detached cpu numpy array.
    # The original version mutated `paddle_dict` in place, which silently
    # replaced the tensors inside the user's dict passed to save()/save_file();
    # building a new dict removes that destructive side effect while keeping
    # the same return value.
    return {name: tensor.detach().cpu().numpy() for name, tensor in paddle_dict.items()}
evalkit_internvl/lib/python3.10/site-packages/safetensors/py.typed ADDED
File without changes
evalkit_internvl/lib/python3.10/site-packages/safetensors/tensorflow.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Dict, Optional, Union
3
+
4
+ import numpy as np
5
+ import tensorflow as tf
6
+
7
+ from safetensors import numpy, safe_open
8
+
9
+
10
def save(tensors: Dict[str, tf.Tensor], metadata: Optional[Dict[str, str]] = None) -> bytes:
    """
    Serialize a dictionary of tensorflow tensors into raw bytes in safetensors format.

    Args:
        tensors (`Dict[str, tf.Tensor]`):
            The tensors to serialize. They must be contiguous and dense.
        metadata (`Dict[str, str]`, *optional*, defaults to `None`):
            Free-form text metadata stored in the header. Purely informative;
            it does not affect tensor loading.

    Returns:
        `bytes`: The serialized safetensors payload.

    Example:

    ```python
    from safetensors.tensorflow import save
    import tensorflow as tf

    tensors = {"embedding": tf.zeros((512, 1024)), "attention": tf.zeros((256, 256))}
    byte_data = save(tensors)
    ```
    """
    # Convert to numpy first and reuse the numpy serialization path.
    converted = _tf2np(tensors)
    return numpy.save(converted, metadata=metadata)
37
+
38
+
39
def save_file(
    tensors: Dict[str, tf.Tensor],
    filename: Union[str, os.PathLike],
    metadata: Optional[Dict[str, str]] = None,
) -> None:
    """
    Serialize a dictionary of tensorflow tensors into a safetensors file on disk.

    Args:
        tensors (`Dict[str, tf.Tensor]`):
            The tensors to serialize. They must be contiguous and dense.
        filename (`str`, or `os.PathLike`):
            The destination file path.
        metadata (`Dict[str, str]`, *optional*, defaults to `None`):
            Free-form text metadata stored in the header. Purely informative;
            it does not affect tensor loading.

    Returns:
        `None`

    Example:

    ```python
    from safetensors.tensorflow import save_file
    import tensorflow as tf

    tensors = {"embedding": tf.zeros((512, 1024)), "attention": tf.zeros((256, 256))}
    save_file(tensors, "model.safetensors")
    ```
    """
    # Convert to numpy first and reuse the numpy file-writing path.
    converted = _tf2np(tensors)
    return numpy.save_file(converted, filename, metadata=metadata)
72
+
73
+
74
def load(data: bytes) -> Dict[str, tf.Tensor]:
    """
    Deserialize safetensors bytes into tensorflow tensors.

    Args:
        data (`bytes`):
            The content of a safetensors file.

    Returns:
        `Dict[str, tf.Tensor]`: mapping from tensor name to `tf.Tensor` on cpu.

    Example:

    ```python
    from safetensors.tensorflow import load

    file_path = "./my_folder/bert.safetensors"
    with open(file_path, "rb") as f:
        data = f.read()

    loaded = load(data)
    ```
    """
    # Parse via the numpy backend, then convert to tf tensors.
    as_numpy = numpy.load(data)
    return _np2tf(as_numpy)
99
+
100
+
101
def load_file(filename: Union[str, os.PathLike]) -> Dict[str, tf.Tensor]:
    """
    Load a safetensors file into tensorflow tensors.

    Args:
        filename (`str`, or `os.PathLike`):
            Path of the safetensors file to read.

    Returns:
        `Dict[str, tf.Tensor]`: mapping from tensor name to `tf.Tensor`.

    Example:

    ```python
    from safetensors.tensorflow import load_file

    file_path = "./my_folder/bert.safetensors"
    loaded = load_file(file_path)
    ```
    """
    # Open lazily and materialize every tensor by name.
    with safe_open(filename, framework="tf") as f:
        return {name: f.get_tensor(name) for name in f.keys()}
126
+
127
+
128
def _np2tf(numpy_dict: Dict[str, np.ndarray]) -> Dict[str, tf.Tensor]:
    # Convert every numpy array into a tf.Tensor.  Returns a fresh dict
    # instead of rewriting `numpy_dict` in place (the original mutated the
    # caller's mapping); the visible caller (`load`) only uses the return
    # value, so this is fully compatible.
    return {name: tf.convert_to_tensor(array) for name, array in numpy_dict.items()}
132
+
133
+
134
def _tf2np(tf_dict: Dict[str, tf.Tensor]) -> Dict[str, np.array]:
    # Convert every tf.Tensor into a numpy array.  The original version
    # mutated `tf_dict` in place, which silently replaced the tensors inside
    # the user's dict passed to save()/save_file(); building a new dict
    # removes that destructive side effect while keeping the same return value.
    return {name: tensor.numpy() for name, tensor in tf_dict.items()}
evalkit_internvl/lib/python3.10/site-packages/safetensors/torch.py ADDED
@@ -0,0 +1,503 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ from collections import defaultdict
4
+ from typing import Any, Dict, List, Optional, Set, Tuple, Union
5
+
6
+ import torch
7
+
8
+ from safetensors import deserialize, safe_open, serialize, serialize_file
9
+
10
+
11
def storage_ptr(tensor: torch.Tensor) -> int:
    """Return the address of the tensor's underlying storage (0 for meta storage)."""
    try:
        return tensor.untyped_storage().data_ptr()
    except Exception:
        # torch==1.10 has no untyped_storage(); fall back to the typed API.
        try:
            return tensor.storage().data_ptr()
        except NotImplementedError:
            # Meta tensors have no real storage pointer.
            return 0
21
+
22
+
23
def _end_ptr(tensor: torch.Tensor) -> int:
    # One-past-the-end address of the tensor's data: the address of the last
    # element (in flattened order) plus one element width.  Empty tensors
    # simply report their base pointer.
    if not tensor.nelement():
        return tensor.data_ptr()
    return tensor.view(-1)[-1].data_ptr() + _SIZE[tensor.dtype]
29
+
30
+
31
def storage_size(tensor: torch.Tensor) -> int:
    """Return the number of bytes held by the tensor's underlying storage."""
    try:
        return tensor.untyped_storage().nbytes()
    except AttributeError:
        # torch==1.10: the typed storage reports element count, not bytes.
        try:
            return tensor.storage().size() * _SIZE[tensor.dtype]
        except NotImplementedError:
            # Meta storage: fall back to the tensor's own element count
            # (exact on torch >= 2.0).
            return tensor.nelement() * _SIZE[tensor.dtype]
42
+
43
+
44
+ def _filter_shared_not_shared(tensors: List[Set[str]], state_dict: Dict[str, torch.Tensor]) -> List[Set[str]]:
45
+ filtered_tensors = []
46
+ for shared in tensors:
47
+ if len(shared) < 2:
48
+ filtered_tensors.append(shared)
49
+ continue
50
+
51
+ areas = []
52
+ for name in shared:
53
+ tensor = state_dict[name]
54
+ areas.append((tensor.data_ptr(), _end_ptr(tensor), name))
55
+ areas.sort()
56
+
57
+ _, last_stop, last_name = areas[0]
58
+ filtered_tensors.append({last_name})
59
+ for start, stop, name in areas[1:]:
60
+ if start >= last_stop:
61
+ filtered_tensors.append({name})
62
+ else:
63
+ filtered_tensors[-1].add(name)
64
+ last_stop = stop
65
+
66
+ return filtered_tensors
67
+
68
+
69
def _find_shared_tensors(state_dict: Dict[str, torch.Tensor]) -> List[Set[str]]:
    # Group names by (device, storage pointer, storage size): candidates for
    # memory sharing.  Meta tensors and null/empty storages are skipped.
    groups = defaultdict(set)
    for name, tensor in state_dict.items():
        if tensor.device != torch.device("meta") and storage_ptr(tensor) != 0 and storage_size(tensor) != 0:
            # The device is part of the key: identical pointers on different
            # GPUs belong to distinct storages.
            groups[(tensor.device, storage_ptr(tensor), storage_size(tensor))].add(name)
    # Same storage does not imply overlapping byte ranges: refine the groups.
    candidates = sorted(groups.values())
    return _filter_shared_not_shared(candidates, state_dict)
78
+
79
+
80
def _is_complete(tensor: torch.Tensor) -> bool:
    # A tensor is "complete" when it starts at the beginning of its storage
    # and its elements span that storage exactly, i.e. it is not a partial view.
    starts_at_base = tensor.data_ptr() == storage_ptr(tensor)
    return starts_at_base and tensor.nelement() * _SIZE[tensor.dtype] == storage_size(tensor)
82
+
83
+
84
def _remove_duplicate_names(
    state_dict: Dict[str, torch.Tensor],
    *,
    preferred_names: Optional[List[str]] = None,
    discard_names: Optional[List[str]] = None,
) -> Dict[str, List[str]]:
    """
    For every group of memory-sharing tensors, choose one name to keep and
    list the aliases to drop.

    Args:
        state_dict: mapping of tensor names to (possibly shared) tensors.
        preferred_names (*optional*): names favored as the kept name.
        discard_names (*optional*): names avoided as the kept name when an
            alternative exists.

    Returns:
        `Dict[str, List[str]]`: kept name -> aliases to remove.

    Raises:
        RuntimeError: when a shared group has no name covering the whole
            storage, since dropping any alias would lose data.
    """
    if preferred_names is None:
        preferred_names = []
    preferred_names = set(preferred_names)
    if discard_names is None:
        discard_names = []
    discard_names = set(discard_names)

    shareds = _find_shared_tensors(state_dict)
    to_remove = defaultdict(list)
    for shared in shareds:
        complete_names = set([name for name in shared if _is_complete(state_dict[name])])
        if not complete_names:
            # Fix: added the missing space after "storage." in the message.
            raise RuntimeError(
                "Error while trying to find names to remove to save state dict, but found no suitable name to keep"
                f" for saving amongst: {shared}. None is covering the entire storage. Refusing to save/load the model"
                " since you could be storing much more memory than needed. Please refer to"
                " https://huggingface.co/docs/safetensors/torch_shared_tensors for more information. Or open an"
                " issue."
            )

        # Default choice: alphabetically first complete name.
        keep_name = sorted(list(complete_names))[0]

        # Mechanism to preferentially select keys to keep coming from the
        # on-disk file, to allow loading models saved with a different
        # choice of keep_name.
        preferred = complete_names.difference(discard_names)
        if preferred:
            keep_name = sorted(list(preferred))[0]

        # Explicit caller preferences win over everything else.
        if preferred_names:
            preferred = preferred_names.intersection(complete_names)
            if preferred:
                keep_name = sorted(list(preferred))[0]
        for name in sorted(shared):
            if name != keep_name:
                to_remove[keep_name].append(name)
    return to_remove
128
+
129
+
130
def save_model(
    model: torch.nn.Module, filename: str, metadata: Optional[Dict[str, str]] = None, force_contiguous: bool = True
):
    """
    Save a torch model's state dict to `filename`, de-duplicating shared
    tensors first, since tensor aliasing is not allowed in `safetensors`.
    [More information on tensor sharing](../torch_shared_tensors)

    Args:
        model (`torch.nn.Module`):
            The model to save on disk.
        filename (`str`):
            The filename location to save the file
        metadata (`Dict[str, str]`, *optional*):
            Extra information to save along with the file.
            One entry is added for each dropped alias; this is not enough to
            rebuild the full sharing structure but helps understand it.
        force_contiguous (`boolean`, *optional*, defaults to True):
            Forcing the state_dict to be saved as contiguous tensors.
            This has no effect on the correctness of the model, but it
            could potentially change performance if the layout of the tensor
            was chosen specifically for that reason.
    """
    state_dict = model.state_dict()
    to_removes = _remove_duplicate_names(state_dict)

    for kept_name, to_remove_group in to_removes.items():
        for to_remove in to_remove_group:
            if metadata is None:
                metadata = {}
            if to_remove not in metadata:
                # Record the dropped alias (never overriding user metadata).
                metadata[to_remove] = kept_name
            del state_dict[to_remove]
    if force_contiguous:
        state_dict = {name: tensor.contiguous() for name, tensor in state_dict.items()}
    try:
        save_file(state_dict, filename, metadata=metadata)
    except ValueError as e:
        msg = str(e)
        msg += " Or use save_model(..., force_contiguous=True), read the docs for potential caveats."
        raise ValueError(msg)
174
+
175
+
176
def load_model(
    model: torch.nn.Module, filename: Union[str, os.PathLike], strict: bool = True, device: Union[str, int] = "cpu"
) -> Tuple[List[str], List[str]]:
    """
    Load a safetensors checkpoint onto a torch model, reconciling tensor
    aliases that were dropped at save time. [More information on tensor sharing](../torch_shared_tensors)

    Args:
        model (`torch.nn.Module`):
            The model to load onto.
        filename (`str`, or `os.PathLike`):
            The filename location to load the file from.
        strict (`bool`, *optional*, defaults to True):
            Whether to fail if you're missing keys or having unexpected ones.
            When false, the function simply returns missing and unexpected names.
        device (`Union[str, int]`, *optional*, defaults to `cpu`):
            The device where the tensors need to be located after load.
            available options are all regular torch device locations.

    Returns:
        `(missing, unexpected): (List[str], List[str])`
            `missing` are names in the model which were not modified during loading
            `unexpected` are names that are on the file, but weren't used during
            the load.
    """
    state_dict = load_file(filename, device=device)
    model_state_dict = model.state_dict()
    # Prefer the alias choice made by the file so re-loading is symmetric.
    to_removes = _remove_duplicate_names(model_state_dict, preferred_names=state_dict.keys())
    missing, unexpected = model.load_state_dict(state_dict, strict=False)
    missing = set(missing)
    # Names dropped at save time are expected to be absent: reclassify them.
    for to_remove_group in to_removes.values():
        for to_remove in to_remove_group:
            if to_remove not in missing:
                unexpected.append(to_remove)
            else:
                missing.remove(to_remove)
    if strict and (missing or unexpected):
        missing_keys = ", ".join([f'"{k}"' for k in sorted(missing)])
        unexpected_keys = ", ".join([f'"{k}"' for k in sorted(unexpected)])
        error = f"Error(s) in loading state_dict for {model.__class__.__name__}:"
        if missing:
            error += f"\n Missing key(s) in state_dict: {missing_keys}"
        if unexpected:
            error += f"\n Unexpected key(s) in state_dict: {unexpected_keys}"
        raise RuntimeError(error)
    return missing, unexpected
223
+
224
+
225
def save(tensors: Dict[str, torch.Tensor], metadata: Optional[Dict[str, str]] = None) -> bytes:
    """
    Serialize a dictionary of torch tensors into raw bytes in safetensors format.

    Args:
        tensors (`Dict[str, torch.Tensor]`):
            The tensors to serialize. They must be contiguous and dense.
        metadata (`Dict[str, str]`, *optional*, defaults to `None`):
            Free-form text metadata stored in the header. Purely informative;
            it does not affect tensor loading.

    Returns:
        `bytes`: The serialized safetensors payload.

    Example:

    ```python
    from safetensors.torch import save
    import torch

    tensors = {"embedding": torch.zeros((512, 1024)), "attention": torch.zeros((256, 256))}
    byte_data = save(tensors)
    ```
    """
    # Flatten to {name: {dtype, shape, data}} and hand off to the rust core.
    return bytes(serialize(_flatten(tensors), metadata=metadata))
253
+
254
+
255
def save_file(
    tensors: Dict[str, torch.Tensor],
    filename: Union[str, os.PathLike],
    metadata: Optional[Dict[str, str]] = None,
):
    """
    Serialize a dictionary of torch tensors into a safetensors file on disk.

    Args:
        tensors (`Dict[str, torch.Tensor]`):
            The tensors to serialize. They must be contiguous and dense.
        filename (`str`, or `os.PathLike`):
            The destination file path.
        metadata (`Dict[str, str]`, *optional*, defaults to `None`):
            Free-form text metadata stored in the header. Purely informative;
            it does not affect tensor loading.

    Returns:
        `None`

    Example:

    ```python
    from safetensors.torch import save_file
    import torch

    tensors = {"embedding": torch.zeros((512, 1024)), "attention": torch.zeros((256, 256))}
    save_file(tensors, "model.safetensors")
    ```
    """
    # Flatten to {name: {dtype, shape, data}} and let the rust core write it.
    serialize_file(_flatten(tensors), filename, metadata=metadata)
287
+
288
+
289
def load_file(filename: Union[str, os.PathLike], device: Union[str, int] = "cpu") -> Dict[str, torch.Tensor]:
    """
    Load a safetensors file into torch tensors.

    Args:
        filename (`str`, or `os.PathLike`):
            Path of the safetensors file to read.
        device (`Union[str, int]`, *optional*, defaults to `cpu`):
            The device where the tensors need to be located after load.
            available options are all regular torch device locations.

    Returns:
        `Dict[str, torch.Tensor]`: mapping from tensor name to `torch.Tensor`.

    Example:

    ```python
    from safetensors.torch import load_file

    file_path = "./my_folder/bert.safetensors"
    loaded = load_file(file_path)
    ```
    """
    # Open lazily and materialize every tensor by name on the target device.
    with safe_open(filename, framework="pt", device=device) as f:
        return {name: f.get_tensor(name) for name in f.keys()}
317
+
318
+
319
def load(data: bytes) -> Dict[str, torch.Tensor]:
    """
    Deserialize safetensors bytes into torch tensors.

    Args:
        data (`bytes`):
            The content of a safetensors file.

    Returns:
        `Dict[str, torch.Tensor]`: mapping from tensor name to `torch.Tensor` on cpu.

    Example:

    ```python
    from safetensors.torch import load

    file_path = "./my_folder/bert.safetensors"
    with open(file_path, "rb") as f:
        data = f.read()

    loaded = load(data)
    ```
    """
    # Parse with the rust core, then wrap the raw views as torch tensors.
    return _view2torch(deserialize(data))
344
+
345
+
346
+ # torch.float8 formats require 2.1; we do not support these dtypes on earlier versions
347
+ _float8_e4m3fn = getattr(torch, "float8_e4m3fn", None)
348
+ _float8_e5m2 = getattr(torch, "float8_e5m2", None)
349
+
350
+ _SIZE = {
351
+ torch.int64: 8,
352
+ torch.float32: 4,
353
+ torch.int32: 4,
354
+ torch.bfloat16: 2,
355
+ torch.float16: 2,
356
+ torch.int16: 2,
357
+ torch.uint8: 1,
358
+ torch.int8: 1,
359
+ torch.bool: 1,
360
+ torch.float64: 8,
361
+ _float8_e4m3fn: 1,
362
+ _float8_e5m2: 1,
363
+ }
364
+
365
+ _TYPES = {
366
+ "F64": torch.float64,
367
+ "F32": torch.float32,
368
+ "F16": torch.float16,
369
+ "BF16": torch.bfloat16,
370
+ "I64": torch.int64,
371
+ # "U64": torch.uint64,
372
+ "I32": torch.int32,
373
+ # "U32": torch.uint32,
374
+ "I16": torch.int16,
375
+ # "U16": torch.uint16,
376
+ "I8": torch.int8,
377
+ "U8": torch.uint8,
378
+ "BOOL": torch.bool,
379
+ "F8_E4M3": _float8_e4m3fn,
380
+ "F8_E5M2": _float8_e5m2,
381
+ }
382
+
383
+
384
+ def _getdtype(dtype_str: str) -> torch.dtype:
385
+ return _TYPES[dtype_str]
386
+
387
+
388
def _view2torch(safeview) -> Dict[str, torch.Tensor]:
    # Convert each (name, view) entry of a deserialized safetensors payload
    # into a torch tensor over the view's buffer.
    result = {}
    for name, view in safeview:
        dtype = _getdtype(view["dtype"])
        if len(view["data"]) == 0:
            # torch.frombuffer rejects empty buffers; this is only legal when
            # some dimension is zero, so build an empty tensor directly.
            assert any(x == 0 for x in view["shape"])
            tensor = torch.empty(view["shape"], dtype=dtype)
        else:
            tensor = torch.frombuffer(view["data"], dtype=dtype).reshape(view["shape"])
        if sys.byteorder == "big":
            # File bytes are little-endian: swap a copy on big-endian hosts.
            tensor = torch.from_numpy(tensor.numpy().byteswap(inplace=False))
        result[name] = tensor

    return result
403
+
404
+
405
def _tobytes(tensor: torch.Tensor, name: str) -> bytes:
    """
    Return the raw (little-endian) bytes of `tensor`'s data.

    Raises ValueError for sparse or non-contiguous tensors; GPU tensors are
    copied to cpu first.
    """
    if tensor.layout != torch.strided:
        raise ValueError(
            f"You are trying to save a sparse tensor: `{name}` which this library does not support."
            " You can make it a dense tensor before saving with `.to_dense()` but be aware this might"
            " make a much larger file than needed."
        )

    if not tensor.is_contiguous():
        raise ValueError(
            f"You are trying to save a non contiguous tensor: `{name}` which is not allowed. It either means you"
            " are trying to save tensors which are reference of each other in which case it's recommended to save"
            " only the full tensors, and reslice at load time, or simply call `.contiguous()` on your tensor to"
            " pack it before saving."
        )
    if tensor.device.type != "cpu":
        # Moving tensor to cpu before saving
        tensor = tensor.to("cpu")

    import ctypes

    import numpy as np

    # np.prod of an empty (scalar) shape yields a float; force an int so the
    # byte-count arithmetic below stays integral.
    element_count = int(np.prod(tensor.shape).item())
    total_bytes = element_count * _SIZE[tensor.dtype]

    ptr = tensor.data_ptr()
    if ptr == 0:
        return b""
    raw = ctypes.cast(ptr, ctypes.POINTER(ctypes.c_ubyte))
    data = np.ctypeslib.as_array(raw, (total_bytes,))  # no internal copy
    if sys.byteorder == "big":
        # Safetensors stores little-endian bytes, so byteswap a copy (never
        # in place — the tensor may belong to a live running model).
        NPDTYPES = {
            torch.int64: np.int64,
            torch.float32: np.float32,
            torch.int32: np.int32,
            # XXX: This is ok because both have the same width
            torch.bfloat16: np.float16,
            torch.float16: np.float16,
            torch.int16: np.int16,
            torch.uint8: np.uint8,
            torch.int8: np.int8,
            torch.bool: bool,
            torch.float64: np.float64,
            # XXX: This is ok because both have the same width and byteswap is a no-op anyway
            _float8_e4m3fn: np.uint8,
            _float8_e5m2: np.uint8,
        }
        data = data.view(NPDTYPES[tensor.dtype]).byteswap(inplace=False)
    return data.tobytes()
461
+
462
+
463
def _flatten(tensors: Dict[str, torch.Tensor]) -> Dict[str, Dict[str, Any]]:
    """
    Convert a name->tensor mapping into the name->{dtype, shape, data} form
    the serializer expects, rejecting sparse and memory-sharing tensors.
    """
    if not isinstance(tensors, dict):
        raise ValueError(f"Expected a dict of [str, torch.Tensor] but received {type(tensors)}")

    invalid_tensors = []
    for k, v in tensors.items():
        if not isinstance(v, torch.Tensor):
            raise ValueError(f"Key `{k}` is invalid, expected torch.Tensor but received {type(v)}")

        if v.layout != torch.strided:
            invalid_tensors.append(k)
    if invalid_tensors:
        raise ValueError(
            f"You are trying to save a sparse tensors: `{invalid_tensors}` which this library does not support."
            " You can make it a dense tensor before saving with `.to_dense()` but be aware this might"
            " make a much larger file than needed."
        )

    # Aliasing tensors would be written twice and load back as copies; refuse.
    failing = [names for names in _find_shared_tensors(tensors) if len(names) > 1]

    if failing:
        raise RuntimeError(
            f"""
            Some tensors share memory, this will lead to duplicate memory on disk and potential differences when loading them again: {failing}.
            A potential way to correctly save your model is to use `save_model`.
            More information at https://huggingface.co/docs/safetensors/torch_shared_tensors
            """
        )

    return {
        name: {
            "dtype": str(tensor.dtype).split(".")[-1],
            "shape": tensor.shape,
            "data": _tobytes(tensor, name),
        }
        for name, tensor in tensors.items()
    }
evalkit_internvl/lib/python3.10/site-packages/transformers/__pycache__/tokenization_utils_base.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e32cb992ae5b1f858d09f7b912fe455fe9ae85fa11c75c97d1a44a1eff547a95
3
+ size 145200
evalkit_internvl/lib/python3.10/site-packages/transformers/kernels/mra/cuda_kernel.cu ADDED
@@ -0,0 +1,383 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include "cuda_kernel.h"
2
+
3
+ //////////////////////////////////////////////////////////////////////////////////////////////////
4
+ //////////////////////////////////////////////////////////////////////////////////////////////////
5
+
6
// Per-batch scatter-max of index scores into per-(A block, lane) slots.
//
// Expected launch: gridDim.x == batch_size, 1-D blocks (blockDim.x threads).
// Dynamic shared memory must hold at least A_num_block * 32 ints —
// TODO(review): confirm against the host-side launcher.
//
// Values are folded through fixed-point ints (value * 1000) because CUDA has
// no native float atomicMax.
__global__ void index_max_cuda_kernel(
  float *index_vals,       // [batch_size, 32, num_block]
  int   *indices,          // [batch_size, num_block]
  float *max_vals,         // [batch_size, A_num_block * 32]
  float *max_vals_scatter, // [batch_size, 32, num_block]
  long batch_size,
  long A_num_block,
  long B_num_block,
  long num_block
) {

  long batch_idx = blockIdx.x;

  long thread_idx = threadIdx.x;
  long num_thread = blockDim.x;

  extern __shared__ float buffer[];
  int *max_buffer = (int*)buffer;

  // Initialize the running maxima with a very negative sentinel.
  // (-100000000 == (int)(-1e8); the original stored the float literal.)
  for (long idx = thread_idx; idx < A_num_block * 32; idx += num_thread) {
    max_buffer[idx] = -100000000;
  }
  __syncthreads();

  int *indices_pt = &indices[batch_idx * num_block];
  float *index_vals_pt = &index_vals[batch_idx * num_block * 32];

  // Scatter-max every index value into its A-block slot.
  // Fix: bounds guard added — the original indexed past the end whenever
  // blockDim.x did not evenly divide 32 * num_block.
  for (long idx = thread_idx; idx < 32 * num_block; idx += num_thread) {
    long A_block_idx = indices_pt[idx % num_block] / B_num_block;
    atomicMax(&max_buffer[A_block_idx * 32 + idx / num_block], (int)(index_vals_pt[idx] * 1000));
  }
  __syncthreads();

  // Write the gathered maxima back, undoing the fixed-point scaling
  // (float literal instead of the original double `1000.`).
  float *max_vals_pt = &max_vals[batch_idx * A_num_block * 32];
  for (long idx = thread_idx; idx < A_num_block * 32; idx += num_thread) {
    max_vals_pt[idx] = (float)max_buffer[idx] / 1000.0f;
  }

  // Also emit the maxima scattered back into the [32, num_block] layout.
  float *max_vals_scatter_pt = &max_vals_scatter[batch_idx * num_block * 32];
  for (long idx = thread_idx; idx < 32 * num_block; idx += num_thread) {
    long A_block_idx = indices_pt[idx % num_block] / B_num_block;
    max_vals_scatter_pt[idx] = (float)max_buffer[A_block_idx * 32 + idx / num_block] / 1000.0f;
  }

}
59
+
60
// Computes one 32x32 output block per sampled (A-block, B-block) pair.
// For each entry of `indices` (encoded as A_idx * B_num_block + B_idx), the
// corresponding 32-wide slices of dense_A and dense_B are multiplied over the
// `dim` axis and the 32x32 result is written to sparse_C.
// Expected launch (see host launcher): block (64, 4), grid (num_block / 4,
// batch_size); 4096 floats (16 KB) of static shared memory, 1024 per y-slice.
// Assumes dim is a multiple of 8 -- TODO confirm with callers.
__global__ void mm_to_sparse_cuda_kernel(
    float *dense_A,   // [batch_size, A_num_block, dim, 32]
    float *dense_B,   // [batch_size, B_num_block, dim, 32]
    int *indices,     // [batch_size, num_block]
    float *sparse_C,  // [batch_size, num_block, 32, 32]
    long batch_size,
    long A_num_block,
    long B_num_block,
    long dim,
    long num_block
) {

    long batch_idx = blockIdx.y;
    long block_idx = blockIdx.x * blockDim.y + threadIdx.y;

    long thread_idx = threadIdx.x;

    // Each y-slice of the thread block owns 1024 floats, split into a
    // double-buffered A tile and B tile of 2 x (8 x 32) floats each.
    __shared__ float buffer[4096];
    float *A_buffer = &buffer[threadIdx.y * 1024];        // [2, 8, 32]
    float *B_buffer = &buffer[threadIdx.y * 1024 + 512];  // [2, 8, 32]

    long batch_idx__block_idx = batch_idx * num_block + block_idx;

    // Decode the sampled block pair: row block of A and column block of B.
    long AB_block_idx = indices[batch_idx__block_idx];
    float *dense_A_pt = &dense_A[(batch_idx * A_num_block + AB_block_idx / B_num_block) * dim * 32];
    float *dense_B_pt = &dense_B[(batch_idx * B_num_block + AB_block_idx % B_num_block) * dim * 32];

    // 8x8 thread grid over the 32x32 output tile; each thread owns a 4x4
    // register sub-tile.
    int reg_1_idx = thread_idx / 8;  // [0000000011111111222222223333333344444444555555556666666677777777]
    int reg_2_idx = thread_idx % 8;  // [0123456701234567012345670123456701234567012345670123456701234567]

    // Double-buffered register fragments (two 4-element halves each).
    float reg_1[8];
    float reg_2[8];

    // 4x4 accumulator for this thread's output sub-tile.
    float reg_array[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

    // Prologue: stage the first 8x32 slab of A and B into shared memory.
    #pragma unroll
    for (int i = 0; i < 4; i++) {
        A_buffer[i * 64 + thread_idx] = dense_A_pt[i * 64 + thread_idx];
        B_buffer[i * 64 + thread_idx] = dense_B_pt[i * 64 + thread_idx];
    }

    __syncthreads();

    // Preload the first register fragments from the staged slab.
    #pragma unroll
    for (int i = 0; i < 4; i++) {
        reg_1[i] = A_buffer[reg_1_idx * 4 + i];
        reg_2[i] = B_buffer[reg_2_idx * 4 + i];
    }

    // Main pipeline over dim in chunks of 8: while computing on the current
    // shared-memory half, prefetch the next chunk into the other half.
    for (int dim_stride = 1; dim_stride < (dim / 8); dim_stride++) {

        // Prefetch the next 8x32 slab into the (dim_stride % 2) half.
        #pragma unroll
        for (int i = 0; i < 4; i++) {
            A_buffer[(dim_stride % 2) * 256 + i * 64 + thread_idx] = dense_A_pt[dim_stride * 256 + i * 64 + thread_idx];
            B_buffer[(dim_stride % 2) * 256 + i * 64 + thread_idx] = dense_B_pt[dim_stride * 256 + i * 64 + thread_idx];
        }

        // Consume the previous slab: register double-buffering over its 8
        // mini-dim steps, overlapping loads with FMAs.
        #pragma unroll
        for (int mini_dim_idx = 1; mini_dim_idx < 8; mini_dim_idx++) {
            #pragma unroll
            for (int i = 0; i < 4; i++) {
                reg_1[(mini_dim_idx % 2) * 4 + i] = A_buffer[((dim_stride - 1) % 2) * 256 + mini_dim_idx * 32 + reg_1_idx * 4 + i];
                reg_2[(mini_dim_idx % 2) * 4 + i] = B_buffer[((dim_stride - 1) % 2) * 256 + mini_dim_idx * 32 + reg_2_idx * 4 + i];
            }
            #pragma unroll
            for (int i = 0; i < 4; i++) {
                #pragma unroll
                for (int j = 0; j < 4; j++) {
                    reg_array[i * 4 + j] += reg_1[((mini_dim_idx - 1) % 2) * 4 + i] * reg_2[((mini_dim_idx - 1) % 2) * 4 + j];
                }
            }
        }

        // Barrier: prefetched slab must be complete before reading it.
        __syncthreads();

        // Load the first fragment of the freshly staged slab.
        #pragma unroll
        for (int i = 0; i < 4; i++) {
            reg_1[i] = A_buffer[(dim_stride % 2) * 256 + reg_1_idx * 4 + i];
            reg_2[i] = B_buffer[(dim_stride % 2) * 256 + reg_2_idx * 4 + i];
        }

        // Drain the last mini-dim step (index 7) of the previous slab.
        #pragma unroll
        for (int i = 0; i < 4; i++) {
            #pragma unroll
            for (int j = 0; j < 4; j++) {
                reg_array[i * 4 + j] += reg_1[4 + i] * reg_2[4 + j];
            }
        }

    }

    // Epilogue: consume the final slab (half selected by the loop exit).
    #pragma unroll
    for (int mini_dim_idx = 1; mini_dim_idx < 8; mini_dim_idx++) {
        #pragma unroll
        for (int i = 0; i < 4; i++) {
            reg_1[(mini_dim_idx % 2) * 4 + i] = A_buffer[256 + mini_dim_idx * 32 + reg_1_idx * 4 + i];
            reg_2[(mini_dim_idx % 2) * 4 + i] = B_buffer[256 + mini_dim_idx * 32 + reg_2_idx * 4 + i];
        }
        #pragma unroll
        for (int i = 0; i < 4; i++) {
            #pragma unroll
            for (int j = 0; j < 4; j++) {
                reg_array[i * 4 + j] += reg_1[((mini_dim_idx - 1) % 2) * 4 + i] * reg_2[((mini_dim_idx - 1) % 2) * 4 + j];
            }
        }
    }
    // Drain the final mini-dim step.
    #pragma unroll
    for (int i = 0; i < 4; i++) {
        #pragma unroll
        for (int j = 0; j < 4; j++) {
            reg_array[i * 4 + j] += reg_1[4 + i] * reg_2[4 + j];
        }
    }
    __syncthreads();

    // Reuse the staging buffer as the 32x32 output tile.
    float *C_buffer = &buffer[threadIdx.y * 1024];  // [32, 32]

    #pragma unroll
    for (int i = 0; i < 4; i++) {
        #pragma unroll
        for (int j = 0; j < 4; j++) {
            C_buffer[(reg_2_idx * 4 + j) * 32 + reg_1_idx * 4 + i] = reg_array[i * 4 + j];
        }
    }
    __syncthreads();

    // Coalesced write of the assembled tile to global memory.
    float *sparse_C_pt = &sparse_C[batch_idx__block_idx * 1024];

    #pragma unroll
    for (int i = 0; i < 16; i++) {
        sparse_C_pt[i * 64 + thread_idx] = C_buffer[i * 64 + thread_idx];
    }

}
194
+
195
// Multiplies each 32x32 sparse block of A with the matching 32-wide slice of
// dense_B and accumulates (via atomicAdd) into the A-block row of dense_C.
// Block pair decoding follows `indices` (A_idx * B_num_block + B_idx).
// Expected launch (see host launcher): block (128, 2), grid (num_block / 2,
// batch_size); 6144 floats (24 KB) of static shared memory, 3072 per y-slice.
// Assumes dim is a multiple of 64 -- TODO confirm with callers.
__global__ void sparse_dense_mm_cuda_kernel(
    float *sparse_A,  // [batch_size, num_block, 32, 32]
    int *indices,     // [batch_size, num_block]
    float *dense_B,   // [batch_size, B_num_block, dim, 32]
    float *dense_C,   // [batch_size, A_num_block, dim, 32]
    long batch_size,
    long A_num_block,
    long B_num_block,
    long dim,
    long num_block
) {

    long batch_idx = blockIdx.y;
    long block_idx = blockIdx.x * blockDim.y + threadIdx.y;

    long thread_idx = threadIdx.x;

    // Per y-slice shared layout: 1024-float A tile followed by a 2048-float
    // staging area used first for B input, then reused for the C output.
    __shared__ float buffer[6144];
    float *A_buffer = &buffer[threadIdx.y * 3072];         // [32, 32]
    float *B_buffer = &buffer[threadIdx.y * 3072 + 1024];  // [32, 64]

    long batch_idx__block_idx = batch_idx * num_block + block_idx;

    // Stage the whole 32x32 sparse block once; it is reused for every
    // dim_stride chunk.
    float *sparse_A_pt = &sparse_A[batch_idx__block_idx * 1024];
    #pragma unroll
    for (int i = 0; i < 8; i++) {
        A_buffer[i * 128 + thread_idx] = sparse_A_pt[i * 128 + thread_idx];
    }

    long AB_block_idx = indices[batch_idx__block_idx];
    float *dense_B_pt = &dense_B[(batch_idx * B_num_block + AB_block_idx % B_num_block) * 32 * dim];
    float *dense_C_pt = &dense_C[(batch_idx * A_num_block + AB_block_idx / B_num_block) * 32 * dim];

    // 16x8 thread grid over a 64x32 output chunk; each thread owns a 4x4
    // register sub-tile.
    // [0000000011111111222222223333333344444444555555556666666677777777]
    // [0123456701234567012345670123456701234567012345670123456701234567]
    int reg_1_idx = thread_idx / 8;
    int reg_2_idx = thread_idx % 8;

    // Double-buffered register fragments (two 4-element halves each).
    float reg_1[8];
    float reg_2[8];

    // 4x4 accumulator, re-zeroed for every dim chunk.
    float reg_array[16];

    // Process dim in chunks of 64 rows of B.
    for (int dim_stride = 0; dim_stride < dim; dim_stride = dim_stride + 64) {

        // Stage the current 64x32 chunk of B.
        #pragma unroll
        for (int i = 0; i < 16; i++) {
            B_buffer[i * 128 + thread_idx] = dense_B_pt[dim_stride * 32 + i * 128 + thread_idx];
        }

        #pragma unroll
        for (int i = 0; i < 16; i++) {
            reg_array[i] = 0;
        }

        // Barrier: staged A (first iteration) and B must be visible.
        __syncthreads();

        // Preload fragments for the first inner step.
        #pragma unroll
        for (int i = 0; i < 4; i++) {
            reg_1[i] = B_buffer[(reg_1_idx * 4 + i) * 32];
            reg_2[i] = A_buffer[reg_2_idx * 4 + i];
        }

        // Register double-buffering over the 32 inner-product steps.
        #pragma unroll
        for (int mini_dim_idx = 1; mini_dim_idx < 32; mini_dim_idx++) {
            #pragma unroll
            for (int i = 0; i < 4; i++) {
                reg_1[(mini_dim_idx % 2) * 4 + i] = B_buffer[(reg_1_idx * 4 + i) * 32 + mini_dim_idx];
                reg_2[(mini_dim_idx % 2) * 4 + i] = A_buffer[mini_dim_idx * 32 + reg_2_idx * 4 + i];
            }
            #pragma unroll
            for (int i = 0; i < 4; i++) {
                #pragma unroll
                for (int j = 0; j < 4; j++) {
                    reg_array[i * 4 + j] += reg_1[((mini_dim_idx - 1) % 2) * 4 + i] * reg_2[((mini_dim_idx - 1) % 2) * 4 + j];
                }
            }
        }

        // Drain the final inner step (index 31).
        #pragma unroll
        for (int i = 0; i < 4; i++) {
            #pragma unroll
            for (int j = 0; j < 4; j++) {
                reg_array[i * 4 + j] += reg_1[4 + i] * reg_2[4 + j];
            }
        }

        // All reads of B_buffer done; safe to reuse it as the output tile.
        __syncthreads();

        float *C_buffer = &buffer[threadIdx.y * 3072 + 1024];  // [64, 32]

        #pragma unroll
        for (int i = 0; i < 4; i++) {
            #pragma unroll
            for (int j = 0; j < 4; j++) {
                C_buffer[(reg_1_idx * 4 + i) * 32 + reg_2_idx * 4 + j] = reg_array[i * 4 + j];
            }
        }
        __syncthreads();

        // Accumulate into dense_C with atomics: multiple sampled blocks can
        // map to the same A-block row and race otherwise.
        #pragma unroll
        for (int i = 0; i < 16; i++) {
            atomicAdd(&dense_C_pt[dim_stride * 32 + i * 128 + thread_idx], C_buffer[i * 128 + thread_idx]);
        }
        __syncthreads();

    }

}
304
+
305
+
306
// Sums each 32x32 sparse block along its rows (32 partial column sums per
// block) and accumulates the result into the matching A-block row of dense_C
// with atomicAdd (several sampled blocks may map to the same A-block).
// Expected launch (see host launcher): block (32, 4), grid (num_block / 4,
// batch_size); one thread per output column.
__global__ void reduce_sum_cuda_kernel(
    float *sparse_A,  // [batch_size, num_block, 32, 32]
    int *indices,     // [batch_size, num_block]
    float *dense_C,   // [batch_size, A_num_block, 32]
    long batch_size,
    long A_num_block,
    long B_num_block,
    long num_block
) {

    long batch_idx = blockIdx.y;
    long block_idx = blockIdx.x * blockDim.y + threadIdx.y;

    long thread_idx = threadIdx.x;

    long batch_idx__block_idx = batch_idx * num_block + block_idx;

    long AB_block_idx = indices[batch_idx__block_idx];
    float *sparse_A_pt = &sparse_A[batch_idx__block_idx * 1024];

    // Register double-buffer: 16 slots hold two interleaved groups of 8
    // rows, so loads of the next group overlap summation of the previous.
    float reg_array[16];
    float value = 0;

    // Prologue: load rows 0..7 into slots 0..7.
    #pragma unroll
    for (int i = 0; i < 8; i++) {
        reg_array[i] = sparse_A_pt[i * 32 + thread_idx];
    }
    // Pipeline: load rows [stride, stride+8) while summing the previous 8.
    #pragma unroll
    for (int stride = 8; stride < 32; stride = stride + 8) {
        #pragma unroll
        for (int i = 0; i < 8; i++) {
            reg_array[(stride + i) % 16] = sparse_A_pt[(stride + i) * 32 + thread_idx];
        }
        #pragma unroll
        for (int i = 0; i < 8; i++) {
            value = value + reg_array[(stride - 8 + i) % 16];
        }
    }
    // Drain: the final group (rows 24..31) sits in slots 8..15.
    #pragma unroll
    for (int i = 0; i < 8; i++) {
        value = value + reg_array[8 + i];
    }

    float *dense_C_pt = &dense_C[(batch_idx * A_num_block + AB_block_idx / B_num_block) * 32];

    atomicAdd(&dense_C_pt[thread_idx], value);

}
354
+
355
// Broadcasts one 32-wide row of dense_A into all 32 rows of the matching
// 32x32 block of sparse_C.  The source A-block is decoded from `indices`
// (pair id = A_idx * B_num_block + B_idx).
// Expected launch (see host launcher): block (32, 4), grid (num_block / 4,
// batch_size); one thread per output column.
__global__ void scatter_cuda_kernel(
    float *dense_A,   // [batch_size, A_num_block, 32]
    int *indices,     // [batch_size, num_block]
    float *sparse_C,  // [batch_size, num_block, 32, 32]
    long batch_size,
    long A_num_block,
    long B_num_block,
    long num_block
) {

    long batch_idx = blockIdx.y;
    long block_idx = blockIdx.x * blockDim.y + threadIdx.y;
    long lane = threadIdx.x;

    // Flat index of this sampled block and its decoded A-block row.
    long flat_block = batch_idx * num_block + block_idx;
    long pair_idx = indices[flat_block];
    long a_block = pair_idx / B_num_block;

    // One source element per lane; one 32x32 destination tile per block.
    float broadcast_val = dense_A[(batch_idx * A_num_block + a_block) * 32 + lane];
    float *dst = &sparse_C[flat_block * 1024];

    // Fill column `lane` of every row with the same value.
    #pragma unroll
    for (int row = 0; row < 32; row++) {
        dst[row * 32 + lane] = broadcast_val;
    }

}
evalkit_internvl/lib/python3.10/site-packages/transformers/kernels/mra/cuda_kernel.h ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
#define WARP_SIZE 32
#define FULL_MASK 0xffffffff
#define OPTIMAL_THREADS 256

// Per-(A-block, row) max over the sampled blocks, plus the maxima scattered
// back to the sampled-block layout.  Implemented in cuda_kernel.cu.
__global__ void index_max_cuda_kernel(
    float *index_vals,       // [batch_size, 32, num_block]
    int *indices,            // [batch_size, num_block]
    float *max_vals,         // [batch_size, A_num_block * 32]
    float *max_vals_scatter, // [batch_size, 32, num_block]
    long batch_size,
    long A_num_block,
    long B_num_block,
    long num_block
);

// 32x32 block products of dense_A x dense_B for the sampled block pairs.
__global__ void mm_to_sparse_cuda_kernel(
    float *dense_A,   // [batch_size, A_num_block, dim, 32]
    float *dense_B,   // [batch_size, B_num_block, dim, 32]
    int *indices,     // [batch_size, num_block]
    float *sparse_C,  // [batch_size, num_block, 32, 32]
    long batch_size,
    long A_num_block,
    long B_num_block,
    long dim,
    long num_block
);

// Sparse-block x dense product accumulated into dense_C.
__global__ void sparse_dense_mm_cuda_kernel(
    float *sparse_A,  // [batch_size, num_block, 32, 32]
    int *indices,     // [batch_size, num_block]
    float *dense_B,   // [batch_size, B_num_block, dim, 32]
    float *dense_C,   // [batch_size, A_num_block, dim, 32]
    long batch_size,
    long A_num_block,
    long B_num_block,
    long dim,
    long num_block
);

// Row-sums of each sparse block accumulated into the matching A-block row.
__global__ void reduce_sum_cuda_kernel(
    float *sparse_A,  // [batch_size, num_block, 32, 32]
    int *indices,     // [batch_size, num_block]
    float *dense_C,   // [batch_size, A_num_block, 32]
    long batch_size,
    long A_num_block,
    long B_num_block,
    long num_block
);

// Broadcast of A-block rows into the sampled sparse-block layout.
__global__ void scatter_cuda_kernel(
    float *dense_A,   // [batch_size, A_num_block, 32]
    int *indices,     // [batch_size, num_block]
    float *sparse_C,  // [batch_size, num_block, 32, 32]
    long batch_size,
    long A_num_block,
    long B_num_block,
    long num_block
);
evalkit_internvl/lib/python3.10/site-packages/transformers/kernels/mra/cuda_launch.cu ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <torch/extension.h>
2
+ #include <ATen/ATen.h>
3
+ #include "cuda_launch.h"
4
+ #include "cuda_kernel.h"
5
+ #include <vector>
6
+
7
+ //////////////////////////////////////////////////////////////////////////////////////////////////
8
+ //////////////////////////////////////////////////////////////////////////////////////////////////
9
+
10
// Host launcher for index_max_cuda_kernel.
// Returns {max_vals [batch_size, A_num_block*32],
//          max_vals_scatter [batch_size, 32, num_block]}.
std::vector<at::Tensor> index_max_kernel(
    at::Tensor index_vals,  // [batch_size, 32, num_block]
    at::Tensor indices,     // [batch_size, num_block],
    int A_num_block,
    int B_num_block
) {
    int batch_size = indices.size(0);
    int num_block = indices.size(1);

    at::Tensor max_vals = at::zeros({batch_size, A_num_block * 32}, index_vals.options());
    at::Tensor max_vals_scatter = at::zeros({batch_size, 32, num_block}, index_vals.options());

    // One thread block per batch element; the kernel needs one float of
    // dynamic shared memory per (A-block, row) cell.
    // NOTE(review): shared_mem scales with A_num_block and is not validated
    // against the device limit; no cudaGetLastError() after the launch --
    // confirm callers keep A_num_block * 128 bytes within bounds.
    dim3 threads(256);
    dim3 blocks(batch_size);
    int shared_mem = A_num_block * 32 * sizeof(float);

    index_max_cuda_kernel<<<blocks, threads, shared_mem>>>(
        index_vals.data_ptr<float>(),
        indices.data_ptr<int>(),
        max_vals.data_ptr<float>(),
        max_vals_scatter.data_ptr<float>(),
        batch_size,
        A_num_block,
        B_num_block,
        num_block
    );

    return {max_vals, max_vals_scatter};
}
39
+
40
// Host launcher for mm_to_sparse_cuda_kernel.
// Returns sparse_C [batch_size, num_block, 32, 32], the block products of
// the sampled (A, B) block pairs.
at::Tensor mm_to_sparse_kernel(
    at::Tensor dense_A,  // [batch_size, A_num_block, dim, 32]
    at::Tensor dense_B,  // [batch_size, B_num_block, dim, 32]
    at::Tensor indices   // [batch_size, num_block]
) {
    int batch_size = dense_A.size(0);
    int A_num_block = dense_A.size(1);
    int B_num_block = dense_B.size(1);
    int dim = dense_A.size(2);
    int num_block = indices.size(1);

    at::Tensor sparse_C = at::zeros({batch_size, num_block, 32, 32}, dense_A.options());

    // 4 output blocks per thread block (blockDim.y = 4, matching the
    // kernel's 1024-float-per-slice shared layout).
    // NOTE(review): assumes num_block % 4 == 0 -- confirm with callers.
    dim3 threads(64, 4);
    dim3 blocks(num_block / 4, batch_size);

    mm_to_sparse_cuda_kernel<<<blocks, threads>>>(
        dense_A.data_ptr<float>(),
        dense_B.data_ptr<float>(),
        indices.data_ptr<int>(),
        sparse_C.data_ptr<float>(),
        batch_size,
        A_num_block,
        B_num_block,
        dim,
        num_block
    );

    return sparse_C;
}
70
+
71
// Host launcher for sparse_dense_mm_cuda_kernel.
// Returns dense_C [batch_size, A_num_block, dim, 32], the accumulated
// product of the sparse blocks with dense_B.
at::Tensor sparse_dense_mm_kernel(
    at::Tensor sparse_A,  // [batch_size, num_block, 32, 32]
    at::Tensor indices,   // [batch_size, num_block]
    at::Tensor dense_B,   // [batch_size, B_num_block, dim, 32]
    int A_num_block
) {
    int batch_size = sparse_A.size(0);
    int num_block = sparse_A.size(1);
    int B_num_block = dense_B.size(1);
    int dim = dense_B.size(2);

    // Zero-initialized because the kernel accumulates with atomicAdd.
    at::Tensor dense_C = at::zeros({batch_size, A_num_block, dim, 32}, dense_B.options());

    // 2 sparse blocks per thread block (blockDim.y = 2, matching the
    // kernel's 3072-float-per-slice shared layout).
    // NOTE(review): assumes num_block % 2 == 0 -- confirm with callers.
    dim3 threads(128, 2);
    dim3 blocks(num_block / 2, batch_size);

    sparse_dense_mm_cuda_kernel<<<blocks, threads>>>(
        sparse_A.data_ptr<float>(),
        indices.data_ptr<int>(),
        dense_B.data_ptr<float>(),
        dense_C.data_ptr<float>(),
        batch_size,
        A_num_block,
        B_num_block,
        dim,
        num_block
    );

    return dense_C;
}
101
+
102
// Host launcher for reduce_sum_cuda_kernel.
// Returns dense_C [batch_size, A_num_block, 32]: row-sums of the sparse
// blocks accumulated per A-block.
at::Tensor reduce_sum_kernel(
    at::Tensor sparse_A,  // [batch_size, num_block, 32, 32]
    at::Tensor indices,   // [batch_size, num_block]
    int A_num_block,
    int B_num_block
) {
    int batch_size = sparse_A.size(0);
    int num_block = sparse_A.size(1);

    // Zero-initialized because the kernel accumulates with atomicAdd.
    at::Tensor dense_C = at::zeros({batch_size, A_num_block, 32}, sparse_A.options());

    // 4 sparse blocks per thread block, 32 threads (one per column) each.
    // NOTE(review): assumes num_block % 4 == 0 -- confirm with callers.
    dim3 threads(32, 4);
    dim3 blocks(num_block / 4, batch_size);

    reduce_sum_cuda_kernel<<<blocks, threads>>>(
        sparse_A.data_ptr<float>(),
        indices.data_ptr<int>(),
        dense_C.data_ptr<float>(),
        batch_size,
        A_num_block,
        B_num_block,
        num_block
    );

    return dense_C;
}
128
+
129
// Host launcher for scatter_cuda_kernel.
// Returns sparse_C [batch_size, num_block, 32, 32] with each block filled by
// broadcasting the matching A-block row of dense_A.
at::Tensor scatter_kernel(
    at::Tensor dense_A,  // [batch_size, A_num_block, 32]
    at::Tensor indices,  // [batch_size, num_block]
    int B_num_block
) {
    int batch_size = dense_A.size(0);
    int A_num_block = dense_A.size(1);
    int num_block = indices.size(1);

    at::Tensor sparse_C = at::zeros({batch_size, num_block, 32, 32}, dense_A.options());

    // 4 sparse blocks per thread block, 32 threads (one per column) each.
    // NOTE(review): assumes num_block % 4 == 0 -- confirm with callers.
    dim3 threads(32, 4);
    dim3 blocks(num_block / 4, batch_size);

    scatter_cuda_kernel<<<blocks, threads>>>(
        dense_A.data_ptr<float>(),
        indices.data_ptr<int>(),
        sparse_C.data_ptr<float>(),
        batch_size,
        A_num_block,
        B_num_block,
        num_block
    );

    return sparse_C;
}
evalkit_internvl/lib/python3.10/site-packages/transformers/kernels/mra/cuda_launch.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <torch/extension.h>
2
+ #include <ATen/ATen.h>
3
+ #include <vector>
4
+
5
// NOTE(review): defining min/max as macros can shadow std::min/std::max in
// translation units that include this header -- verify no such conflict in
// the files that include it.
#define min(a, b) ((a)<(b)?(a):(b))
#define max(a, b) ((a)>(b)?(a):(b))

// Host-side launchers implemented in cuda_launch.cu; shapes documented at
// the definitions.
std::vector<at::Tensor> index_max_kernel(
    at::Tensor index_vals,
    at::Tensor indices,
    int A_num_block,
    int B_num_block
);

at::Tensor mm_to_sparse_kernel(
    at::Tensor dense_A,
    at::Tensor dense_B,
    at::Tensor indices
);

at::Tensor sparse_dense_mm_kernel(
    at::Tensor sparse_A,
    at::Tensor indices,
    at::Tensor dense_B,
    int A_num_block
);

at::Tensor reduce_sum_kernel(
    at::Tensor sparse_A,
    at::Tensor indices,
    int A_num_block,
    int B_num_block
);

at::Tensor scatter_kernel(
    at::Tensor dense_A,
    at::Tensor indices,
    int B_num_block
);
evalkit_internvl/lib/python3.10/site-packages/transformers/kernels/mra/torch_extension.cpp ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <torch/extension.h>
2
+ #include <ATen/ATen.h>
3
+ #include "cuda_launch.h"
4
+ #include <vector>
5
+
6
+ std::vector<at::Tensor> index_max(
7
+ at::Tensor index_vals,
8
+ at::Tensor indices,
9
+ int A_num_block,
10
+ int B_num_block
11
+ ) {
12
+ return index_max_kernel(
13
+ index_vals,
14
+ indices,
15
+ A_num_block,
16
+ B_num_block
17
+ );
18
+ }
19
+
20
+ at::Tensor mm_to_sparse(
21
+ at::Tensor dense_A,
22
+ at::Tensor dense_B,
23
+ at::Tensor indices
24
+ ) {
25
+ return mm_to_sparse_kernel(
26
+ dense_A,
27
+ dense_B,
28
+ indices
29
+ );
30
+ }
31
+
32
+ at::Tensor sparse_dense_mm(
33
+ at::Tensor sparse_A,
34
+ at::Tensor indices,
35
+ at::Tensor dense_B,
36
+ int A_num_block
37
+ ) {
38
+ return sparse_dense_mm_kernel(
39
+ sparse_A,
40
+ indices,
41
+ dense_B,
42
+ A_num_block
43
+ );
44
+ }
45
+
46
+ at::Tensor reduce_sum(
47
+ at::Tensor sparse_A,
48
+ at::Tensor indices,
49
+ int A_num_block,
50
+ int B_num_block
51
+ ) {
52
+ return reduce_sum_kernel(
53
+ sparse_A,
54
+ indices,
55
+ A_num_block,
56
+ B_num_block
57
+ );
58
+ }
59
+
60
+ at::Tensor scatter(
61
+ at::Tensor dense_A,
62
+ at::Tensor indices,
63
+ int B_num_block
64
+ ) {
65
+ return scatter_kernel(
66
+ dense_A,
67
+ indices,
68
+ B_num_block
69
+ );
70
+ }
71
+
72
// Registers the MRA CUDA ops with the Python extension module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("index_max", &index_max, "index_max (CUDA)");
    m.def("mm_to_sparse", &mm_to_sparse, "mm_to_sparse (CUDA)");
    m.def("sparse_dense_mm", &sparse_dense_mm, "sparse_dense_mm (CUDA)");
    m.def("reduce_sum", &reduce_sum, "reduce_sum (CUDA)");
    m.def("scatter", &scatter, "scatter (CUDA)");
}
evalkit_internvl/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda.h ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+
2
// Device launch-configuration constants used by the YOSO kernels.
#define MAX_THREADS_PER_BLOCK 1024
#define OPTIMAL_THREADS_PER_BLOCK 256
#define WARP_SIZE 32
// Grid-dimension limits (x is 2^31 - 1; y and z are 2^16 - 1).
#define MAX_NUM_BLOCK_X 2147483647
#define MAX_NUM_BLOCK_Y 65535
#define MAX_NUM_BLOCK_Z 65535
// NOTE(review): 48000 is slightly below the classic 48 KB (49152 B) static
// shared-memory limit -- presumably a conservative bound; confirm intent.
#define MAX_SHARED_MEM_PER_BLOCK 48000
#define FULL_MASK 0xffffffff
evalkit_internvl/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda_device.h ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ #include "common.h"
3
+
4
// Inserts `value` into an open-addressed hash set of `set_size` slots using
// linear probing and atomicCAS.  Returns the slot used (also when the value
// was already present), or -1 if the set is full.
// Assumes `value` is non-negative and distinct from EMPTY_VALUE (defined in
// common.h) -- a negative value would produce a negative slot; TODO confirm.
template<typename T>
__device__ int set_insert(T *set, int set_size, T value) {
    int slot = value % set_size;
    int start_slot = slot;
    while (true) {
        // Claim the slot if empty; prev tells us what was there before.
        T prev = atomicCAS(&set[slot], EMPTY_VALUE, value);
        if (prev == EMPTY_VALUE || prev == value) {
            return slot;
        }
        // Linear probe to the next slot, wrapping around.
        slot = (slot + 1) % set_size;
        if (slot == start_slot) {
            return -1;  // probed every slot: set is full
        }
    }
    return -1;  // unreachable; kept to satisfy all control paths
}
20
+
21
// Looks up `value` in an open-addressed hash set using linear probing.
// Returns the slot containing the value, or -1 after a full wrap-around
// without a match.  Non-atomic reads: callers must ensure inserts are
// complete (e.g. via a barrier) before lookup -- TODO confirm call sites.
template<typename T>
__device__ int set_lookup(T *set, int set_size, T value) {
    int slot = value % set_size;
    int start_slot = slot;
    while (true) {
        if (set[slot] == value) {
            return slot;
        }
        slot = (slot + 1) % set_size;
        if (slot == start_slot) {
            return -1;  // probed every slot: not present
        }
    }
    return -1;  // unreachable; kept to satisfy all control paths
}
36
+
37
// Cooperatively fills buffer[0:buffer_size] with init_value across
// `num_threads` threads, with a block barrier before and after.
// Must be called by every thread of the block (contains __syncthreads).
template<typename T>
__device__ void init_buffer(T init_value, T *buffer, int buffer_size, int num_threads, int thread_id) {
    __syncthreads();  // ensure prior uses of the buffer are finished
    for (int i = 0; i < buffer_size; i = i + num_threads) {
        int offset_idx = i + thread_id;
        if (offset_idx < buffer_size) {
            buffer[offset_idx] = init_value;
        }
    }
    __syncthreads();  // ensure the fill is visible to all threads
}
48
+
49
// Cooperatively copies data_length elements from src_pt to dist_pt across
// `num_threads` threads, with a block barrier before and after.
// Must be called by every thread of the block (contains __syncthreads).
template<typename T>
__device__ void copy_data(T *src_pt, T *dist_pt, int data_length, int num_threads, int thread_id) {
    __syncthreads();  // ensure the source is fully written
    for (int i = 0; i < data_length; i = i + num_threads) {
        int offset_idx = i + thread_id;
        if (offset_idx < data_length) {
            dist_pt[offset_idx] = src_pt[offset_idx];
        }
    }
    __syncthreads();  // ensure the copy is visible to all threads
}
60
+
61
// Cooperatively fills buffer[0:buffer_size] with init_value across
// `num_threads` threads.  No barriers: the caller is responsible for any
// synchronization before and after.
template<typename T>
__device__ void init_buffer_nonblocking(T init_value, T *buffer, int buffer_size, int num_threads, int thread_id) {
    for (int idx = thread_id; idx < buffer_size; idx = idx + num_threads) {
        buffer[idx] = init_value;
    }
}
70
+
71
// Cooperatively copies data_length elements from src_pt to dist_pt across
// `num_threads` threads.  No barriers: the caller is responsible for any
// synchronization before and after.
template<typename T>
__device__ void copy_data_nonblocking(T *src_pt, T *dist_pt, int data_length, int num_threads, int thread_id) {
    for (int idx = thread_id; idx < data_length; idx = idx + num_threads) {
        dist_pt[idx] = src_pt[idx];
    }
}
evalkit_internvl/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.cu ADDED
@@ -0,0 +1,588 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // File from https://github.com/mlpen/YOSO/blob/main/encoders/backbones/efficient_attentions/yoso/yoso_v1/cuda/fast_lsh_cumulation.cu
2
+
3
+ #include <torch/extension.h>
4
+ #include <ATen/ATen.h>
5
+ #include "fast_lsh_cumulation.h"
6
+ #include "fast_lsh_cumulation_cuda.h"
7
+ #include "common_cuda.h"
8
+ #include "common.h"
9
+ #include <vector>
10
+ //////////////////////////////////////////////////////////////////////////////////////////////////
11
+ //////////////////////////////////////////////////////////////////////////////////////////////////
12
+
13
// Host launcher computing LSH hash codes for queries and keys.
// Returns {query_hash_code [batch_size, num_query, num_hash_f],
//          key_hash_code   [batch_size, num_key, num_hash_f]} (int tensors).
std::vector<at::Tensor> fast_hash_ver1_kernel(
    at::Tensor query_mask,
    at::Tensor query_vector,
    at::Tensor key_mask,
    at::Tensor key_vector,
    int num_hash_f,
    int hash_code_len,
    bool use_cuda
) {

    int batch_size = query_vector.size(0);
    int num_query = query_vector.size(1);
    int num_key = key_vector.size(1);
    int vector_dim = query_vector.size(2);

    // Each "part" yields vector_dim / hash_code_len hash functions; enough
    // parts are drawn to cover num_hash_f.
    int num_hash_per_part = vector_dim / hash_code_len;
    int num_part = max(1, ceil_divide(num_hash_f, num_hash_per_part));

    // Random +/-1 matrices (3 per part), sampled fresh on every call.
    at::Tensor Dmat = 2 * at::randint(0, 2, {batch_size, 3, num_part, vector_dim}, query_mask.options()) - 1;
    at::Tensor query_hash_code = at::zeros({batch_size, num_query, num_hash_f}, query_mask.options());
    at::Tensor key_hash_code = at::zeros({batch_size, num_key, num_hash_f}, key_mask.options());

    int *query_mask_ptr = query_mask.data_ptr<int>();
    float *query_vector_ptr = query_vector.data_ptr<float>();
    int *key_mask_ptr = key_mask.data_ptr<int>();
    float *key_vector_ptr = key_vector.data_ptr<float>();

    int *Dmat_ptr = Dmat.data_ptr<int>();

    int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
    int *key_hash_code_ptr = key_hash_code.data_ptr<int>();

    // NOTE(review): if use_cuda is false, the zero-initialized hash codes
    // are returned unchanged (no CPU fallback) -- confirm this is intended.
    if (use_cuda) {
        {
            // One thread per vector component; one block per (part, query).
            dim3 threads(vector_dim);
            dim3 blocks(num_part, num_query, batch_size);
            int shared_mem = vector_dim * sizeof(float);
            fast_hash_ver1_cuda_kernel<<<blocks, threads, shared_mem>>>(
                query_mask_ptr,
                query_vector_ptr,
                Dmat_ptr,
                query_hash_code_ptr,
                batch_size,
                num_query,
                vector_dim,
                num_part,
                num_hash_f,
                hash_code_len
            );
        }
        {
            // Same launch shape for keys, sharing the same Dmat.
            dim3 threads(vector_dim);
            dim3 blocks(num_part, num_key, batch_size);
            int shared_mem = vector_dim * sizeof(float);
            fast_hash_ver1_cuda_kernel<<<blocks, threads, shared_mem>>>(
                key_mask_ptr,
                key_vector_ptr,
                Dmat_ptr,
                key_hash_code_ptr,
                batch_size,
                num_key,
                vector_dim,
                num_part,
                num_hash_f,
                hash_code_len
            );
        }
    }

    return {query_hash_code, key_hash_code};

}
85
+
86
// Host launcher for unweighted LSH cumulation: keys deposit their values
// into per-hash-function tables (step 1), queries gather them back (step 2).
// The value dimension is processed WARP_SIZE columns at a time, reusing one
// hashtable buffer that is zeroed between passes.
// Returns cumulation_value [batch_size, num_query, value_dim].
at::Tensor lsh_cumulation_ver1_kernel(
    at::Tensor query_mask,       // [batch_size, num_query]
    at::Tensor query_hash_code,  // [batch_size, num_query, num_hash_f]
    at::Tensor key_mask,         // [batch_size, num_key]
    at::Tensor key_hash_code,    // [batch_size, num_key, num_hash_f]
    at::Tensor value,            // [batch_size, num_key, value_dim]
    int hashtable_capacity,
    bool use_cuda
) {

    int batch_size = query_hash_code.size(0);
    int num_hash_f = query_hash_code.size(2);

    int num_query = query_hash_code.size(1);
    int num_key = key_hash_code.size(1);
    int value_dim = value.size(2);

    // at::empty is fine here: the buffer is cudaMemset to zero before each
    // accumulation pass below.
    at::Tensor hashtable_value = at::empty({batch_size, num_hash_f, hashtable_capacity, WARP_SIZE}, value.options());
    at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());

    // NOTE(review): if use_cuda is false the zero tensor is returned as-is.
    if (use_cuda) {
        // One warp row per item; threads_y items per block.
        // NOTE(review): assumes num_key and num_query are divisible by
        // threads_y -- confirm with callers.
        int threads_x = WARP_SIZE;
        int threads_y = OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE;
        int block_x_step1 = num_key / threads_y;
        int block_x_step2 = num_query / threads_y;
        int block_y = batch_size;

        dim3 threads(threads_x, threads_y);
        dim3 blocks_step1(block_x_step1, block_y);
        dim3 blocks_step2(block_x_step2, block_y);

        int *query_mask_ptr = query_mask.data_ptr<int>();
        int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
        int *key_mask_ptr = key_mask.data_ptr<int>();
        int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
        float *value_ptr = value.data_ptr<float>();
        float *hashtable_value_ptr = hashtable_value.data_ptr<float>();
        float *cumulation_value_ptr = cumulation_value.data_ptr<float>();

        // One scatter/gather pass per WARP_SIZE-wide slice of value_dim.
        for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {

            // Reset the hashtable for this slice (byte-wise zero fill).
            cudaMemset(hashtable_value_ptr, 0, (batch_size * num_hash_f * hashtable_capacity * WARP_SIZE) * sizeof(float));

            // Step 1: keys scatter their value slice into the hashtable.
            lsh_cumulation_ver1_step1_cuda_kernel<<<blocks_step1, threads>>>(
                key_mask_ptr,
                key_hash_code_ptr,
                value_ptr,
                hashtable_value_ptr,
                batch_size,
                num_hash_f,
                hashtable_capacity,
                num_key,
                value_dim,
                value_offset
            );

            // Step 2: queries gather the slice back from the hashtable.
            lsh_cumulation_ver1_step2_cuda_kernel<<<blocks_step2, threads>>>(
                query_mask_ptr,
                query_hash_code_ptr,
                hashtable_value_ptr,
                cumulation_value_ptr,
                batch_size,
                num_hash_f,
                hashtable_capacity,
                num_query,
                value_dim,
                value_offset
            );
        }

    }

    return cumulation_value;

}
161
+
162
// Host launcher for weighted LSH cumulation (version 1): like the
// unweighted variant, but keys scatter key_weight-scaled values and queries
// gather with query_weight scaling, iterating over every (value slice,
// weight index) pair and reusing one zeroed hashtable buffer per pass.
// Returns cumulation_value [batch_size, num_query, value_dim].
at::Tensor lsh_weighted_cumulation_ver1_kernel(
    at::Tensor query_mask,       // [batch_size, num_query]
    at::Tensor query_hash_code,  // [batch_size, num_query, num_hash_f]
    at::Tensor query_weight,     // [batch_size, num_query, weight_dim]
    at::Tensor key_mask,         // [batch_size, num_key]
    at::Tensor key_hash_code,    // [batch_size, num_key, num_hash_f]
    at::Tensor key_weight,       // [batch_size, num_key, weight_dim]
    at::Tensor value,            // [batch_size, num_key, value_dim]
    int hashtable_capacity,
    bool use_cuda
) {

    int batch_size = query_hash_code.size(0);
    int num_hash_f = query_hash_code.size(2);

    int num_query = query_hash_code.size(1);
    int num_key = key_hash_code.size(1);
    int value_dim = value.size(2);
    int weight_dim = query_weight.size(2);

    // Zero-allocated here (the unweighted variant uses at::empty); the
    // buffer is re-zeroed with cudaMemset before every pass anyway.
    at::Tensor hashtable_value = at::zeros({batch_size, num_hash_f, hashtable_capacity, WARP_SIZE}, value.options());
    at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());

    // NOTE(review): if use_cuda is false the zero tensor is returned as-is.
    if (use_cuda) {
        // One warp row per item; threads_y items per block.
        // NOTE(review): assumes num_key and num_query are divisible by
        // threads_y -- confirm with callers.
        int threads_x = WARP_SIZE;
        int threads_y = OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE;
        int block_x_step1 = num_key / threads_y;
        int block_x_step2 = num_query / threads_y;
        int block_y = batch_size;

        dim3 threads(threads_x, threads_y);
        dim3 blocks_step1(block_x_step1, block_y);
        dim3 blocks_step2(block_x_step2, block_y);

        int *query_mask_ptr = query_mask.data_ptr<int>();
        int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
        float *query_weight_ptr = query_weight.data_ptr<float>();
        int *key_mask_ptr = key_mask.data_ptr<int>();
        int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
        float *key_weight_ptr = key_weight.data_ptr<float>();
        float *value_ptr = value.data_ptr<float>();
        float *hashtable_value_ptr = hashtable_value.data_ptr<float>();
        float *cumulation_value_ptr = cumulation_value.data_ptr<float>();

        // value_dim / WARP_SIZE x weight_dim scatter/gather passes in total.
        for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
            for (int weight_idx = 0; weight_idx < weight_dim; weight_idx++) {

                // Reset the hashtable for this (slice, weight) pass.
                cudaMemset(hashtable_value_ptr, 0, (batch_size * num_hash_f * hashtable_capacity * WARP_SIZE) * sizeof(float));

                // Step 1: keys scatter weight-scaled value slices.
                lsh_weighted_cumulation_ver1_step1_cuda_kernel<<<blocks_step1, threads>>>(
                    key_mask_ptr,
                    key_hash_code_ptr,
                    key_weight_ptr,
                    value_ptr,
                    hashtable_value_ptr,
                    batch_size,
                    num_hash_f,
                    hashtable_capacity,
                    num_key,
                    value_dim,
                    weight_dim,
                    value_offset,
                    weight_idx
                );

                // Step 2: queries gather with their own weight scaling.
                lsh_weighted_cumulation_ver1_step2_cuda_kernel<<<blocks_step2, threads>>>(
                    query_mask_ptr,
                    query_hash_code_ptr,
                    query_weight_ptr,
                    hashtable_value_ptr,
                    cumulation_value_ptr,
                    batch_size,
                    num_hash_f,
                    hashtable_capacity,
                    num_query,
                    value_dim,
                    weight_dim,
                    value_offset,
                    weight_idx
                );
            }
        }

    }

    return cumulation_value;

}
250
+
251
// Weighted LSH cumulation, version 2 (query-centric gather).
//
// For each query position, accumulates the value vectors of keys that collide
// with it under at least one LSH hash function, weighted inside the step-2
// kernel by the query/key weight vectors.
//
// Pipeline (default CUDA stream):
//   1. count-sort keys by hash code (3 kernels) -> key_sorted_idxes
//   2. extract, per query, its [offset, count] window into the sorted keys
//   3. step-2 kernel walks each query's candidate keys and accumulates values
//
// Shapes inferred from the indexing below (confirm against callers):
//   query_hash_code / key_hash_code: int32 [batch, num_query|num_key, num_hash_f]
//   query_weight / key_weight:       float [batch, num_query|num_key, weight_dim]
//   value:                           float [batch, num_key, value_dim]
//
// Returns float [batch_size, num_query, value_dim]; stays all-zeros when
// use_cuda is false (there is no CPU fallback in this function).
//
// NOTE(review): grid sizes truncate — num_key and num_query are assumed to be
// exact multiples of (OPTIMAL_THREADS_PER_BLOCK / num_hash_f); trailing
// elements would be silently skipped otherwise. Presumably the caller pads —
// TODO confirm.
at::Tensor lsh_weighted_cumulation_ver2_kernel(
  at::Tensor query_mask,
  at::Tensor query_hash_code,
  at::Tensor query_weight,
  at::Tensor key_mask,
  at::Tensor key_hash_code,
  at::Tensor key_weight,
  at::Tensor value,
  int hashtable_capacity,
  bool use_cuda
) {

  int batch_size = query_hash_code.size(0);
  int num_hash_f = query_hash_code.size(2);

  int num_query = query_hash_code.size(1);
  int num_key = key_hash_code.size(1);
  int value_dim = value.size(2);
  int weight_dim = query_weight.size(2);

  // Work buffers for the count sort, plus the zero-initialized output.
  at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options());
  at::Tensor key_sorted_idxes = at::zeros({batch_size, num_hash_f, num_key}, query_hash_code.options());
  at::Tensor query_info = at::zeros({batch_size, num_query, 2, num_hash_f}, query_hash_code.options());
  at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());

  if (use_cuda) {

    int *query_mask_ptr = query_mask.data_ptr<int>();
    int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
    float *query_weight_ptr = query_weight.data_ptr<float>();
    int *key_mask_ptr = key_mask.data_ptr<int>();
    int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
    float *key_weight_ptr = key_weight.data_ptr<float>();
    float *value_ptr = value.data_ptr<float>();

    int *count_sort_table_ptr = count_sort_table.data_ptr<int>();
    int *key_sorted_idxes_ptr = key_sorted_idxes.data_ptr<int>();
    int *query_info_ptr = query_info.data_ptr<int>();

    float *cumulation_value_ptr = cumulation_value.data_ptr<float>();

    cudaError_t err;

    // Step 1: count-sort key indices by hash code, one ordering per hash function.
    {
      dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
      dim3 blocks_step13(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
      dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK));
      dim3 blocks_step2(num_hash_f, batch_size);
      // The table holds int counts; sizeof(int) == sizeof(float), made explicit here.
      int shared_mem = hashtable_capacity * sizeof(int);
      count_sort_step1_cuda_kernel<<<blocks_step13, threads_step13>>>(
        key_mask_ptr,
        key_hash_code_ptr,
        count_sort_table_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity,
        num_key
      );
      count_sort_step2_cuda_kernel<<<blocks_step2, threads_step2, shared_mem>>>(
        count_sort_table_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity
      );
      count_sort_step3_cuda_kernel<<<blocks_step13, threads_step13>>>(
        key_mask_ptr,
        key_hash_code_ptr,
        count_sort_table_ptr,
        key_sorted_idxes_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity,
        num_key
      );
      // Launches return no status; surface config errors (bad dims, smem) here.
      err = cudaGetLastError();
      TORCH_CHECK(err == cudaSuccess, "count sort kernel launch failed: ", cudaGetErrorString(err));
    }
    // Step 2: record, per query and hash function, its window into the sorted keys.
    {
      dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
      dim3 blocks(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
      extract_query_info_cuda_kernel<<<blocks, threads>>>(
        query_mask_ptr,
        query_hash_code_ptr,
        count_sort_table_ptr,
        query_info_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity,
        num_query
      );
      err = cudaGetLastError();
      TORCH_CHECK(err == cudaSuccess, "extract_query_info kernel launch failed: ", cudaGetErrorString(err));
    }
    // Step 3: per (query, hash function) block, accumulate weighted values.
    {
      dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE);
      dim3 blocks(num_query, num_hash_f, batch_size);
      int shared_mem = (weight_dim + WARP_SIZE) * sizeof(float);
      lsh_weighted_cumulation_ver2_step2_cuda_kernel<<<blocks, threads, shared_mem>>>(
        query_mask_ptr,
        query_info_ptr,
        key_sorted_idxes_ptr,
        query_weight_ptr,
        key_weight_ptr,
        value_ptr,
        cumulation_value_ptr,
        batch_size,
        num_hash_f,
        num_query,
        num_key,
        value_dim,
        weight_dim
      );
      err = cudaGetLastError();
      TORCH_CHECK(err == cudaSuccess, "ver2_step2 kernel launch failed: ", cudaGetErrorString(err));
    }
  }

  return cumulation_value;

}
363
+
364
// Weighted LSH cumulation, version 3 (key-centric scatter).
//
// Mirror image of ver2: instead of sorting keys and iterating per query, it
// count-sorts the QUERIES by hash code and lets each key scatter its weighted
// value vector into every colliding query's accumulator (step-2 kernel).
//
// Pipeline (default CUDA stream):
//   1. count-sort queries by hash code (3 kernels) -> query_sorted_idxes
//   2. extract, per key, its [offset, count] window into the sorted queries
//      (reuses extract_query_info_cuda_kernel with key tensors as inputs)
//   3. step-2 kernel scatters each key's contribution into cumulation_value
//
// Shapes are as in ver2 (inferred from indexing — confirm against callers).
// Returns float [batch_size, num_query, value_dim]; all-zeros when use_cuda
// is false (no CPU fallback here).
//
// NOTE(review): grid sizes truncate — num_query and num_key are assumed to be
// exact multiples of (OPTIMAL_THREADS_PER_BLOCK / num_hash_f); trailing
// elements would be silently skipped otherwise — TODO confirm caller pads.
at::Tensor lsh_weighted_cumulation_ver3_kernel(
  at::Tensor query_mask,
  at::Tensor query_hash_code,
  at::Tensor query_weight,
  at::Tensor key_mask,
  at::Tensor key_hash_code,
  at::Tensor key_weight,
  at::Tensor value,
  int hashtable_capacity,
  bool use_cuda
) {

  int batch_size = query_hash_code.size(0);
  int num_hash_f = query_hash_code.size(2);

  int num_query = query_hash_code.size(1);
  int num_key = key_hash_code.size(1);
  int value_dim = value.size(2);
  int weight_dim = query_weight.size(2);

  // Work buffers for the count sort, plus the zero-initialized output.
  at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options());
  at::Tensor query_sorted_idxes = at::zeros({batch_size, num_hash_f, num_query}, query_hash_code.options());
  at::Tensor key_info = at::zeros({batch_size, num_key, 2, num_hash_f}, query_hash_code.options());
  at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());

  if (use_cuda) {

    int *query_mask_ptr = query_mask.data_ptr<int>();
    int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
    float *query_weight_ptr = query_weight.data_ptr<float>();
    int *key_mask_ptr = key_mask.data_ptr<int>();
    int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
    float *key_weight_ptr = key_weight.data_ptr<float>();
    float *value_ptr = value.data_ptr<float>();

    int *count_sort_table_ptr = count_sort_table.data_ptr<int>();
    int *query_sorted_idxes_ptr = query_sorted_idxes.data_ptr<int>();
    int *key_info_ptr = key_info.data_ptr<int>();

    float *cumulation_value_ptr = cumulation_value.data_ptr<float>();

    cudaError_t err;

    // Step 1: count-sort query indices by hash code, one ordering per hash function.
    {
      dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
      dim3 blocks_step13(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
      dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK));
      dim3 blocks_step2(num_hash_f, batch_size);
      // The table holds int counts; sizeof(int) == sizeof(float), made explicit here.
      int shared_mem = hashtable_capacity * sizeof(int);
      count_sort_step1_cuda_kernel<<<blocks_step13, threads_step13>>>(
        query_mask_ptr,
        query_hash_code_ptr,
        count_sort_table_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity,
        num_query
      );
      count_sort_step2_cuda_kernel<<<blocks_step2, threads_step2, shared_mem>>>(
        count_sort_table_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity
      );
      count_sort_step3_cuda_kernel<<<blocks_step13, threads_step13>>>(
        query_mask_ptr,
        query_hash_code_ptr,
        count_sort_table_ptr,
        query_sorted_idxes_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity,
        num_query
      );
      // Launches return no status; surface config errors (bad dims, smem) here.
      err = cudaGetLastError();
      TORCH_CHECK(err == cudaSuccess, "count sort kernel launch failed: ", cudaGetErrorString(err));
    }
    // Step 2: record, per key and hash function, its window into the sorted queries.
    {
      dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
      dim3 blocks(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
      extract_query_info_cuda_kernel<<<blocks, threads>>>(
        key_mask_ptr,
        key_hash_code_ptr,
        count_sort_table_ptr,
        key_info_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity,
        num_key
      );
      err = cudaGetLastError();
      TORCH_CHECK(err == cudaSuccess, "extract_query_info kernel launch failed: ", cudaGetErrorString(err));
    }
    // Step 3: per (key, hash function) block, scatter weighted values to queries.
    {
      dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE);
      dim3 blocks(num_key, num_hash_f, batch_size);
      int shared_mem = (weight_dim + value_dim + WARP_SIZE) * sizeof(float);
      lsh_weighted_cumulation_ver3_step2_cuda_kernel<<<blocks, threads, shared_mem>>>(
        query_sorted_idxes_ptr,
        key_mask_ptr,
        key_info_ptr,
        query_weight_ptr,
        key_weight_ptr,
        value_ptr,
        cumulation_value_ptr,
        batch_size,
        num_hash_f,
        num_query,
        num_key,
        value_dim,
        weight_dim
      );
      err = cudaGetLastError();
      TORCH_CHECK(err == cudaSuccess, "ver3_step2 kernel launch failed: ", cudaGetErrorString(err));
    }
  }

  return cumulation_value;

}
476
+
477
// Weighted LSH cumulation, version 4 (key-centric scatter, one block per key).
//
// Same preparation as ver3 (count-sort queries, extract per-key windows); the
// difference is in the final step's launch geometry: ONE block per key handles
// all hash functions (grid is [num_key, batch_size] instead of
// [num_key, num_hash_f, batch_size]), with shared memory sized to stage the
// key's weight/value vectors plus all 2*num_hash_f window entries.
//
// Shapes are as in ver2/ver3 (inferred from indexing — confirm against
// callers). Returns float [batch_size, num_query, value_dim]; all-zeros when
// use_cuda is false (no CPU fallback here).
//
// NOTE(review): grid sizes truncate — num_query and num_key are assumed to be
// exact multiples of (OPTIMAL_THREADS_PER_BLOCK / num_hash_f); trailing
// elements would be silently skipped otherwise — TODO confirm caller pads.
at::Tensor lsh_weighted_cumulation_ver4_kernel(
  at::Tensor query_mask,
  at::Tensor query_hash_code,
  at::Tensor query_weight,
  at::Tensor key_mask,
  at::Tensor key_hash_code,
  at::Tensor key_weight,
  at::Tensor value,
  int hashtable_capacity,
  bool use_cuda
) {

  int batch_size = query_hash_code.size(0);
  int num_hash_f = query_hash_code.size(2);

  int num_query = query_hash_code.size(1);
  int num_key = key_hash_code.size(1);
  int value_dim = value.size(2);
  int weight_dim = query_weight.size(2);

  // Work buffers for the count sort, plus the zero-initialized output.
  at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options());
  at::Tensor query_sorted_idxes = at::zeros({batch_size, num_hash_f, num_query}, query_hash_code.options());
  at::Tensor key_info = at::zeros({batch_size, num_key, 2, num_hash_f}, query_hash_code.options());
  at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());

  if (use_cuda) {

    int *query_mask_ptr = query_mask.data_ptr<int>();
    int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
    float *query_weight_ptr = query_weight.data_ptr<float>();
    int *key_mask_ptr = key_mask.data_ptr<int>();
    int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
    float *key_weight_ptr = key_weight.data_ptr<float>();
    float *value_ptr = value.data_ptr<float>();

    int *count_sort_table_ptr = count_sort_table.data_ptr<int>();
    int *query_sorted_idxes_ptr = query_sorted_idxes.data_ptr<int>();
    int *key_info_ptr = key_info.data_ptr<int>();

    float *cumulation_value_ptr = cumulation_value.data_ptr<float>();

    cudaError_t err;

    // Step 1: count-sort query indices by hash code, one ordering per hash function.
    {
      dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
      dim3 blocks_step13(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
      dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK));
      dim3 blocks_step2(num_hash_f, batch_size);
      // The table holds int counts; sizeof(int) == sizeof(float), made explicit here.
      int shared_mem = hashtable_capacity * sizeof(int);
      count_sort_step1_cuda_kernel<<<blocks_step13, threads_step13>>>(
        query_mask_ptr,
        query_hash_code_ptr,
        count_sort_table_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity,
        num_query
      );
      count_sort_step2_cuda_kernel<<<blocks_step2, threads_step2, shared_mem>>>(
        count_sort_table_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity
      );
      count_sort_step3_cuda_kernel<<<blocks_step13, threads_step13>>>(
        query_mask_ptr,
        query_hash_code_ptr,
        count_sort_table_ptr,
        query_sorted_idxes_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity,
        num_query
      );
      // Launches return no status; surface config errors (bad dims, smem) here.
      err = cudaGetLastError();
      TORCH_CHECK(err == cudaSuccess, "count sort kernel launch failed: ", cudaGetErrorString(err));
    }
    // Step 2: record, per key and hash function, its window into the sorted queries.
    {
      dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
      dim3 blocks(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
      extract_query_info_cuda_kernel<<<blocks, threads>>>(
        key_mask_ptr,
        key_hash_code_ptr,
        count_sort_table_ptr,
        key_info_ptr,
        batch_size,
        num_hash_f,
        hashtable_capacity,
        num_key
      );
      err = cudaGetLastError();
      TORCH_CHECK(err == cudaSuccess, "extract_query_info kernel launch failed: ", cudaGetErrorString(err));
    }
    // Step 3: one block per key; shared memory stages weight, value, and all
    // per-hash-function [offset, count] windows for that key.
    {
      dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE);
      dim3 blocks(num_key, batch_size);
      int shared_mem = (weight_dim + value_dim + 2 * num_hash_f) * sizeof(float);
      lsh_weighted_cumulation_ver4_step2_cuda_kernel<<<blocks, threads, shared_mem>>>(
        query_sorted_idxes_ptr,
        key_mask_ptr,
        key_info_ptr,
        query_weight_ptr,
        key_weight_ptr,
        value_ptr,
        cumulation_value_ptr,
        batch_size,
        num_hash_f,
        num_query,
        num_key,
        value_dim,
        weight_dim
      );
      err = cudaGetLastError();
      TORCH_CHECK(err == cudaSuccess, "ver4_step2 kernel launch failed: ", cudaGetErrorString(err));
    }
  }

  return cumulation_value;

}