ZTWHHH commited on
Commit
59bd0e3
·
verified ·
1 Parent(s): 5e69cbc

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. wemm/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/LICENSE +201 -0
  2. wemm/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/RECORD +173 -0
  3. wemm/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/REQUESTED +0 -0
  4. wemm/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/WHEEL +5 -0
  5. wemm/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/entry_points.txt +6 -0
  6. wemm/lib/python3.10/site-packages/boto3-1.26.118.dist-info/METADATA +187 -0
  7. wemm/lib/python3.10/site-packages/boto3-1.26.118.dist-info/WHEEL +5 -0
  8. wemm/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/LICENSE.txt +28 -0
  9. wemm/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/METADATA +75 -0
  10. wemm/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/RECORD +58 -0
  11. wemm/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/WHEEL +4 -0
  12. wemm/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/entry_points.txt +3 -0
  13. wemm/lib/python3.10/site-packages/lit-18.1.8.dist-info/WHEEL +5 -0
  14. wemm/lib/python3.10/site-packages/pillow-11.1.0.dist-info/METADATA +176 -0
  15. wemm/lib/python3.10/site-packages/qcloud_cos/__init__.py +19 -0
  16. wemm/lib/python3.10/site-packages/qcloud_cos/__pycache__/ai_recognition.cpython-310.pyc +0 -0
  17. wemm/lib/python3.10/site-packages/qcloud_cos/__pycache__/streambody.cpython-310.pyc +0 -0
  18. wemm/lib/python3.10/site-packages/qcloud_cos/cos_exception.py +101 -0
  19. wemm/lib/python3.10/site-packages/qcloud_cos/cos_threadpool.py +111 -0
  20. wemm/lib/python3.10/site-packages/qcloud_cos/meta_insight.py +908 -0
  21. wemm/lib/python3.10/site-packages/qcloud_cos/resumable_downloader.py +226 -0
  22. wemm/lib/python3.10/site-packages/qcloud_cos/streambody.py +83 -0
  23. wemm/lib/python3.10/site-packages/qcloud_cos/version.py +1 -0
  24. wemm/lib/python3.10/site-packages/torchgen/api/__init__.py +0 -0
  25. wemm/lib/python3.10/site-packages/torchgen/api/__pycache__/__init__.cpython-310.pyc +0 -0
  26. wemm/lib/python3.10/site-packages/torchgen/api/__pycache__/meta.cpython-310.pyc +0 -0
  27. wemm/lib/python3.10/site-packages/torchgen/api/__pycache__/native.cpython-310.pyc +0 -0
  28. wemm/lib/python3.10/site-packages/torchgen/api/autograd.py +663 -0
  29. wemm/lib/python3.10/site-packages/torchgen/api/cpp.py +460 -0
  30. wemm/lib/python3.10/site-packages/torchgen/api/dispatcher.py +118 -0
  31. wemm/lib/python3.10/site-packages/torchgen/api/native.py +153 -0
  32. wemm/lib/python3.10/site-packages/torchgen/api/structured.py +156 -0
  33. wemm/lib/python3.10/site-packages/torchgen/api/translate.py +431 -0
  34. wemm/lib/python3.10/site-packages/torchgen/api/types/__pycache__/types_base.cpython-310.pyc +0 -0
  35. wemm/lib/python3.10/site-packages/torchgen/api/types/types.py +182 -0
  36. wemm/lib/python3.10/site-packages/torchgen/api/types/types_base.py +267 -0
  37. wemm/lib/python3.10/site-packages/torchgen/dest/lazy_ir.py +710 -0
  38. wemm/lib/python3.10/site-packages/torchgen/dest/register_dispatch_key.py +983 -0
  39. wemm/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/et_cpp.cpython-310.pyc +0 -0
  40. wemm/lib/python3.10/site-packages/torchgen/model.py +0 -0
  41. wemm/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/__init__.cpython-310.pyc +0 -0
  42. wemm/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/FunctionalInverses.h +16 -0
  43. wemm/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/MethodOperators.h +24 -0
  44. wemm/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Operators.cpp +19 -0
  45. wemm/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterBackendSelect.cpp +49 -0
  46. wemm/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UnboxingFunctions.cpp +35 -0
  47. wemm/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/config.cpython-310.pyc +0 -0
  48. wemm/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/gen_static_runtime_ops.cpython-310.pyc +0 -0
  49. wemm/lib/python3.10/site-packages/torchgen/static_runtime/config.py +388 -0
  50. wemm/lib/python3.10/site-packages/triton/__pycache__/utils.cpython-310.pyc +0 -0
wemm/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/LICENSE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
wemm/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/RECORD ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ../../../bin/accelerate,sha256=DBfzvhxWZpAt7ot8k6S_ytOszJeviUDzG8GodnHcP3Q,243
2
+ ../../../bin/accelerate-config,sha256=lHfl1GeRkoXZGPuzHc7nwghwGm5nMl99ef5ISMseT0s,235
3
+ ../../../bin/accelerate-estimate-memory,sha256=ciUD5cX75n2iKYsjo2lei9zsiWsH5A39huqciKkgrGQ,237
4
+ ../../../bin/accelerate-launch,sha256=ku3gdovuzuyM-2BL9NmKTLCpoGTS51znpTOvH_m7tRo,235
5
+ ../../../bin/accelerate-merge-weights,sha256=p4Vnyfa7ag_Y5AHPL-nP8PiQnBbaqpyaeVWNtG9TC70,234
6
+ accelerate-1.2.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
7
+ accelerate-1.2.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
8
+ accelerate-1.2.1.dist-info/METADATA,sha256=fNLbXzvJfyB5uEpohux1coAc5rA_VpDu9RrOHlVffjQ,19178
9
+ accelerate-1.2.1.dist-info/RECORD,,
10
+ accelerate-1.2.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
11
+ accelerate-1.2.1.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
12
+ accelerate-1.2.1.dist-info/entry_points.txt,sha256=Vpy8gUGfZ-1VnM2229fb8CpJNLBdMH_wtJ9PQ7b_2tQ,296
13
+ accelerate-1.2.1.dist-info/top_level.txt,sha256=esVfdxTidsjQ90zsN_rPpjLFJ4ijRlx4mnLrG09hlt4,11
14
+ accelerate/__init__.py,sha256=4ht0Ky7GWh9OVs3YLKPUShdq7paA-vKk2qH8YsWlTYo,1504
15
+ accelerate/__pycache__/__init__.cpython-310.pyc,,
16
+ accelerate/__pycache__/accelerator.cpython-310.pyc,,
17
+ accelerate/__pycache__/big_modeling.cpython-310.pyc,,
18
+ accelerate/__pycache__/checkpointing.cpython-310.pyc,,
19
+ accelerate/__pycache__/data_loader.cpython-310.pyc,,
20
+ accelerate/__pycache__/hooks.cpython-310.pyc,,
21
+ accelerate/__pycache__/inference.cpython-310.pyc,,
22
+ accelerate/__pycache__/launchers.cpython-310.pyc,,
23
+ accelerate/__pycache__/local_sgd.cpython-310.pyc,,
24
+ accelerate/__pycache__/logging.cpython-310.pyc,,
25
+ accelerate/__pycache__/memory_utils.cpython-310.pyc,,
26
+ accelerate/__pycache__/optimizer.cpython-310.pyc,,
27
+ accelerate/__pycache__/scheduler.cpython-310.pyc,,
28
+ accelerate/__pycache__/state.cpython-310.pyc,,
29
+ accelerate/__pycache__/tracking.cpython-310.pyc,,
30
+ accelerate/accelerator.py,sha256=zw8FuMyzBDokxc7YUtZWvOLyxYG1CcSzFd3Aiiy-clI,161482
31
+ accelerate/big_modeling.py,sha256=hn8R3_z3yvEizHuLrARWqTU_vx4M_gb0ehmDVpJPhBs,29758
32
+ accelerate/checkpointing.py,sha256=PUL5cBbFFR0AO1Y0OyXCzqSmZ5vaUaceGeDnWmTQk-U,13002
33
+ accelerate/commands/__init__.py,sha256=m1PPTDT4ziIAvM0-FDSgIMIZ69Konn126s6LwuzH6v8,606
34
+ accelerate/commands/__pycache__/__init__.cpython-310.pyc,,
35
+ accelerate/commands/__pycache__/accelerate_cli.cpython-310.pyc,,
36
+ accelerate/commands/__pycache__/env.cpython-310.pyc,,
37
+ accelerate/commands/__pycache__/estimate.cpython-310.pyc,,
38
+ accelerate/commands/__pycache__/launch.cpython-310.pyc,,
39
+ accelerate/commands/__pycache__/merge.cpython-310.pyc,,
40
+ accelerate/commands/__pycache__/test.cpython-310.pyc,,
41
+ accelerate/commands/__pycache__/tpu.cpython-310.pyc,,
42
+ accelerate/commands/__pycache__/utils.cpython-310.pyc,,
43
+ accelerate/commands/accelerate_cli.py,sha256=aaqbgTuvtj0N4FPFI0KBpPTiVtWTPUWSlbSBzsy58l8,1856
44
+ accelerate/commands/config/__init__.py,sha256=iJK8dgj3pc5Vdr1E7UuGoFu-BlybyXLxYDoTg9gXngE,1645
45
+ accelerate/commands/config/__pycache__/__init__.cpython-310.pyc,,
46
+ accelerate/commands/config/__pycache__/cluster.cpython-310.pyc,,
47
+ accelerate/commands/config/__pycache__/config.cpython-310.pyc,,
48
+ accelerate/commands/config/__pycache__/config_args.cpython-310.pyc,,
49
+ accelerate/commands/config/__pycache__/config_utils.cpython-310.pyc,,
50
+ accelerate/commands/config/__pycache__/default.cpython-310.pyc,,
51
+ accelerate/commands/config/__pycache__/sagemaker.cpython-310.pyc,,
52
+ accelerate/commands/config/__pycache__/update.cpython-310.pyc,,
53
+ accelerate/commands/config/cluster.py,sha256=n1HGGAN0VVVX1yHONf0xgO6LZ5ZBpou_xfmvPOnItIM,35700
54
+ accelerate/commands/config/config.py,sha256=FuRlQvOjgATEtyqOSsGD-KEtOCvACOHjs2C-krrtldk,3035
55
+ accelerate/commands/config/config_args.py,sha256=-fPg3nj4F_6Bp87dHS25fF6_3U2vP2rzllSd1Mj3TSw,9976
56
+ accelerate/commands/config/config_utils.py,sha256=beW-Hc-ka1NqSfId73L9ThnMxUf7K9otXsuxIuif9-A,3141
57
+ accelerate/commands/config/default.py,sha256=GI7Q9Asy7Cr81ditzNA6hOjIf6doinynz6-cO0GZwZ4,5705
58
+ accelerate/commands/config/sagemaker.py,sha256=GjHE2-h4tRr1P_PFtMF3miiAtJlzkbHbMb6kFXqn8eo,10341
59
+ accelerate/commands/config/update.py,sha256=NXW1J7GkUHpg71QlIXsmMB_0z8S8IZo2FWax5POwrhc,2395
60
+ accelerate/commands/env.py,sha256=VrG8ufBRMfP0R3sLyN0c8Fe3iPhyqGvLNKtQHX_uGBQ,3871
61
+ accelerate/commands/estimate.py,sha256=Ro0jeYOQPlQdR1XjqrUaUpyGFiUpX0cfW-pTgUwio1Q,12409
62
+ accelerate/commands/launch.py,sha256=637e9E68218PupSngacKKIX6kxWtM2spjMh5r8p792U,45429
63
+ accelerate/commands/menu/__init__.py,sha256=uqSlBM0TFHBwzdv3p3SXfpAk1lZFp4h1a7mbBdscPHs,645
64
+ accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc,,
65
+ accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc,,
66
+ accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc,,
67
+ accelerate/commands/menu/__pycache__/input.cpython-310.pyc,,
68
+ accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc,,
69
+ accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc,,
70
+ accelerate/commands/menu/cursor.py,sha256=-lmpJVAzvNc0c3EOtSuLoKB59zqylVCbYyWLPnrOmvQ,2028
71
+ accelerate/commands/menu/helpers.py,sha256=KrSB5fJjH4MUEUAQJ6bYaN16AYcnl9UalDrPD3DYeeg,1483
72
+ accelerate/commands/menu/input.py,sha256=Uj9eDp8-Mb0Fe49nuogqo9W_RCfYd6udfjiPKx7Wjmg,2537
73
+ accelerate/commands/menu/keymap.py,sha256=eXj-suyYs1m5dEHoUKN4mKAMLc8DWHnwhP6G6JSU0jQ,4086
74
+ accelerate/commands/menu/selection_menu.py,sha256=bxy-DHaKKC6SCToOlMBv5_z0MdUzylEg6Sio9OuV3GM,4921
75
+ accelerate/commands/merge.py,sha256=quDKckN3vKn9nsGjdwfoojnfTMFdKRRUkY1DYuuNNmc,2388
76
+ accelerate/commands/test.py,sha256=YrPYEaAACOGZ6btn2MV6NbMSEdBUcMWADLbQWaZSHtk,2149
77
+ accelerate/commands/tpu.py,sha256=KyxDP7IuveidZrbW4rx2s8Ku3o_ptI6tzwr_R7ck0os,5548
78
+ accelerate/commands/utils.py,sha256=ilcfE32oHh28EToM00nc_SR6upfZiuxUI0AjjZu8KYY,3995
79
+ accelerate/data_loader.py,sha256=fPag1dn23dtlZYZSwfSG8XwSAKZPloSSPNPbz4Os7Kk,58955
80
+ accelerate/hooks.py,sha256=uwCizAOaYROLK_LdifR9ai2FrF7WMgzudrKOY5TmGIs,31732
81
+ accelerate/inference.py,sha256=LdFo8N0ivRxYrzrCXQN6oPFzyaRyxmHZ55eJaKOH9cM,7687
82
+ accelerate/launchers.py,sha256=g7RBD9QRSsIJBYLaCmaIjVXHKeiSIvgq2TOW0q9R9z0,13763
83
+ accelerate/local_sgd.py,sha256=33g8YE1FFJ9CZAhWHReczGZ2CLu-7f817VECzjMCNMg,4081
84
+ accelerate/logging.py,sha256=4XcgY_BV7Qn_enh2tZ-8fNtuaE_3n-LsYJbgwhRx_PI,5042
85
+ accelerate/memory_utils.py,sha256=3R5LoeHl6GgTZ-IMPrDZMdaEehWarGdPqODushb-6pg,862
86
+ accelerate/optimizer.py,sha256=QfgCkQ5dA-fLSi_Z7CBPRCObXA1rL9zxHg4tyKCEg2A,8113
87
+ accelerate/scheduler.py,sha256=des_4M_Tt1W8gCYZZbLla0GHBEgJY3Wx2EGBQPTzeiY,4238
88
+ accelerate/state.py,sha256=6jOGPqHUyjrGCfaWdVgBrPLz3Y8p2Brm0FlhKXAdMrw,52098
89
+ accelerate/test_utils/__init__.py,sha256=OuXzYTvyikwGYXkN_Jc7HVnYG8RlLQojOGlsZ0i2KYI,1559
90
+ accelerate/test_utils/__pycache__/__init__.cpython-310.pyc,,
91
+ accelerate/test_utils/__pycache__/examples.cpython-310.pyc,,
92
+ accelerate/test_utils/__pycache__/testing.cpython-310.pyc,,
93
+ accelerate/test_utils/__pycache__/training.cpython-310.pyc,,
94
+ accelerate/test_utils/examples.py,sha256=jRm1S9TkmeoLaqprBvtVFN4LesiaDZtKMNIoLNY2euw,7281
95
+ accelerate/test_utils/scripts/__init__.py,sha256=m1PPTDT4ziIAvM0-FDSgIMIZ69Konn126s6LwuzH6v8,606
96
+ accelerate/test_utils/scripts/__pycache__/__init__.cpython-310.pyc,,
97
+ accelerate/test_utils/scripts/__pycache__/test_cli.cpython-310.pyc,,
98
+ accelerate/test_utils/scripts/__pycache__/test_ddp_comm_hook.cpython-310.pyc,,
99
+ accelerate/test_utils/scripts/__pycache__/test_distributed_data_loop.cpython-310.pyc,,
100
+ accelerate/test_utils/scripts/__pycache__/test_merge_weights.cpython-310.pyc,,
101
+ accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc,,
102
+ accelerate/test_utils/scripts/__pycache__/test_ops.cpython-310.pyc,,
103
+ accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc,,
104
+ accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc,,
105
+ accelerate/test_utils/scripts/external_deps/__init__.py,sha256=m1PPTDT4ziIAvM0-FDSgIMIZ69Konn126s6LwuzH6v8,606
106
+ accelerate/test_utils/scripts/external_deps/__pycache__/__init__.cpython-310.pyc,,
107
+ accelerate/test_utils/scripts/external_deps/__pycache__/test_checkpointing.cpython-310.pyc,,
108
+ accelerate/test_utils/scripts/external_deps/__pycache__/test_ds_multiple_model.cpython-310.pyc,,
109
+ accelerate/test_utils/scripts/external_deps/__pycache__/test_metrics.cpython-310.pyc,,
110
+ accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc,,
111
+ accelerate/test_utils/scripts/external_deps/__pycache__/test_performance.cpython-310.pyc,,
112
+ accelerate/test_utils/scripts/external_deps/__pycache__/test_pippy.cpython-310.pyc,,
113
+ accelerate/test_utils/scripts/external_deps/__pycache__/test_zero3_integration.cpython-310.pyc,,
114
+ accelerate/test_utils/scripts/external_deps/test_checkpointing.py,sha256=GukYgSZClft38oMlIh-K9bjkT1BAP-SnRp2Q673BmgQ,10699
115
+ accelerate/test_utils/scripts/external_deps/test_ds_multiple_model.py,sha256=Cg4-h0B4UcOQ5CxXjIdrsPVR5fFsWCv24DqZGjXEwW8,13790
116
+ accelerate/test_utils/scripts/external_deps/test_metrics.py,sha256=UIvyYY6uQq6GK_QZTkOiIYF31qFITzARjNct79rsc50,12164
117
+ accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py,sha256=ggwLWntGfA0in1Qo1OdlvYV7LA1l4Sjn3GX9ULWGddk,11536
118
+ accelerate/test_utils/scripts/external_deps/test_performance.py,sha256=-fQ_OLKSgMrCtBgT1qJufQHom8lXsTwZVDKEuoR3TZc,10120
119
+ accelerate/test_utils/scripts/external_deps/test_pippy.py,sha256=-nvZsNOe8UEjmAF-hTS1dQBfbPcNpcuvIH6sPNYJ4to,4670
120
+ accelerate/test_utils/scripts/external_deps/test_zero3_integration.py,sha256=lXWL9hUE1N7TNDQP5UTSALZVTHvdHs-Blimp18nuUac,1575
121
+ accelerate/test_utils/scripts/test_cli.py,sha256=qfk1aYFtdvYFCYPkl05602SNGvk08QTv0xZVVcFVtzM,833
122
+ accelerate/test_utils/scripts/test_ddp_comm_hook.py,sha256=3tq_XA0t64GzzqQ818kOy7q86VlR0ibVhd9FsVOQxTk,3153
123
+ accelerate/test_utils/scripts/test_distributed_data_loop.py,sha256=4jrjyq9URiKrxN119FNssfbPV6iHpLKwnHMTbuolnDU,15090
124
+ accelerate/test_utils/scripts/test_merge_weights.py,sha256=DsbcNX_yxKdP9--YexlVjMyT36_7CA_hwieBd5ZbDGs,6054
125
+ accelerate/test_utils/scripts/test_notebook.py,sha256=qfIy3IvH74-kGn8nadBn_k7qrviqvsxy5ijsnUhuY6o,3894
126
+ accelerate/test_utils/scripts/test_ops.py,sha256=1kQxHkLu16lT17Xj7C666BUG-G1u8rdI59c3taFK2tM,6204
127
+ accelerate/test_utils/scripts/test_script.py,sha256=6OY3y2JlkCcOyFgTaBeYmNFeXb4Rcu1TPOr98b1IMnk,34253
128
+ accelerate/test_utils/scripts/test_sync.py,sha256=GrYmYWxR06O7_aG_QAsEzuKvAQX_sXsg_-RhfppYy4g,18602
129
+ accelerate/test_utils/testing.py,sha256=vk4MZT_CGwxhPcmS50Glu_IQFyoW8V1IwQS0aaBT9JM,23456
130
+ accelerate/test_utils/training.py,sha256=8k_YAQ21MzUdb2aFWq1t2fihW1b-iBGh1OJSL3whY68,4019
131
+ accelerate/tracking.py,sha256=WLY-H1DTsxrz4BVzle7QZMp0Irg84yFMbA1e6JaY3pM,39789
132
+ accelerate/utils/__init__.py,sha256=w2XQxUqMc5nHSS2yHFXkdoKl09oWi-Bcg5MvNmxSJMs,7263
133
+ accelerate/utils/__pycache__/__init__.cpython-310.pyc,,
134
+ accelerate/utils/__pycache__/bnb.cpython-310.pyc,,
135
+ accelerate/utils/__pycache__/constants.cpython-310.pyc,,
136
+ accelerate/utils/__pycache__/dataclasses.cpython-310.pyc,,
137
+ accelerate/utils/__pycache__/deepspeed.cpython-310.pyc,,
138
+ accelerate/utils/__pycache__/environment.cpython-310.pyc,,
139
+ accelerate/utils/__pycache__/fsdp_utils.cpython-310.pyc,,
140
+ accelerate/utils/__pycache__/imports.cpython-310.pyc,,
141
+ accelerate/utils/__pycache__/launch.cpython-310.pyc,,
142
+ accelerate/utils/__pycache__/megatron_lm.cpython-310.pyc,,
143
+ accelerate/utils/__pycache__/memory.cpython-310.pyc,,
144
+ accelerate/utils/__pycache__/modeling.cpython-310.pyc,,
145
+ accelerate/utils/__pycache__/offload.cpython-310.pyc,,
146
+ accelerate/utils/__pycache__/operations.cpython-310.pyc,,
147
+ accelerate/utils/__pycache__/other.cpython-310.pyc,,
148
+ accelerate/utils/__pycache__/random.cpython-310.pyc,,
149
+ accelerate/utils/__pycache__/rich.cpython-310.pyc,,
150
+ accelerate/utils/__pycache__/torch_xla.cpython-310.pyc,,
151
+ accelerate/utils/__pycache__/tqdm.cpython-310.pyc,,
152
+ accelerate/utils/__pycache__/transformer_engine.cpython-310.pyc,,
153
+ accelerate/utils/__pycache__/versions.cpython-310.pyc,,
154
+ accelerate/utils/bnb.py,sha256=3i59dy8EcBYJEnT2alJ5_M-zeIpFsrceQ4bImiJJKOk,20570
155
+ accelerate/utils/constants.py,sha256=wTMK0MHmNTEquQEP-KR7daUPd6WQlNBHk3dSv2cj1KA,3032
156
+ accelerate/utils/dataclasses.py,sha256=nbETUDHoAowHWNynHEoRoeBVlwBTMoX5YXgDwkjzY4M,120386
157
+ accelerate/utils/deepspeed.py,sha256=NMRMHcc56dO9AbFPYKhrHo3HEMvVyCEEaIu1ldg8HRg,13300
158
+ accelerate/utils/environment.py,sha256=5FEX5DH0nEqSKp12NpJO_v6bCNVYUyLhlFS0RKV5AZM,14729
159
+ accelerate/utils/fsdp_utils.py,sha256=1kel83Xrp65Q-MAyombm1OfgSvl7VsVejBuX9uJiLzM,18177
160
+ accelerate/utils/imports.py,sha256=0TWsPqUbeQqRcfFqUVJD7sUwEPq5JKmOihCICGLIG8I,14019
161
+ accelerate/utils/launch.py,sha256=U-SrduXgI366tk9BZS6ywpmfqFEyQ1VlVf86QFRrPuc,29533
162
+ accelerate/utils/megatron_lm.py,sha256=L8dAqLeVf7XjjX4VH1jQKk1gBYaVpnEdo6M3MQ8CWkI,58087
163
+ accelerate/utils/memory.py,sha256=jYGcK70LAruVoD-faXr5GVF6vuIOFsCdfnSgWSD9bPo,5939
164
+ accelerate/utils/modeling.py,sha256=JHX_hSX8rWcEMRxT2PHY5B9Gue6pKliI1I-d1WsaRwk,92691
165
+ accelerate/utils/offload.py,sha256=qjaVai81wbkA0YH2WkmOXvZT0BRphygfRV_4Ua4j4U4,7837
166
+ accelerate/utils/operations.py,sha256=eyaf1s5f6kkDTX_wGaxFOXQJaeaD8LArtxXDwyLA2Wc,31380
167
+ accelerate/utils/other.py,sha256=5oGbDA_1z2Qq2cFpVexKqzKm4-dc1hWCBkpSQOomEDU,12365
168
+ accelerate/utils/random.py,sha256=ssRk26FiM0f2yMiBIwpDkdH5STCsD_WelZDoEGObDis,5373
169
+ accelerate/utils/rich.py,sha256=8JZX_uGMQX-BufdXxJpdne7BWd1KyLHSgbiGxrDMYr8,847
170
+ accelerate/utils/torch_xla.py,sha256=Pq1tuqN0X_pWDVza6YgjfO45uoJdoRVRForLeLQzFus,1908
171
+ accelerate/utils/tqdm.py,sha256=k8e9JnieTEQHCCNBaiBys7hPxWlEbyRASdIma-qy_X8,1657
172
+ accelerate/utils/transformer_engine.py,sha256=b7x4Y9DKcgNNVAJzPiryxWlhvRExZfIW2Y0qEErGzms,5883
173
+ accelerate/utils/versions.py,sha256=UgmcbjBm--6CIx1ZamSAMjAK_B_2l48LbeaNygqej8M,2149
wemm/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/REQUESTED ADDED
File without changes
wemm/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.44.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
wemm/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/entry_points.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ [console_scripts]
2
+ accelerate = accelerate.commands.accelerate_cli:main
3
+ accelerate-config = accelerate.commands.config:main
4
+ accelerate-estimate-memory = accelerate.commands.estimate:main
5
+ accelerate-launch = accelerate.commands.launch:main
6
+ accelerate-merge-weights = accelerate.commands.merge:main
wemm/lib/python3.10/site-packages/boto3-1.26.118.dist-info/METADATA ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: boto3
3
+ Version: 1.26.118
4
+ Summary: The AWS SDK for Python
5
+ Home-page: https://github.com/boto/boto3
6
+ Author: Amazon Web Services
7
+ License: Apache License 2.0
8
+ Project-URL: Documentation, https://boto3.amazonaws.com/v1/documentation/api/latest/index.html
9
+ Project-URL: Source, https://github.com/boto/boto3
10
+ Platform: UNKNOWN
11
+ Classifier: Development Status :: 5 - Production/Stable
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: Natural Language :: English
14
+ Classifier: License :: OSI Approved :: Apache Software License
15
+ Classifier: Programming Language :: Python
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.7
18
+ Classifier: Programming Language :: Python :: 3.8
19
+ Classifier: Programming Language :: Python :: 3.9
20
+ Classifier: Programming Language :: Python :: 3.10
21
+ Classifier: Programming Language :: Python :: 3.11
22
+ Requires-Python: >= 3.7
23
+ License-File: LICENSE
24
+ License-File: NOTICE
25
+ Requires-Dist: botocore (<1.30.0,>=1.29.118)
26
+ Requires-Dist: jmespath (<2.0.0,>=0.7.1)
27
+ Requires-Dist: s3transfer (<0.7.0,>=0.6.0)
28
+ Provides-Extra: crt
29
+ Requires-Dist: botocore[crt] (<2.0a0,>=1.21.0) ; extra == 'crt'
30
+
31
+ ===============================
32
+ Boto3 - The AWS SDK for Python
33
+ ===============================
34
+
35
+ |Version| |Python| |License|
36
+
37
+ Boto3 is the Amazon Web Services (AWS) Software Development Kit (SDK) for
38
+ Python, which allows Python developers to write software that makes use
39
+ of services like Amazon S3 and Amazon EC2. You can find the latest, most
40
+ up to date, documentation at our `doc site`_, including a list of
41
+ services that are supported.
42
+
43
+ Boto3 is maintained and published by `Amazon Web Services`_.
44
+
45
+ Boto (pronounced boh-toh) was named after the fresh water dolphin native to the Amazon river. The name was chosen by the author of the original Boto library, Mitch Garnaat, as a reference to the company.
46
+
47
+ Notices
48
+ -------
49
+
50
+ On 2021-01-15, deprecation for Python 2.7 was announced and support was dropped
51
+ on 2021-07-15. To avoid disruption, customers using Boto3 on Python 2.7 may
52
+ need to upgrade their version of Python or pin the version of Boto3. For
53
+ more information, see this `blog post <https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-python-2-7-in-aws-sdk-for-python-and-aws-cli-v1/>`__.
54
+
55
+ On 2022-05-30, support for Python 3.6 was ended. This follows the
56
+ Python Software Foundation `end of support <https://www.python.org/dev/peps/pep-0494/#lifespan>`__
57
+ for the runtime which occurred on 2021-12-23.
58
+ For more information, see this `blog post <https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/>`__.
59
+
60
+ .. _boto: https://docs.pythonboto.org/
61
+ .. _`doc site`: https://boto3.amazonaws.com/v1/documentation/api/latest/index.html
62
+ .. _`Amazon Web Services`: https://aws.amazon.com/what-is-aws/
63
+ .. |Python| image:: https://img.shields.io/pypi/pyversions/boto3.svg?style=flat
64
+ :target: https://pypi.python.org/pypi/boto3/
65
+ :alt: Python Versions
66
+ .. |Version| image:: http://img.shields.io/pypi/v/boto3.svg?style=flat
67
+ :target: https://pypi.python.org/pypi/boto3/
68
+ :alt: Package Version
69
+ .. |License| image:: http://img.shields.io/pypi/l/boto3.svg?style=flat
70
+ :target: https://github.com/boto/boto3/blob/develop/LICENSE
71
+ :alt: License
72
+
73
+ Getting Started
74
+ ---------------
75
+ Assuming that you have a supported version of Python installed, you can first
76
+ set up your environment with:
77
+
78
+ .. code-block:: sh
79
+
80
+ $ python -m venv .venv
81
+ ...
82
+ $ . .venv/bin/activate
83
+
84
+ Then, you can install boto3 from PyPI with:
85
+
86
+ .. code-block:: sh
87
+
88
+ $ python -m pip install boto3
89
+
90
+ or install from source with:
91
+
92
+ .. code-block:: sh
93
+
94
+ $ git clone https://github.com/boto/boto3.git
95
+ $ cd boto3
96
+ $ python -m pip install -r requirements.txt
97
+ $ python -m pip install -e .
98
+
99
+
100
+ Using Boto3
101
+ ~~~~~~~~~~~~~~
102
+ After installing boto3
103
+
104
+ Next, set up credentials (in e.g. ``~/.aws/credentials``):
105
+
106
+ .. code-block:: ini
107
+
108
+ [default]
109
+ aws_access_key_id = YOUR_KEY
110
+ aws_secret_access_key = YOUR_SECRET
111
+
112
+ Then, set up a default region (in e.g. ``~/.aws/config``):
113
+
114
+ .. code-block:: ini
115
+
116
+ [default]
117
+ region=us-east-1
118
+
119
+ Other credentials configuration method can be found `here <https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html>`__
120
+
121
+ Then, from a Python interpreter:
122
+
123
+ .. code-block:: python
124
+
125
+ >>> import boto3
126
+ >>> s3 = boto3.resource('s3')
127
+ >>> for bucket in s3.buckets.all():
128
+ print(bucket.name)
129
+
130
+ Running Tests
131
+ ~~~~~~~~~~~~~
132
+ You can run tests in all supported Python versions using ``tox``. By default,
133
+ it will run all of the unit and functional tests, but you can also specify your own
134
+ ``pytest`` options. Note that this requires that you have all supported
135
+ versions of Python installed, otherwise you must pass ``-e`` or run the
136
+ ``pytest`` command directly:
137
+
138
+ .. code-block:: sh
139
+
140
+ $ tox
141
+ $ tox -- unit/test_session.py
142
+ $ tox -e py26,py33 -- integration/
143
+
144
+ You can also run individual tests with your default Python version:
145
+
146
+ .. code-block:: sh
147
+
148
+ $ pytest tests/unit
149
+
150
+
151
+ Getting Help
152
+ ------------
153
+
154
+ We use GitHub issues for tracking bugs and feature requests and have limited
155
+ bandwidth to address them. Please use these community resources for getting
156
+ help:
157
+
158
+ * Ask a question on `Stack Overflow <https://stackoverflow.com/>`__ and tag it with `boto3 <https://stackoverflow.com/questions/tagged/boto3>`__
159
+ * Open a support ticket with `AWS Support <https://console.aws.amazon.com/support/home#/>`__
160
+ * If it turns out that you may have found a bug, please `open an issue <https://github.com/boto/boto3/issues/new>`__
161
+
162
+
163
+ Contributing
164
+ ------------
165
+
166
+ We value feedback and contributions from our community. Whether it's a bug report, new feature, correction, or additional documentation, we welcome your issues and pull requests. Please read through this `CONTRIBUTING <https://github.com/boto/boto3/blob/develop/CONTRIBUTING.rst>`__ document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your contribution.
167
+
168
+
169
+ Maintenance and Support for SDK Major Versions
170
+ ----------------------------------------------
171
+
172
+ Boto3 was made generally available on 06/22/2015 and is currently in the full support phase of the availability life cycle.
173
+
174
+ For information about maintenance and support for SDK major versions and their underlying dependencies, see the following in the AWS SDKs and Tools Shared Configuration and Credentials Reference Guide:
175
+
176
+ * `AWS SDKs and Tools Maintenance Policy <https://docs.aws.amazon.com/sdkref/latest/guide/maint-policy.html>`__
177
+ * `AWS SDKs and Tools Version Support Matrix <https://docs.aws.amazon.com/sdkref/latest/guide/version-support-matrix.html>`__
178
+
179
+
180
+ More Resources
181
+ --------------
182
+
183
+ * `NOTICE <https://github.com/boto/boto3/blob/develop/NOTICE>`__
184
+ * `Changelog <https://github.com/boto/boto3/blob/develop/CHANGELOG.rst>`__
185
+ * `License <https://github.com/boto/boto3/blob/develop/LICENSE>`__
186
+
187
+
wemm/lib/python3.10/site-packages/boto3-1.26.118.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.37.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
wemm/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/LICENSE.txt ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright 2007 Pallets
2
+
3
+ Redistribution and use in source and binary forms, with or without
4
+ modification, are permitted provided that the following conditions are
5
+ met:
6
+
7
+ 1. Redistributions of source code must retain the above copyright
8
+ notice, this list of conditions and the following disclaimer.
9
+
10
+ 2. Redistributions in binary form must reproduce the above copyright
11
+ notice, this list of conditions and the following disclaimer in the
12
+ documentation and/or other materials provided with the distribution.
13
+
14
+ 3. Neither the name of the copyright holder nor the names of its
15
+ contributors may be used to endorse or promote products derived from
16
+ this software without specific prior written permission.
17
+
18
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
21
+ PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
24
+ TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
wemm/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/METADATA ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.3
2
+ Name: Jinja2
3
+ Version: 3.1.5
4
+ Summary: A very fast and expressive template engine.
5
+ Maintainer-email: Pallets <contact@palletsprojects.com>
6
+ Requires-Python: >=3.7
7
+ Description-Content-Type: text/markdown
8
+ Classifier: Development Status :: 5 - Production/Stable
9
+ Classifier: Environment :: Web Environment
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: License :: OSI Approved :: BSD License
12
+ Classifier: Operating System :: OS Independent
13
+ Classifier: Programming Language :: Python
14
+ Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
15
+ Classifier: Topic :: Text Processing :: Markup :: HTML
16
+ Classifier: Typing :: Typed
17
+ Requires-Dist: MarkupSafe>=2.0
18
+ Requires-Dist: Babel>=2.7 ; extra == "i18n"
19
+ Project-URL: Changes, https://jinja.palletsprojects.com/changes/
20
+ Project-URL: Chat, https://discord.gg/pallets
21
+ Project-URL: Documentation, https://jinja.palletsprojects.com/
22
+ Project-URL: Donate, https://palletsprojects.com/donate
23
+ Project-URL: Source, https://github.com/pallets/jinja/
24
+ Provides-Extra: i18n
25
+
26
+ # Jinja
27
+
28
+ Jinja is a fast, expressive, extensible templating engine. Special
29
+ placeholders in the template allow writing code similar to Python
30
+ syntax. Then the template is passed data to render the final document.
31
+
32
+ It includes:
33
+
34
+ - Template inheritance and inclusion.
35
+ - Define and import macros within templates.
36
+ - HTML templates can use autoescaping to prevent XSS from untrusted
37
+ user input.
38
+ - A sandboxed environment can safely render untrusted templates.
39
+ - AsyncIO support for generating templates and calling async
40
+ functions.
41
+ - I18N support with Babel.
42
+ - Templates are compiled to optimized Python code just-in-time and
43
+ cached, or can be compiled ahead-of-time.
44
+ - Exceptions point to the correct line in templates to make debugging
45
+ easier.
46
+ - Extensible filters, tests, functions, and even syntax.
47
+
48
+ Jinja's philosophy is that while application logic belongs in Python if
49
+ possible, it shouldn't make the template designer's job difficult by
50
+ restricting functionality too much.
51
+
52
+
53
+ ## In A Nutshell
54
+
55
+ ```jinja
56
+ {% extends "base.html" %}
57
+ {% block title %}Members{% endblock %}
58
+ {% block content %}
59
+ <ul>
60
+ {% for user in users %}
61
+ <li><a href="{{ user.url }}">{{ user.username }}</a></li>
62
+ {% endfor %}
63
+ </ul>
64
+ {% endblock %}
65
+ ```
66
+
67
+ ## Donate
68
+
69
+ The Pallets organization develops and supports Jinja and other popular
70
+ packages. In order to grow the community of contributors and users, and
71
+ allow the maintainers to devote more time to the projects, [please
72
+ donate today][].
73
+
74
+ [please donate today]: https://palletsprojects.com/donate
75
+
wemm/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/RECORD ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ jinja2-3.1.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ jinja2-3.1.5.dist-info/LICENSE.txt,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475
3
+ jinja2-3.1.5.dist-info/METADATA,sha256=PJNSUFNBwoqGA2vce2XSP8M_p2EYqAHYI7hoWLABtFo,2593
4
+ jinja2-3.1.5.dist-info/RECORD,,
5
+ jinja2-3.1.5.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ jinja2-3.1.5.dist-info/WHEEL,sha256=CpUCUxeHQbRN5UGRQHYRJorO5Af-Qy_fHMctcQ8DSGI,82
7
+ jinja2-3.1.5.dist-info/entry_points.txt,sha256=OL85gYU1eD8cuPlikifFngXpeBjaxl6rIJ8KkC_3r-I,58
8
+ jinja2/__init__.py,sha256=zpt8UHzpS2eB1c04kn1LkKkaXLXXcKd33klq7UJGIgg,1928
9
+ jinja2/__pycache__/__init__.cpython-310.pyc,,
10
+ jinja2/__pycache__/_identifier.cpython-310.pyc,,
11
+ jinja2/__pycache__/async_utils.cpython-310.pyc,,
12
+ jinja2/__pycache__/bccache.cpython-310.pyc,,
13
+ jinja2/__pycache__/compiler.cpython-310.pyc,,
14
+ jinja2/__pycache__/constants.cpython-310.pyc,,
15
+ jinja2/__pycache__/debug.cpython-310.pyc,,
16
+ jinja2/__pycache__/defaults.cpython-310.pyc,,
17
+ jinja2/__pycache__/environment.cpython-310.pyc,,
18
+ jinja2/__pycache__/exceptions.cpython-310.pyc,,
19
+ jinja2/__pycache__/ext.cpython-310.pyc,,
20
+ jinja2/__pycache__/filters.cpython-310.pyc,,
21
+ jinja2/__pycache__/idtracking.cpython-310.pyc,,
22
+ jinja2/__pycache__/lexer.cpython-310.pyc,,
23
+ jinja2/__pycache__/loaders.cpython-310.pyc,,
24
+ jinja2/__pycache__/meta.cpython-310.pyc,,
25
+ jinja2/__pycache__/nativetypes.cpython-310.pyc,,
26
+ jinja2/__pycache__/nodes.cpython-310.pyc,,
27
+ jinja2/__pycache__/optimizer.cpython-310.pyc,,
28
+ jinja2/__pycache__/parser.cpython-310.pyc,,
29
+ jinja2/__pycache__/runtime.cpython-310.pyc,,
30
+ jinja2/__pycache__/sandbox.cpython-310.pyc,,
31
+ jinja2/__pycache__/tests.cpython-310.pyc,,
32
+ jinja2/__pycache__/utils.cpython-310.pyc,,
33
+ jinja2/__pycache__/visitor.cpython-310.pyc,,
34
+ jinja2/_identifier.py,sha256=_zYctNKzRqlk_murTNlzrju1FFJL7Va_Ijqqd7ii2lU,1958
35
+ jinja2/async_utils.py,sha256=vK-PdsuorOMnWSnEkT3iUJRIkTnYgO2T6MnGxDgHI5o,2834
36
+ jinja2/bccache.py,sha256=gh0qs9rulnXo0PhX5jTJy2UHzI8wFnQ63o_vw7nhzRg,14061
37
+ jinja2/compiler.py,sha256=9RpCQl5X88BHllJiPsHPh295Hh0uApvwFJNQuutULeM,74131
38
+ jinja2/constants.py,sha256=GMoFydBF_kdpaRKPoM5cl5MviquVRLVyZtfp5-16jg0,1433
39
+ jinja2/debug.py,sha256=CnHqCDHd-BVGvti_8ZsTolnXNhA3ECsY-6n_2pwU8Hw,6297
40
+ jinja2/defaults.py,sha256=boBcSw78h-lp20YbaXSJsqkAI2uN_mD_TtCydpeq5wU,1267
41
+ jinja2/environment.py,sha256=9nhrP7Ch-NbGX00wvyr4yy-uhNHq2OCc60ggGrni_fk,61513
42
+ jinja2/exceptions.py,sha256=ioHeHrWwCWNaXX1inHmHVblvc4haO7AXsjCp3GfWvx0,5071
43
+ jinja2/ext.py,sha256=5PF5eHfh8mXAIxXHHRB2xXbXohi8pE3nHSOxa66uS7E,31875
44
+ jinja2/filters.py,sha256=cvRI2pqXNMzw8ba41VOBpgu_wu1r-l1_QxwD6yVoJ5g,55025
45
+ jinja2/idtracking.py,sha256=-ll5lIp73pML3ErUYiIJj7tdmWxcH_IlDv3yA_hiZYo,10555
46
+ jinja2/lexer.py,sha256=LYiYio6br-Tep9nPcupWXsPEtjluw3p1mU-lNBVRUfk,29786
47
+ jinja2/loaders.py,sha256=wIrnxjvcbqh5VwW28NSkfotiDq8qNCxIOSFbGUiSLB4,24055
48
+ jinja2/meta.py,sha256=OTDPkaFvU2Hgvx-6akz7154F8BIWaRmvJcBFvwopHww,4397
49
+ jinja2/nativetypes.py,sha256=7GIGALVJgdyL80oZJdQUaUfwSt5q2lSSZbXt0dNf_M4,4210
50
+ jinja2/nodes.py,sha256=m1Duzcr6qhZI8JQ6VyJgUNinjAf5bQzijSmDnMsvUx8,34579
51
+ jinja2/optimizer.py,sha256=rJnCRlQ7pZsEEmMhsQDgC_pKyDHxP5TPS6zVPGsgcu8,1651
52
+ jinja2/parser.py,sha256=lLOFy3sEmHc5IaEHRiH1sQVnId2moUQzhyeJZTtdY30,40383
53
+ jinja2/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
54
+ jinja2/runtime.py,sha256=gDk-GvdriJXqgsGbHgrcKTP0Yp6zPXzhzrIpCFH3jAU,34249
55
+ jinja2/sandbox.py,sha256=Mw2aitlY2I8la7FYhcX2YG9BtUYcLnD0Gh3d29cDWrY,15009
56
+ jinja2/tests.py,sha256=VLsBhVFnWg-PxSBz1MhRnNWgP1ovXk3neO1FLQMeC9Q,5926
57
+ jinja2/utils.py,sha256=rRp3o9e7ZKS4fyrWRbELyLcpuGVTFcnooaOa1qx_FIk,24129
58
+ jinja2/visitor.py,sha256=EcnL1PIwf_4RVCOMxsRNuR8AXHbS1qfAdMOE2ngKJz4,3557
wemm/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: flit 3.10.1
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
wemm/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/entry_points.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [babel.extractors]
2
+ jinja2=jinja2.ext:babel_extract[i18n]
3
+
wemm/lib/python3.10/site-packages/lit-18.1.8.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.41.2)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
wemm/lib/python3.10/site-packages/pillow-11.1.0.dist-info/METADATA ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: pillow
3
+ Version: 11.1.0
4
+ Summary: Python Imaging Library (Fork)
5
+ Author-email: "Jeffrey A. Clark" <aclark@aclark.net>
6
+ License: MIT-CMU
7
+ Project-URL: Changelog, https://github.com/python-pillow/Pillow/releases
8
+ Project-URL: Documentation, https://pillow.readthedocs.io
9
+ Project-URL: Funding, https://tidelift.com/subscription/pkg/pypi-pillow?utm_source=pypi-pillow&utm_medium=pypi
10
+ Project-URL: Homepage, https://python-pillow.github.io
11
+ Project-URL: Mastodon, https://fosstodon.org/@pillow
12
+ Project-URL: Release notes, https://pillow.readthedocs.io/en/stable/releasenotes/index.html
13
+ Project-URL: Source, https://github.com/python-pillow/Pillow
14
+ Keywords: Imaging
15
+ Classifier: Development Status :: 6 - Mature
16
+ Classifier: License :: OSI Approved :: CMU License (MIT-CMU)
17
+ Classifier: Programming Language :: Python :: 3 :: Only
18
+ Classifier: Programming Language :: Python :: 3.9
19
+ Classifier: Programming Language :: Python :: 3.10
20
+ Classifier: Programming Language :: Python :: 3.11
21
+ Classifier: Programming Language :: Python :: 3.12
22
+ Classifier: Programming Language :: Python :: 3.13
23
+ Classifier: Programming Language :: Python :: Implementation :: CPython
24
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
25
+ Classifier: Topic :: Multimedia :: Graphics
26
+ Classifier: Topic :: Multimedia :: Graphics :: Capture :: Digital Camera
27
+ Classifier: Topic :: Multimedia :: Graphics :: Capture :: Screen Capture
28
+ Classifier: Topic :: Multimedia :: Graphics :: Graphics Conversion
29
+ Classifier: Topic :: Multimedia :: Graphics :: Viewers
30
+ Classifier: Typing :: Typed
31
+ Requires-Python: >=3.9
32
+ Description-Content-Type: text/markdown
33
+ License-File: LICENSE
34
+ Provides-Extra: docs
35
+ Requires-Dist: furo; extra == "docs"
36
+ Requires-Dist: olefile; extra == "docs"
37
+ Requires-Dist: sphinx>=8.1; extra == "docs"
38
+ Requires-Dist: sphinx-copybutton; extra == "docs"
39
+ Requires-Dist: sphinx-inline-tabs; extra == "docs"
40
+ Requires-Dist: sphinxext-opengraph; extra == "docs"
41
+ Provides-Extra: fpx
42
+ Requires-Dist: olefile; extra == "fpx"
43
+ Provides-Extra: mic
44
+ Requires-Dist: olefile; extra == "mic"
45
+ Provides-Extra: tests
46
+ Requires-Dist: check-manifest; extra == "tests"
47
+ Requires-Dist: coverage>=7.4.2; extra == "tests"
48
+ Requires-Dist: defusedxml; extra == "tests"
49
+ Requires-Dist: markdown2; extra == "tests"
50
+ Requires-Dist: olefile; extra == "tests"
51
+ Requires-Dist: packaging; extra == "tests"
52
+ Requires-Dist: pyroma; extra == "tests"
53
+ Requires-Dist: pytest; extra == "tests"
54
+ Requires-Dist: pytest-cov; extra == "tests"
55
+ Requires-Dist: pytest-timeout; extra == "tests"
56
+ Requires-Dist: trove-classifiers>=2024.10.12; extra == "tests"
57
+ Provides-Extra: typing
58
+ Requires-Dist: typing-extensions; python_version < "3.10" and extra == "typing"
59
+ Provides-Extra: xmp
60
+ Requires-Dist: defusedxml; extra == "xmp"
61
+
62
+ <p align="center">
63
+ <img width="248" height="250" src="https://raw.githubusercontent.com/python-pillow/pillow-logo/main/pillow-logo-248x250.png" alt="Pillow logo">
64
+ </p>
65
+
66
+ # Pillow
67
+
68
+ ## Python Imaging Library (Fork)
69
+
70
+ Pillow is the friendly PIL fork by [Jeffrey A. Clark and
71
+ contributors](https://github.com/python-pillow/Pillow/graphs/contributors).
72
+ PIL is the Python Imaging Library by Fredrik Lundh and contributors.
73
+ As of 2019, Pillow development is
74
+ [supported by Tidelift](https://tidelift.com/subscription/pkg/pypi-pillow?utm_source=pypi-pillow&utm_medium=readme&utm_campaign=enterprise).
75
+
76
+ <table>
77
+ <tr>
78
+ <th>docs</th>
79
+ <td>
80
+ <a href="https://pillow.readthedocs.io/?badge=latest"><img
81
+ alt="Documentation Status"
82
+ src="https://readthedocs.org/projects/pillow/badge/?version=latest"></a>
83
+ </td>
84
+ </tr>
85
+ <tr>
86
+ <th>tests</th>
87
+ <td>
88
+ <a href="https://github.com/python-pillow/Pillow/actions/workflows/lint.yml"><img
89
+ alt="GitHub Actions build status (Lint)"
90
+ src="https://github.com/python-pillow/Pillow/workflows/Lint/badge.svg"></a>
91
+ <a href="https://github.com/python-pillow/Pillow/actions/workflows/test.yml"><img
92
+ alt="GitHub Actions build status (Test Linux and macOS)"
93
+ src="https://github.com/python-pillow/Pillow/workflows/Test/badge.svg"></a>
94
+ <a href="https://github.com/python-pillow/Pillow/actions/workflows/test-windows.yml"><img
95
+ alt="GitHub Actions build status (Test Windows)"
96
+ src="https://github.com/python-pillow/Pillow/workflows/Test%20Windows/badge.svg"></a>
97
+ <a href="https://github.com/python-pillow/Pillow/actions/workflows/test-mingw.yml"><img
98
+ alt="GitHub Actions build status (Test MinGW)"
99
+ src="https://github.com/python-pillow/Pillow/workflows/Test%20MinGW/badge.svg"></a>
100
+ <a href="https://github.com/python-pillow/Pillow/actions/workflows/test-cygwin.yml"><img
101
+ alt="GitHub Actions build status (Test Cygwin)"
102
+ src="https://github.com/python-pillow/Pillow/workflows/Test%20Cygwin/badge.svg"></a>
103
+ <a href="https://github.com/python-pillow/Pillow/actions/workflows/test-docker.yml"><img
104
+ alt="GitHub Actions build status (Test Docker)"
105
+ src="https://github.com/python-pillow/Pillow/workflows/Test%20Docker/badge.svg"></a>
106
+ <a href="https://ci.appveyor.com/project/python-pillow/Pillow"><img
107
+ alt="AppVeyor CI build status (Windows)"
108
+ src="https://img.shields.io/appveyor/build/python-pillow/Pillow/main.svg?label=Windows%20build"></a>
109
+ <a href="https://github.com/python-pillow/Pillow/actions/workflows/wheels.yml"><img
110
+ alt="GitHub Actions build status (Wheels)"
111
+ src="https://github.com/python-pillow/Pillow/workflows/Wheels/badge.svg"></a>
112
+ <a href="https://app.codecov.io/gh/python-pillow/Pillow"><img
113
+ alt="Code coverage"
114
+ src="https://codecov.io/gh/python-pillow/Pillow/branch/main/graph/badge.svg"></a>
115
+ <a href="https://issues.oss-fuzz.com/issues?q=title:pillow"><img
116
+ alt="Fuzzing Status"
117
+ src="https://oss-fuzz-build-logs.storage.googleapis.com/badges/pillow.svg"></a>
118
+ </td>
119
+ </tr>
120
+ <tr>
121
+ <th>package</th>
122
+ <td>
123
+ <a href="https://zenodo.org/badge/latestdoi/17549/python-pillow/Pillow"><img
124
+ alt="Zenodo"
125
+ src="https://zenodo.org/badge/17549/python-pillow/Pillow.svg"></a>
126
+ <a href="https://tidelift.com/subscription/pkg/pypi-pillow?utm_source=pypi-pillow&utm_medium=badge"><img
127
+ alt="Tidelift"
128
+ src="https://tidelift.com/badges/package/pypi/pillow?style=flat"></a>
129
+ <a href="https://pypi.org/project/pillow/"><img
130
+ alt="Newest PyPI version"
131
+ src="https://img.shields.io/pypi/v/pillow.svg"></a>
132
+ <a href="https://pypi.org/project/pillow/"><img
133
+ alt="Number of PyPI downloads"
134
+ src="https://img.shields.io/pypi/dm/pillow.svg"></a>
135
+ <a href="https://www.bestpractices.dev/projects/6331"><img
136
+ alt="OpenSSF Best Practices"
137
+ src="https://www.bestpractices.dev/projects/6331/badge"></a>
138
+ </td>
139
+ </tr>
140
+ <tr>
141
+ <th>social</th>
142
+ <td>
143
+ <a href="https://gitter.im/python-pillow/Pillow?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge"><img
144
+ alt="Join the chat at https://gitter.im/python-pillow/Pillow"
145
+ src="https://badges.gitter.im/python-pillow/Pillow.svg"></a>
146
+ <a href="https://fosstodon.org/@pillow"><img
147
+ alt="Follow on https://fosstodon.org/@pillow"
148
+ src="https://img.shields.io/badge/publish-on%20Mastodon-595aff.svg"
149
+ rel="me"></a>
150
+ </td>
151
+ </tr>
152
+ </table>
153
+
154
+ ## Overview
155
+
156
+ The Python Imaging Library adds image processing capabilities to your Python interpreter.
157
+
158
+ This library provides extensive file format support, an efficient internal representation, and fairly powerful image processing capabilities.
159
+
160
+ The core image library is designed for fast access to data stored in a few basic pixel formats. It should provide a solid foundation for a general image processing tool.
161
+
162
+ ## More Information
163
+
164
+ - [Documentation](https://pillow.readthedocs.io/)
165
+ - [Installation](https://pillow.readthedocs.io/en/latest/installation/basic-installation.html)
166
+ - [Handbook](https://pillow.readthedocs.io/en/latest/handbook/index.html)
167
+ - [Contribute](https://github.com/python-pillow/Pillow/blob/main/.github/CONTRIBUTING.md)
168
+ - [Issues](https://github.com/python-pillow/Pillow/issues)
169
+ - [Pull requests](https://github.com/python-pillow/Pillow/pulls)
170
+ - [Release notes](https://pillow.readthedocs.io/en/stable/releasenotes/index.html)
171
+ - [Changelog](https://github.com/python-pillow/Pillow/releases)
172
+ - [Pre-fork](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst#pre-fork)
173
+
174
+ ## Report a Vulnerability
175
+
176
+ To report a security vulnerability, please follow the procedure described in the [Tidelift security policy](https://tidelift.com/docs/security).
wemm/lib/python3.10/site-packages/qcloud_cos/__init__.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Public package interface: re-export the client, config, auth helper and
# exception types so callers can simply `from qcloud_cos import CosS3Client`.
from .cos_client import CosS3Client
from .cos_client import CosConfig
from .cos_exception import CosServiceError
from .cos_exception import CosClientError
from .cos_auth import CosS3Auth
from .cos_comm import get_date
from .meta_insight import MetaInsightClient
from .ai_recognition import AIRecognitionClient

import logging

# logging.NullHandler has been in the standard library since Python 2.7/3.1,
# so the historical `try: ... except ImportError:` fallback that defined a
# hand-rolled NullHandler subclass was dead code on Python 3 and has been
# removed.  The name is still imported at module level so any caller doing
# `from qcloud_cos import NullHandler` keeps working.
from logging import NullHandler

# Attach a no-op handler so the library never emits "No handlers could be
# found" warnings when the host application has not configured logging.
logging.getLogger(__name__).addHandler(NullHandler())
wemm/lib/python3.10/site-packages/qcloud_cos/__pycache__/ai_recognition.cpython-310.pyc ADDED
Binary file (41.8 kB). View file
 
wemm/lib/python3.10/site-packages/qcloud_cos/__pycache__/streambody.cpython-310.pyc ADDED
Binary file (2.92 kB). View file
 
wemm/lib/python3.10/site-packages/qcloud_cos/cos_exception.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding=utf-8
2
+
3
+ import xml.dom.minidom
4
+
5
+
6
class CosException(Exception):
    """Base class for all COS SDK errors; wraps a single error message.

    :param message: the error payload — typically a string, but server
        errors may pass a raw XML body or a digested dict.
    """

    def __init__(self, message):
        # Call Exception.__init__ so that e.args is populated; without it
        # repr() loses the message and the exception cannot round-trip
        # through pickle (e.g. across multiprocessing boundaries).
        super(CosException, self).__init__(message)
        self._message = message

    def __str__(self):
        return str(self._message)
12
+
13
+
14
def digest_xml(data):
    """Parse a COS error XML body into a dict of its key fields.

    On success returns a dict with keys ``code``, ``message``, ``resource``,
    ``requestid`` and ``traceid`` (``'Unknown'`` when the document carries no
    TraceId element).  If the payload cannot be parsed as the expected XML
    structure, the fixed marker string ``"Response Error Msg Is INVALID"``
    is returned instead — callers distinguish the two cases by type.
    """
    try:
        root = xml.dom.minidom.parseString(data).documentElement

        def first_text(tag):
            # Text of the first matching element; raises IndexError when the
            # tag is absent, which the outer handler converts into the
            # generic invalid-message marker.
            return root.getElementsByTagName(tag)[0].childNodes[0].nodeValue

        digested = {
            'code': first_text('Code'),
            'message': first_text('Message'),
            'resource': first_text('Resource'),
            'requestid': first_text('RequestId'),
        }
        # TraceId is optional in some responses.
        if root.getElementsByTagName('TraceId'):
            digested['traceid'] = first_text('TraceId')
        else:
            digested['traceid'] = 'Unknown'
        return digested
    except Exception:
        return "Response Error Msg Is INVALID"
40
+
41
+
42
class CosClientError(CosException):
    """Client-side error (e.g. timeout, invalid local parameters)."""

    def __init__(self, message):
        super(CosClientError, self).__init__(message)
47
+
48
+
49
class CosServiceError(CosException):
    """Server-side COS error exposing the parsed error details.

    :param method: HTTP method of the failed request (kept for signature
        compatibility; not stored).
    :param message: either the raw XML error body (str) or an already
        digested dict of error fields.
    :param status_code: HTTP status code of the failed response.
    """

    def __init__(self, method, message, status_code):
        CosException.__init__(self, message)
        if isinstance(message, dict):
            # Caller already supplied digested fields; there is no raw body.
            self._origin_msg = ''
            self._digest_msg = message
        else:
            # Keep both the raw XML body and a parsed dict view of it.
            self._origin_msg = message
            self._digest_msg = digest_xml(message)
        self._status_code = status_code

    def __str__(self):
        return str(self._digest_msg)

    def _field(self, key):
        """Return one digested field, or "Unknown" when the body was not
        parseable (digest_xml then returned a marker string, not a dict)."""
        if isinstance(self._digest_msg, dict):
            return self._digest_msg[key]
        return "Unknown"

    def get_origin_msg(self):
        """Return the raw XML error body ('' when constructed from a dict)."""
        return self._origin_msg

    def get_digest_msg(self):
        """Return the digested error info: a dict, or a marker string when
        the server response could not be parsed."""
        return self._digest_msg

    def get_status_code(self):
        """Return the HTTP status code of the failed request."""
        return self._status_code

    def get_error_code(self):
        """Return the COS-defined error code, or "Unknown" on parse failure."""
        return self._field('code')

    def get_error_msg(self):
        """Return the COS error message, or "Unknown" on parse failure."""
        return self._field('message')

    def get_resource_location(self):
        """Return the resource path from the error, or "Unknown"."""
        return self._field('resource')

    def get_trace_id(self):
        """Return the trace id of the request, or "Unknown"."""
        return self._field('traceid')

    def get_request_id(self):
        """Return the COS request id, or "Unknown"."""
        return self._field('requestid')
wemm/lib/python3.10/site-packages/qcloud_cos/cos_threadpool.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ from threading import Thread
4
+ from logging import getLogger
5
+ from six.moves.queue import Queue
6
+ from threading import Lock
7
+ import gc
8
+
9
+ logger = getLogger(__name__)
10
+
11
+
12
class WorkerThread(Thread):
    """Worker that consumes ``(func, args, kwargs)`` tasks from a shared queue.

    A task whose ``func`` is ``None`` acts as the poison pill that makes the
    thread exit its loop.
    """

    def __init__(self, task_queue, *args, **kwargs):
        super(WorkerThread, self).__init__(*args, **kwargs)
        self._task_queue = task_queue
        self._succ_task_num = 0
        self._fail_task_num = 0
        self._ret = []

    def run(self):
        while True:
            func, args, kwargs = self._task_queue.get()
            # A None func is the shutdown signal for this worker.
            if func is None:
                return
            try:
                ret = func(*args, **kwargs)
                self._succ_task_num += 1
                self._ret.append(ret)
            except Exception as e:
                logger.error(str(e))
                self._fail_task_num += 1
                # Prefer the exception's own message attributes when present.
                msg = getattr(e, '_message', None) or getattr(e, 'message', None)
                self._ret.append(msg if msg else 'meet some exception')
            finally:
                self._task_queue.task_done()

    def get_result(self):
        """Return ``(succeeded_count, failed_count, results)`` for this worker."""
        return self._succ_task_num, self._fail_task_num, self._ret
46
+
47
+
48
class SimpleThreadPool:
    """Minimal fixed-size thread pool with lazy worker startup."""

    def __init__(self, num_threads=5, num_queue=0):
        self._num_threads = num_threads
        self._queue = Queue(num_queue)
        self._lock = Lock()
        self._active = False
        self._workers = []
        self._finished = False

    def add_task(self, func, *args, **kwargs):
        """Queue one task; workers are spawned on first use."""
        if not self._active:
            with self._lock:
                # Double-checked so only the first caller starts the workers.
                if not self._active:
                    self._workers = []
                    self._active = True

                    for _ in range(self._num_threads):
                        worker = WorkerThread(self._queue)
                        self._workers.append(worker)
                        worker.start()

        self._queue.put((func, args, kwargs))

    def wait_completion(self):
        """Block until every queued task is done, then stop all workers."""
        self._queue.join()
        self._finished = True
        # One poison pill per worker so no thread stays blocked on get().
        for _ in range(self._num_threads):
            self._queue.put((None, None, None))

        self._active = False

    def get_result(self):
        """Aggregate per-worker results; only valid after wait_completion()."""
        assert self._finished
        detail = [worker.get_result() for worker in self._workers]
        succ_all = all(tp[1] == 0 for tp in detail)
        return {'success_all': succ_all, 'detail': detail}
86
+
87
+
88
if __name__ == '__main__':
    # Example usage, kept for reference only (never executed):
    #
    #   pool = SimpleThreadPool(2)
    #
    #   def task_sleep(x):
    #       from time import sleep
    #       sleep(x)
    #       return 'hello, sleep %d seconds' % x
    #
    #   def raise_exception():
    #       raise ValueError("Pa! Exception!")
    #
    #   for i in range(1000):
    #       pool.add_task(task_sleep, 0.001)
    #       print(i)
    #   pool.add_task(task_sleep, 0)
    #   pool.add_task(task_sleep, 0)
    #   pool.add_task(raise_exception)
    #   pool.add_task(raise_exception)
    #
    #   pool.wait_completion()
    #   print(pool.get_result())
    pass
wemm/lib/python3.10/site-packages/qcloud_cos/meta_insight.py ADDED
@@ -0,0 +1,908 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding=utf-8
2
+ import json
3
+
4
+ from qcloud_cos import CosS3Auth
5
+ from qcloud_cos.cos_client import logger, CosS3Client
6
+ from .cos_comm import *
7
+
8
+
9
class MetaInsightClient(CosS3Client):
    """Client for the COS MetaInsight (metadata index & search) APIs.

    Every public method delegates to :meth:`_ci_meta_insight_request`, which
    implements the request pipeline shared by all MetaInsight endpoints
    (header/param splitting, signing, sending, XML response parsing).
    """

    def _ci_meta_insight_request(self, method, path, api_name, body=None, extra_params=None, **kwargs):
        """Shared request pipeline for the MetaInsight APIs.

        :param method:(string) HTTP method: 'GET', 'POST', 'PUT' or 'DELETE'.
        :param path:(string) URL path of the API, e.g. '/dataset'.
        :param api_name:(string) public method name, used for logging only.
        :param body:(dict) optional request body, serialized to JSON when given.
        :param extra_params:(dict) optional extra query-string parameters.
        :param kwargs:(dict) request headers forwarded by the public method.
        :return(dict): response header.
        :return(dict): parsed response body, dict type.
        """
        headers = mapped(kwargs)
        final_headers = {}
        params = {}
        # Header keys starting with "response" are really query parameters.
        for key in headers:
            if key.startswith("response"):
                params[key] = headers[key]
            else:
                final_headers[key] = headers[key]
        headers = final_headers
        if extra_params:
            params.update(extra_params)
        params = format_values(params)

        url = self._conf.uri(path=path, endpoint=self._conf._endpoint_ci, useAppid=True)
        request_args = {
            'method': method,
            'url': url,
            'appid': self._conf._appid,
            'auth': CosS3Auth(self._conf, path, params=params),
            'params': params,
            'headers': headers,
            'ci_request': True,
        }
        if body is not None:
            payload = json.dumps(body)
            request_args['data'] = payload
            logger.info("{api} result, url=:{url} ,headers=:{headers}, params=:{params},body=:{body}".format(
                api=api_name,
                url=url,
                headers=headers,
                params=params,
                body=payload))
        else:
            logger.info("{api} result, url=:{url} ,headers=:{headers}, params=:{params}".format(
                api=api_name,
                url=url,
                headers=headers,
                params=params))
        rt = self.send_request(**request_args)

        data = xml_to_dict(rt.content)
        format_dict(data, ['Response'])

        response = dict(**rt.headers)
        return response, data

    def ci_create_dataset(self, Body, **kwargs):
        """Create a dataset. https://cloud.tencent.com/document/product/460/106020

        :param Body:(dict) dataset configuration.
        :param kwargs:(dict) request headers.
        :return(dict): response header.
        :return(dict): request result, dict type.

        .. code-block:: python

            config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token)
            client = CosS3Client(config)
            response, data = client.ci_create_dataset(Body={})
        """
        return self._ci_meta_insight_request('POST', '/dataset', 'ci_create_dataset', body=Body, **kwargs)

    def ci_create_dataset_binding(self, Body, **kwargs):
        """Bind a bucket to a dataset. https://cloud.tencent.com/document/product/460/106159

        :param Body:(dict) binding configuration.
        :param kwargs:(dict) request headers.
        :return(dict): response header.
        :return(dict): request result, dict type.

        .. code-block:: python

            response, data = client.ci_create_dataset_binding(Body={})
        """
        return self._ci_meta_insight_request('POST', '/datasetbinding', 'ci_create_dataset_binding', body=Body, **kwargs)

    def ci_create_file_meta_index(self, Body, **kwargs):
        """Create a file metadata index. https://cloud.tencent.com/document/product/460/106022

        :param Body:(dict) metadata index configuration.
        :param kwargs:(dict) request headers.
        :return(dict): response header.
        :return(dict): request result, dict type.

        .. code-block:: python

            response, data = client.ci_create_file_meta_index(Body={})
        """
        return self._ci_meta_insight_request('POST', '/filemeta', 'ci_create_file_meta_index', body=Body, **kwargs)

    def ci_dataset_face_search(self, Body, **kwargs):
        """Face search within a dataset. https://cloud.tencent.com/document/product/460/106166

        :param Body:(dict) face-search configuration.
        :param kwargs:(dict) request headers.
        :return(dict): response header.
        :return(dict): request result, dict type.

        .. code-block:: python

            response, data = client.ci_dataset_face_search(Body={})
        """
        return self._ci_meta_insight_request('POST', '/datasetquery/facesearch', 'ci_dataset_face_search', body=Body, **kwargs)

    def ci_dataset_simple_query(self, Body, **kwargs):
        """Simple query on a dataset. https://cloud.tencent.com/document/product/460/106375

        :param Body:(dict) query configuration.
        :param kwargs:(dict) request headers.
        :return(dict): response header.
        :return(dict): request result, dict type.

        .. code-block:: python

            response, data = client.ci_dataset_simple_query(Body={})
        """
        return self._ci_meta_insight_request('POST', '/datasetquery/simple', 'ci_dataset_simple_query', body=Body, **kwargs)

    def ci_delete_dataset(self, Body, **kwargs):
        """Delete a dataset. https://cloud.tencent.com/document/product/460/106157

        :param Body:(dict) delete-dataset configuration.
        :param kwargs:(dict) request headers.
        :return(dict): response header.
        :return(dict): request result, dict type.

        .. code-block:: python

            response, data = client.ci_delete_dataset(Body={})
        """
        return self._ci_meta_insight_request('DELETE', '/dataset', 'ci_delete_dataset', body=Body, **kwargs)

    def ci_delete_dataset_binding(self, Body, **kwargs):
        """Unbind a bucket from a dataset. https://cloud.tencent.com/document/product/460/106160

        :param Body:(dict) unbinding configuration.
        :param kwargs:(dict) request headers.
        :return(dict): response header.
        :return(dict): request result, dict type.

        .. code-block:: python

            response, data = client.ci_delete_dataset_binding(Body={})
        """
        return self._ci_meta_insight_request('DELETE', '/datasetbinding', 'ci_delete_dataset_binding', body=Body, **kwargs)

    def ci_delete_file_meta_index(self, Body, **kwargs):
        """Delete a file metadata index. https://cloud.tencent.com/document/product/460/106163

        :param Body:(dict) delete-index configuration.
        :param kwargs:(dict) request headers.
        :return(dict): response header.
        :return(dict): request result, dict type.

        .. code-block:: python

            response, data = client.ci_delete_file_meta_index(Body={})
        """
        return self._ci_meta_insight_request('DELETE', '/filemeta', 'ci_delete_file_meta_index', body=Body, **kwargs)

    def ci_describe_dataset(self, DatasetName, Statistics=False, **kwargs):
        """Query a dataset. https://cloud.tencent.com/document/product/460/106155

        :param DatasetName:(string) dataset name, unique within the account.
        :param Statistics:(bool) whether to compute real-time file statistics
            for the dataset. False (default): file total size/count in the
            reply may be stale or 0. True: return current statistics.
        :param kwargs:(dict) request headers.
        :return(dict): response header.
        :return(dict): request result, dict type.

        .. code-block:: python

            response, data = client.ci_describe_dataset(
                DatasetName='',
                Statistics=False
            )
        """
        extra = {"datasetname": DatasetName, "statistics": Statistics}
        return self._ci_meta_insight_request('GET', '/dataset', 'ci_describe_dataset', extra_params=extra, **kwargs)

    def ci_describe_dataset_binding(self, DatasetName, Uri, **kwargs):
        """Query one bucket/dataset binding. https://cloud.tencent.com/document/product/460/106485

        :param DatasetName:(string) dataset name, unique within the account.
        :param Uri:(string) resource identifier of the bound resource; only COS
            buckets are supported, format ``cos://<BucketName>`` (urlencoded),
            e.g. ``cos%3A%2F%2Fexample-125000``.
        :param kwargs:(dict) request headers.
        :return(dict): response header.
        :return(dict): request result, dict type.

        .. code-block:: python

            response, data = client.ci_describe_dataset_binding(
                DatasetName='',
                Uri=''
            )
        """
        extra = {"datasetname": DatasetName, "uri": Uri}
        return self._ci_meta_insight_request('GET', '/datasetbinding', 'ci_describe_dataset_binding', extra_params=extra, **kwargs)

    def ci_describe_dataset_bindings(self, DatasetName, NextToken=None, MaxResults=100, **kwargs):
        """List bucket/dataset bindings. https://cloud.tencent.com/document/product/460/106161

        :param DatasetName:(string) dataset name, unique within the account.
        :param NextToken:(string) pagination token returned by the previous
            call; empty/None on the first call.
        :param MaxResults:(int) max bindings per page, 0~200; 0 or unset
            means the server default of 100.
        :param kwargs:(dict) request headers.
        :return(dict): response header.
        :return(dict): request result, dict type.

        .. code-block:: python

            response, data = client.ci_describe_dataset_bindings(
                DatasetName='',
                MaxResults=100,
                NextToken=''
            )
        """
        extra = {"datasetname": DatasetName}
        if NextToken is not None:
            extra["nexttoken"] = NextToken
        extra["maxresults"] = MaxResults
        return self._ci_meta_insight_request('GET', '/datasetbindings', 'ci_describe_dataset_bindings', extra_params=extra, **kwargs)

    def ci_describe_datasets(self, NextToken=None, Prefix=None, MaxResults=100, **kwargs):
        """List datasets. https://cloud.tencent.com/document/product/460/106158

        :param NextToken:(string) pagination token returned by the previous
            call; empty/None on the first call.
        :param Prefix:(string) dataset-name prefix filter.
        :param MaxResults:(int) max datasets per page, 0~200; 0 or unset
            means the server default of 100.
        :param kwargs:(dict) request headers.
        :return(dict): response header.
        :return(dict): request result, dict type.

        .. code-block:: python

            response, data = client.ci_describe_datasets(
                MaxResults=100,
                NextToken='',
                Prefix=''
            )
        """
        extra = {}
        if NextToken is not None:
            extra["nexttoken"] = NextToken
        if Prefix is not None:
            extra["prefix"] = Prefix
        extra["maxresults"] = MaxResults
        return self._ci_meta_insight_request('GET', '/datasets', 'ci_describe_datasets', extra_params=extra, **kwargs)

    def ci_describe_file_meta_index(self, DatasetName, Uri, **kwargs):
        """Query a file metadata index. https://cloud.tencent.com/document/product/460/106164

        :param DatasetName:(string) dataset name, unique within the account.
        :param Uri:(string) resource identifier of the indexed file; only COS
            objects in this account are supported, format
            ``cos://<BucketName>/<ObjectKey>`` (urlencoded), e.g.
            ``cos://examplebucket-1250000000/test1/img.jpg``.
        :param kwargs:(dict) request headers.
        :return(dict): response header.
        :return(dict): request result, dict type.

        .. code-block:: python

            response, data = client.ci_describe_file_meta_index(
                DatasetName='',
                Uri=''
            )
        """
        extra = {"datasetname": DatasetName, "uri": Uri}
        return self._ci_meta_insight_request('GET', '/filemeta', 'ci_describe_file_meta_index', extra_params=extra, **kwargs)

    def ci_search_image(self, Body, **kwargs):
        """Image search within a dataset. https://cloud.tencent.com/document/product/460/106376

        :param Body:(dict) image-search configuration.
        :param kwargs:(dict) request headers.
        :return(dict): response header.
        :return(dict): request result, dict type.

        .. code-block:: python

            response, data = client.ci_search_image(Body={})
        """
        return self._ci_meta_insight_request('POST', '/datasetquery/imagesearch', 'ci_search_image', body=Body, **kwargs)

    def ci_update_dataset(self, Body, **kwargs):
        """Update a dataset. https://cloud.tencent.com/document/product/460/106156

        :param Body:(dict) dataset update configuration.
        :param kwargs:(dict) request headers.
        :return(dict): response header.
        :return(dict): request result, dict type.

        .. code-block:: python

            response, data = client.ci_update_dataset(Body={})
        """
        return self._ci_meta_insight_request('PUT', '/dataset', 'ci_update_dataset', body=Body, **kwargs)

    def ci_update_file_meta_index(self, Body, **kwargs):
        """Update a file metadata index. https://cloud.tencent.com/document/product/460/106162

        :param Body:(dict) metadata index update configuration.
        :param kwargs:(dict) request headers.
        :return(dict): response header.
        :return(dict): request result, dict type.

        .. code-block:: python

            response, data = client.ci_update_file_meta_index(Body={})
        """
        return self._ci_meta_insight_request('PUT', '/filemeta', 'ci_update_file_meta_index', body=Body, **kwargs)
wemm/lib/python3.10/site-packages/qcloud_cos/resumable_downloader.py ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ import json
4
+ import os
5
+ import sys
6
+ import threading
7
+ import logging
8
+ import uuid
9
+ import hashlib
10
+ import crcmod
11
+ from .cos_comm import *
12
+ from .streambody import StreamBody
13
+ from .cos_threadpool import SimpleThreadPool
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
class ResumableDownLoader(object):
    """Resumable (breakpoint-continue) downloader for a single COS object.

    The object is split into fixed-size parts which are fetched concurrently
    with ranged GETs into a temporary file. Finished parts are journaled to a
    JSON record file so an interrupted download can resume where it left off.
    """

    def __init__(self, cos_client, bucket, key, dest_filename, object_info, part_size=20, max_thread=5,
                 enable_crc=False, progress_callback=None, dump_record_dir=None, key_simplify_check=True, **kwargs):
        """
        :param cos_client: client object used to issue the ranged GET requests.
        :param bucket: bucket name.
        :param key: object key to download.
        :param dest_filename: local destination path (stored as an absolute path).
        :param object_info: dict of HEAD-object style headers; must provide
            'Content-Length', 'Last-Modified' and 'ETag' (and
            'x-cos-hash-crc64ecma' when enable_crc is True).
        :param part_size: preferred part size in MB (may be grown, see
            __determine_part_size_internal).
        :param max_thread: number of concurrent download threads.
        :param enable_crc: verify the whole-file CRC64 checksum after download.
        :param progress_callback: optional object exposing report(length).
        :param dump_record_dir: directory holding the resume record file;
            defaults to ~/.cos_download_tmp_file.
        :param key_simplify_check: forwarded to get_object.
        :param kwargs: extra headers passed through to every ranged GET.
        """
        self.__cos_client = cos_client
        self.__bucket = bucket
        self.__key = key
        self.__dest_file_path = os.path.abspath(dest_filename)
        self.__object_info = object_info
        self.__max_thread = max_thread
        self.__enable_crc = enable_crc
        self.__progress_callback = progress_callback
        self.__headers = kwargs
        self.__key_simplify_check = key_simplify_check

        self.__max_part_count = 100  # depends on whether the server side limits concurrency
        self.__min_part_size = 1024 * 1024  # 1M
        self.__part_size = self.__determine_part_size_internal(int(object_info['Content-Length']), part_size)
        self.__finished_parts = []
        self.__lock = threading.Lock()
        self.__record = None  # resume context loaded from / dumped to the record file
        if not dump_record_dir:
            self.__dump_record_dir = os.path.join(os.path.expanduser('~'), '.cos_download_tmp_file')
        else:
            self.__dump_record_dir = dump_record_dir

        record_filename = self.__get_record_filename(bucket, key, self.__dest_file_path)
        self.__record_filepath = os.path.join(self.__dump_record_dir, record_filename)
        self.__tmp_file = None

        if not os.path.exists(self.__dump_record_dir):
            os.makedirs(self.__dump_record_dir)
        logger.debug('resumale downloader init finish, bucket: {0}, key: {1}'.format(bucket, key))

    def start(self):
        """Run the download: resume from the record, fetch missing parts in a
        thread pool, then atomically move the temp file into place.

        :raises CosClientError: when some part still fails after retries, or
            when the CRC check fails (enable_crc).
        """
        logger.debug('start resumable downloade, bucket: {0}, key: {1}'.format(self.__bucket, self.__key))
        self.__load_record()  # restore the download context from the record file

        assert self.__tmp_file
        # Ensure the temp file exists; parts seek/write into it at their offsets.
        open(self.__tmp_file, 'a').close()

        # Report progress for parts already finished in a previous run.
        if self.__progress_callback:
            for finished_part in self.__finished_parts:
                self.__progress_callback.report(finished_part.length)

        parts_need_to_download = self.__get_parts_need_to_download()
        logger.debug('parts_need_to_download: {0}'.format(parts_need_to_download))
        pool = SimpleThreadPool(self.__max_thread)
        for part in parts_need_to_download:
            # HTTP Range is inclusive on both ends.
            part_range = "bytes=" + str(part.start) + "-" + str(part.start + part.length - 1)
            headers = dict.copy(self.__headers)
            headers["Range"] = part_range
            pool.add_task(self.__download_part, part, headers)

        pool.wait_completion()
        result = pool.get_result()
        if not result['success_all']:
            raise CosClientError('some download_part fail after max_retry, please downloade_file again')

        # Replace any existing destination file with the completed temp file.
        if os.path.exists(self.__dest_file_path):
            os.remove(self.__dest_file_path)
        os.rename(self.__tmp_file, self.__dest_file_path)

        if self.__enable_crc:
            self.__check_crc()

        self.__del_record()
        logger.debug('download success, bucket: {0}, key: {1}'.format(self.__bucket, self.__key))

    def __get_record_filename(self, bucket, key, dest_file_path):
        """Build a record file name unique per (bucket, key, destination)."""
        dest_file_path_md5 = hashlib.md5(dest_file_path.encode("utf-8")).hexdigest()
        key_md5 = hashlib.md5(key.encode("utf-8")).hexdigest()
        return '{0}_{1}.{2}'.format(bucket, key_md5, dest_file_path_md5)

    def __determine_part_size_internal(self, file_size, part_size):
        """Pick the real part size in bytes: at least __min_part_size, doubled
        until at most __max_part_count parts cover the file."""
        real_part_size = part_size * 1024 * 1024  # MB
        if real_part_size < self.__min_part_size:
            real_part_size = self.__min_part_size

        while real_part_size * self.__max_part_count < file_size:
            real_part_size = real_part_size * 2
        logger.debug('finish to determine part size, file_size: {0}, part_size: {1}'.format(file_size, real_part_size))
        return real_part_size

    def __splite_to_parts(self):
        """Split the whole object into PartInfo entries (last part may be short)."""
        parts = []
        file_size = int(self.__object_info['Content-Length'])
        num_parts = int((file_size + self.__part_size - 1) / self.__part_size)
        for i in range(num_parts):
            start = i * self.__part_size
            if i == num_parts - 1:
                length = file_size - start
            else:
                length = self.__part_size

            parts.append(PartInfo(i + 1, start, length))
        return parts

    def __get_parts_need_to_download(self):
        """Return the parts not yet finished (set difference on PartInfo)."""
        all_set = set(self.__splite_to_parts())
        logger.debug('all_set: {0}'.format(len(all_set)))
        finished_set = set(self.__finished_parts)
        logger.debug('finished_set: {0}'.format(len(finished_set)))
        return list(all_set - finished_set)

    def __download_part(self, part, headers):
        """Download one part with a ranged GET and write it at its offset in
        the temp file; journal it as finished and report progress."""
        with open(self.__tmp_file, 'rb+') as f:
            f.seek(part.start, 0)
            range = None
            traffic_limit = None
            if 'Range' in headers:
                range = headers['Range']

            if 'TrafficLimit' in headers:
                traffic_limit = headers['TrafficLimit']
            logger.debug("part_id: {0}, part_range: {1}, traffic_limit:{2}".format(part.part_id, range, traffic_limit))
            result = self.__cos_client.get_object(Bucket=self.__bucket, Key=self.__key, KeySimplifyCheck=self.__key_simplify_check, **headers)
            result["Body"].pget_stream_to_file(f, part.start, part.length)

        self.__finish_part(part)

        if self.__progress_callback:
            self.__progress_callback.report(part.length)

    def __finish_part(self, part):
        """Record a finished part (thread-safe) and persist the record file."""
        logger.debug('download part finished,bucket: {0}, key: {1}, part_id: {2}'.
                     format(self.__bucket, self.__key, part.part_id))
        with self.__lock:
            self.__finished_parts.append(part)
            self.__record['parts'].append({'part_id': part.part_id, 'start': part.start, 'length': part.length})
            self.__dump_record(self.__record)

    def __dump_record(self, record):
        """Persist the record as JSON, writing via a .tmp file + rename when a
        record already exists so the record file is never half-written."""
        record_filepath = self.__record_filepath
        if os.path.exists(self.__record_filepath):
            record_filepath += '.tmp'
        with open(record_filepath, 'w') as f:
            json.dump(record, f)
            logger.debug(
                'dump record to {0}, bucket: {1}, key: {2}'.format(record_filepath, self.__bucket, self.__key))
        if record_filepath != self.__record_filepath:
            os.remove(self.__record_filepath)
            os.rename(record_filepath, self.__record_filepath)

    def __load_record(self):
        """Load the resume record if present and still valid; otherwise start a
        fresh record (and a fresh uniquely-named temp file)."""
        record = None

        if os.path.exists(self.__record_filepath):
            with open(self.__record_filepath, 'r') as f:
                record = json.load(f)
            ret = self.__check_record(record)
            # Drop the record if it no longer matches the HEAD-object info.
            if not ret:
                self.__del_record()
                record = None
            else:
                self.__part_size = record['part_size']
                self.__tmp_file = record['tmp_filename']
                if not os.path.exists(self.__tmp_file):
                    # Temp file vanished: the journaled parts are unusable.
                    record = None
                    self.__tmp_file = None
                    self.__del_record()
                else:
                    self.__finished_parts = list(
                        PartInfo(p['part_id'], p['start'], p['length']) for p in record['parts'])
                    logger.debug('load record: finished parts nums: {0}'.format(len(self.__finished_parts)))
                    self.__record = record

        if not record:
            self.__tmp_file = "{file_name}_{uuid}".format(file_name=self.__dest_file_path, uuid=uuid.uuid4().hex)
            record = {'bucket': self.__bucket, 'key': self.__key, 'tmp_filename': self.__tmp_file,
                      'mtime': self.__object_info['Last-Modified'], 'etag': self.__object_info['ETag'],
                      'file_size': self.__object_info['Content-Length'], 'part_size': self.__part_size, 'parts': []}
            self.__record = record
            self.__dump_record(record)

    def __check_record(self, record):
        """True when the record still describes the same remote object version."""
        return record['etag'] == self.__object_info['ETag'] and \
            record['mtime'] == self.__object_info['Last-Modified'] and \
            record['file_size'] == self.__object_info['Content-Length']

    def __del_record(self):
        """Remove the record file (assumed to exist when this is called)."""
        os.remove(self.__record_filepath)
        logger.debug('ResumableDownLoader delete record_file, path: {0}'.format(self.__record_filepath))

    def __check_crc(self):
        """Compare the local file's CRC64-ECMA against the object's header.

        NOTE(review): this reads the whole file into memory; presumably fine
        for typical object sizes — confirm before using on very large files.
        :raises CosClientError: when both checksums are present and differ.
        """
        logger.debug('start to check crc')
        c64 = crcmod.mkCrcFun(0x142F0E1EBA9EA3693, initCrc=0, xorOut=0xffffffffffffffff, rev=True)
        with open(self.__dest_file_path, 'rb') as f:
            local_crc64 = str(c64(f.read()))
        object_crc64 = self.__object_info['x-cos-hash-crc64ecma']
        if local_crc64 is not None and object_crc64 is not None and local_crc64 != object_crc64:
            raise CosClientError('crc of client: {0} is mismatch with cos: {1}'.format(local_crc64, object_crc64))
211
+
212
+
213
class PartInfo(object):
    """Value object describing one download part.

    :param part_id: 1-based index of the part.
    :param start: byte offset of the part within the object.
    :param length: byte length of the part.

    Instances are hashable and compare equal when all three fields match, so
    they can be used in set arithmetic (see __get_parts_need_to_download).
    """

    def __init__(self, part_id, start, length):
        self.part_id = part_id
        self.start = start
        self.length = length

    def __key(self):
        # Identity tuple shared by __eq__ and __hash__ so they stay consistent.
        return self.part_id, self.start, self.length

    def __eq__(self, other):
        # Bug fix: comparing against a non-PartInfo used to raise
        # AttributeError (name-mangled other.__key()); return NotImplemented
        # per the Python data model so such comparisons evaluate to False.
        if not isinstance(other, PartInfo):
            return NotImplemented
        return self.__key() == other.__key()

    def __hash__(self):
        return hash(self.__key())

    def __repr__(self):
        return 'PartInfo(part_id={0}, start={1}, length={2})'.format(
            self.part_id, self.start, self.length)
wemm/lib/python3.10/site-packages/qcloud_cos/streambody.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding=utf-8
2
+ import os
3
+ import uuid
4
+
5
+
6
class StreamBody(object):
    """Wraps a streaming HTTP response and exposes read/save helpers."""

    def __init__(self, rt):
        """
        :param rt: streaming response object (provides headers, raw,
            iter_content).
        :raises IOError: when neither Content-Length nor chunked
            Transfer-Encoding is present.
        """
        self._rt = rt
        self._read_len = 0
        self._content_len = 0
        self._use_chunked = False
        self._use_encoding = False
        headers = self._rt.headers
        if 'Content-Length' in headers:
            self._content_len = int(headers['Content-Length'])
        elif 'Transfer-Encoding' in headers and headers['Transfer-Encoding'] == "chunked":
            self._use_chunked = True
        else:
            raise IOError("create StreamBody failed without Content-Length header or Transfer-Encoding header")

        if 'Content-Encoding' in headers:
            self._use_encoding = True

    def __iter__(self):
        """Default iterator over 1KB chunks."""
        return self._rt.iter_content(1024)

    def __len__(self):
        """Declared content length (0 for chunked responses)."""
        return self._content_len

    def get_raw_stream(self):
        """Return the underlying raw stream."""
        return self._rt.raw

    def get_stream(self, chunk_size=1024):
        """Return an iterator with a caller-chosen chunk size."""
        return self._rt.iter_content(chunk_size=chunk_size)

    def read(self, chunk_size=1024, auto_decompress=False):
        """Read up to chunk_size bytes; '' once the stream is exhausted.

        When the response is Content-Encoded and auto_decompress is False the
        raw (still-compressed) stream is read instead of the decoded one.
        """
        if self._use_encoding and not auto_decompress:
            return self._rt.raw.read(chunk_size)
        try:
            return next(self._rt.iter_content(chunk_size))
        except StopIteration:
            return ''

    def get_stream_to_file(self, file_name, auto_decompress=False):
        """Save the stream to a local file via a temp file plus rename."""
        self._read_len = 0
        tmp_file_name = "{file_name}_{uuid}".format(file_name=file_name, uuid=uuid.uuid4().hex)
        with open(tmp_file_name, 'wb') as fp:
            while True:
                chunk = self.read(1024, auto_decompress)
                if not chunk:
                    break
                self._read_len += len(chunk)
                fp.write(chunk)

        # The byte count is only checkable when the full length was declared
        # and we did not decompress on the fly.
        length_is_known = not self._use_chunked and not (self._use_encoding and auto_decompress)
        if length_is_known and self._read_len != self._content_len:
            if os.path.exists(tmp_file_name):
                os.remove(tmp_file_name)
            raise IOError("download failed with incomplete file")
        if os.path.exists(file_name):
            os.remove(file_name)
        os.rename(tmp_file_name, file_name)

    def pget_stream_to_file(self, fdst, offset, expected_len, auto_decompress=False):
        """Write the stream into an open file at the given offset (1MB chunks)."""
        self._read_len = 0
        fdst.seek(offset, 0)
        chunk_size = 1024 * 1024
        while True:
            chunk = self.read(chunk_size, auto_decompress)
            if not chunk:
                break
            self._read_len += len(chunk)
            fdst.write(chunk)

        length_is_known = not self._use_chunked and not (self._use_encoding and auto_decompress)
        if length_is_known and self._read_len != expected_len:
            raise IOError("download failed with incomplete file")
wemm/lib/python3.10/site-packages/qcloud_cos/version.py ADDED
@@ -0,0 +1 @@
 
 
1
# Version string of the qcloud_cos SDK package.
__version__ = '5.1.9.33'
wemm/lib/python3.10/site-packages/torchgen/api/__init__.py ADDED
File without changes
wemm/lib/python3.10/site-packages/torchgen/api/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (162 Bytes). View file
 
wemm/lib/python3.10/site-packages/torchgen/api/__pycache__/meta.cpython-310.pyc ADDED
Binary file (402 Bytes). View file
 
wemm/lib/python3.10/site-packages/torchgen/api/__pycache__/native.cpython-310.pyc ADDED
Binary file (3.17 kB). View file
 
wemm/lib/python3.10/site-packages/torchgen/api/autograd.py ADDED
@@ -0,0 +1,663 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import re
3
+ from dataclasses import dataclass
4
+ from typing import Dict, List, Match, Optional, Sequence, Set, Tuple
5
+
6
+ from torchgen.api import cpp
7
+ from torchgen.api.types import BaseCType, Binding, NamedCType, tensorListT
8
+ from torchgen.model import (
9
+ FunctionSchema,
10
+ NativeFunction,
11
+ NativeFunctionsViewGroup,
12
+ SchemaKind,
13
+ Type,
14
+ )
15
+ from torchgen.utils import IDENT_REGEX
16
+
17
@dataclass(frozen=True)
class SavedAttribute:
    """Represents a saved attribute involved in backward calculation.

    Note that it can be a derived property of an input argument, e.g.
    we could save `other.scalar_type()` instead of the entire `other` tensor.
    """

    # The NamedCType holds the updated name and cpp type of the attribute
    # for the name, Suffix is appended if it's derived property, e.g.: `other_scalar_type`
    nctype: NamedCType

    # The expression to read the derived property at save time, e.g.:
    # `other.scalar_type()`.
    expr: str
29
+
30
+
31
@dataclass(frozen=True)
class Derivative:
    """Represents a backward formula that calculates derivatives for one
    or more tensors."""

    # The formula string (legit C++ expression).
    # Note that expressions against input arguments have been replaced with the
    # corresponding saved attributes.
    # E.g.:
    #  raw formula: `mul_tensor_backward(grad, self, other.scalar_type())`
    #         here: `mul_tensor_backward(grad, self, other_scalar_type)`
    formula: str

    # The formula string before input argument replacement
    original_formula: str

    # Names of the arguments for which this formula calculates derivatives.
    var_names: Tuple[str, ...]

    # Saved inputs that are referenced by the formula.
    saved_inputs: Tuple[SavedAttribute, ...]

    # Saved outputs that are referenced by the formula.
    saved_outputs: Tuple[SavedAttribute, ...]

    # Gradients that are referenced by name in the formula.
    named_gradients: Set[str]
57
+
58
+
59
@dataclass(frozen=True)
class ForwardDerivative:
    """Represents a forward formula that calculates forward derivatives
    for one tensor."""

    # The formula string (legit C++ expression).
    # Note that special keywords such as "linear" or "element_wise" have been
    # replaced by the automatically generated formula.
    formula: str

    # Name of the output arguments for which this formula calculates forward
    # derivatives
    var_names: Tuple[str, ...]

    # Type of the output arguments for which this formula calculates forward
    # derivatives
    var_types: Tuple[Type, ...]

    # Inputs for which the forward derivatives are required for this formula
    required_inputs_fw_grad: Optional[Tuple[str, ...]]

    # Inputs for which the primal is required for this formula
    required_inputs_primal: Optional[Tuple[str, ...]]

    # Flag to specify if this formula requires the original value of self
    # This is only used by inplace operations
    required_original_self_value: bool

    # If this formula is specified in derivatives.yaml or if we are re-using the
    # out of place formula for inplace
    is_reusing_outplace_formula: bool
89
+
90
+
91
@dataclass(frozen=True)
class DifferentiabilityInfo:
    """Represents differentiability info for a NativeFunction."""

    # The base name read from derivatives.yaml.
    name: str

    # The matching native function.
    #
    # There can be multiple NativeFunction having the same base name:
    #  - different overloads with different types of input arguments;
    #  - in-place/out/functional variants of the same function;
    #
    # We first use the schema string (under the 'name' key) in derivatives.yaml
    # to find the NativeFunction having the same schema string.
    # Then we find the in-place/out/functional variants of the matching function.
    # Among these variants, we choose the one having the same name as the
    # derivatives.yaml entry. If there is no exact match, then we choose the
    # in-place variant.
    # TODO: maybe the logic to search for all variants is no longer necessary?
    func: NativeFunction

    # The name of the generated autograd function.
    # It's set only if we will calculate a derivative, i.e.
    # 'args_with_derivatives' is not empty.
    op: Optional[str]

    # The derivatives formulae for this function.
    # Note that the length of this sequence is the number of differentiable inputs
    derivatives: Sequence[Derivative]

    # The forward derivatives formulae for this function.
    # Note that the length of this sequence is the number of differentiable outputs
    forward_derivatives: Sequence[ForwardDerivative]

    # The union of 'saved_inputs' of all 'derivatives'.
    all_saved_inputs: Sequence[SavedAttribute]

    # The union of 'saved_outputs' of all 'derivatives'.
    all_saved_outputs: Sequence[SavedAttribute]

    # All named gradients that are available for use, in the same
    # order as in the grads vector.
    available_named_gradients: Sequence[str]

    # The named gradients that are used in any of the derivatives.
    # Invariant: all(name in available_named_gradients for name in used_named_gradients)
    used_named_gradients: Set[str]

    # The function's input arguments for which it calculates derivatives.
    # It's the union of 'var_names' of all 'derivatives', sorted by the
    # argument order in the function schema.
    args_with_derivatives: Sequence[Binding]

    # Names of arguments whose derivative formula is 'non_differentiable'.
    non_differentiable_arg_names: Sequence[str]

    # Raw data read from derivatives.yaml.
    output_differentiability: Optional[List[bool]]

    # output_differentiability in derivatives.yaml can be a list of
    # conditions that express if the output is differentiable. In this case,
    # the number of conditions must match the number of outputs
    # (NB: we only support one condition right now).
    # output_differentiability gets populated with True for each condition,
    # while output_differentiability_conditions gets populated with the conditions
    output_differentiability_conditions: Optional[List[str]]

    @property
    def has_derivatives(self) -> bool:
        """True when at least one input argument has a derivative formula."""
        return len(self.args_with_derivatives) > 0

    # Generates a new DifferentiabilityInfo using the exact same set of derivative information,
    # but with a new operator name.
    # This is used when generating "copy" variants of view ops,
    # which are able to use the exact same derivative formula as the original view op
    # See Note [Codegen'd {view}_copy Operators]
    def create_view_copy_from_view_derivative(
        self, g: NativeFunctionsViewGroup
    ) -> Optional["DifferentiabilityInfo"]:
        """Clone this info for the `{view}_copy` operator of *g*, or return
        None when the group has no view_copy variant."""
        if g.view_copy is None:
            return None
        f = g.view_copy

        name_split_by_period = self.name.split(".", maxsplit=2)
        # Append a "_copy" to the base name of the operator (but keep the overload name the same)
        view_copy_name = f"{name_split_by_period[0]}_copy." + ".".join(
            name_split_by_period[1:]
        )
        view_copy_op_name = None if self.op is None else f"{self.op}_copy"

        return DifferentiabilityInfo(
            # Use the "_copy" version of name/func/op
            name=view_copy_name,
            func=f,
            op=view_copy_op_name,
            # But keep all derivative info the same
            derivatives=self.derivatives,
            forward_derivatives=self.forward_derivatives,
            all_saved_inputs=self.all_saved_inputs,
            all_saved_outputs=self.all_saved_outputs,
            available_named_gradients=self.available_named_gradients,
            used_named_gradients=self.used_named_gradients,
            args_with_derivatives=self.args_with_derivatives,
            non_differentiable_arg_names=self.non_differentiable_arg_names,
            output_differentiability=self.output_differentiability,
            output_differentiability_conditions=self.output_differentiability_conditions,
        )
198
+
199
+
200
def uses_ident(info: Optional[DifferentiabilityInfo], ident: str) -> bool:
    """Return True when any backward formula in *info* references *ident*
    as a standalone identifier (per IDENT_REGEX)."""
    if info is None:
        return False
    pattern = IDENT_REGEX.format(ident)
    return any(re.search(pattern, d.formula) for d in info.derivatives)
208
+
209
+
210
def uses_retain_variables(info: Optional[DifferentiabilityInfo]) -> bool:
    """Return True when any derivative formula references `retain_variables`."""
    return uses_ident(info, "retain_variables")
212
+
213
+
214
def uses_single_grad(info: Optional[DifferentiabilityInfo]) -> bool:
    """Return True when any derivative formula references the identifier `grad`."""
    return uses_ident(info, "grad")
216
+
217
+
218
@dataclass(frozen=True)
class DifferentiableInput:
    """Represents a differentiable `Argument`.

    How is it different from the `Argument` type?
    - It's processed Arguments which are differentiable and only used in the
      context of the autograd codegen;
    - It can represent SelfArgument or regular Argument but not
      TensorOptionsArgument;
    """

    name: str
    type: Type

    # TODO: only to keep it byte-for-byte compatible with the old codegen, should remove.
    cpp_type: str
230
+
231
+
232
@dataclass(frozen=True)
class DifferentiableOutput:
    """Represents a differentiable `Return`.

    How it it different from the `Return` type?
    - The name in `Return` is optional. Here it is always populated using the
      same `cpp.return_names()` method.
      TODO: some cpp naming logic (e.g. resolving name conflict) might be
      irrelevant?
    - It's processed Returns which are differentiable, in compliance with the
      `output_differentiability` field defined in derivatives.yaml (if
      specified), and are only used in the context of the autograd codegen;
    """

    name: str
    type: Type

    # TODO: only to keep it byte-for-byte compatible with the old codegen, should remove.
    cpp_type: str
247
+
248
+
249
@dataclass(frozen=True)
class NativeFunctionWithDifferentiabilityInfo:
    """Pairs a NativeFunction with its differentiability info.

    `info` and `fw_derivatives` are presumably keyed by dispatch key (e.g.
    "Default") — see how "Default" is used elsewhere in this file; None when
    no matching entry was found.
    """

    func: NativeFunction
    info: Optional[Dict[str, DifferentiabilityInfo]]
    fw_derivatives: Optional[Dict[str, Sequence[ForwardDerivative]]]
254
+
255
+
256
+ # TODO: Update comment below since it is out of date.
257
def dispatch_strategy(fn: NativeFunctionWithDifferentiabilityInfo) -> str:
    """How are we going to call the underlying implementation of a
    declaration? There are two strategies:
    - use_derived: we want to call the implementation on CPUDoubleType
      (or a similar, derived Type instance). Because these derived
      instances deal in Tensors, not Variables (it's a completely different
      object, so it doesn't dispatch back to VariableType), code on
      this dispatch path needs to wrap/unwrap tensors. If the
      derived implementation takes and returns tensors, the
      implementation is usually differentiable (although we also use
      the derived dispatch path for non-differentiable functions
      that we still want to dispatch on the derived Type instance;
      e.g., size())
    - use_type: we want to call the implementation on Type, because
      it is implemented concretely, and the functions it invokes will
      get dispatched back to VariableType (which will ensure that they
      are differentiable.)
    """
    # fn is derived as long as any of its per-key differentiability infos
    # has_derivatives. dispatch_strategy() is used to guard generation of fns
    # in VariableType and ADInplaceOrViewType. We want to generate these
    # functions as long as a derivative is defined for ANY dispatch key.
    any_key_has_derivatives = fn.info is not None and any(
        info.has_derivatives for info in fn.info.values()
    )

    if not (fn.func.is_abstract or any_key_has_derivatives):
        # If the function is concrete (we don't have to override it) and we
        # didn't declare it in derivatives.yaml, we'll assume that it is
        # actually implemented out of differentiable functions. (This
        # assumption might not hold, but then you'll see gradcheck fail.)
        return "use_type"

    # If the function is abstract (not implemented on at::Type), we must
    # call the implementation on the derived type with unpacked tensors.

    # If the function has a derivative specified and is concrete, we could
    # call either implementation. We prefer the calling the derived
    # type's implementation with unpacked tensors because it is more
    # performant in some cases: any internal calls to other ATen functions
    # won't have the history tracked.

    # If the function has a type dispatched argument (i.e. is a factory),
    # we prefer calling the derived type's implementation both because it is
    # more performant and to ensure factory functions return tensors with
    # _version of 0 (probably not strictly necessary, but nice to have to
    # keeps versions simple to understand.
    return "use_derived"
304
+
305
+
306
+ def match_differentiability_info(
307
+ native_functions: List[NativeFunction],
308
+ differentiability_infos: Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]],
309
+ ) -> List[NativeFunctionWithDifferentiabilityInfo]:
310
+ """Sets the "derivative" key on declarations to matching autograd function
311
+ In-place functions will use the out-of-place derivative definition if there
312
+ is no in-place specific derivative.
313
+ """
314
+
315
+ functional_info_by_signature = {
316
+ schema.signature(strip_default=True): info_dict
317
+ for schema, info_dict in differentiability_infos.items()
318
+ if schema.kind() == SchemaKind.functional
319
+ }
320
+ non_functional_info_by_signature = {
321
+ schema.signature(strip_default=True): info_dict
322
+ for schema, info_dict in differentiability_infos.items()
323
+ if schema.kind() != SchemaKind.functional
324
+ }
325
+
326
+ def find_info(
327
+ f: NativeFunction,
328
+ ) -> Tuple[Optional[Dict[str, DifferentiabilityInfo]], bool]:
329
+ # Don't bother matching info to generated out= variants
330
+ if "generated" in f.tags and f.func.kind() == SchemaKind.out:
331
+ return None, False
332
+
333
+ # (1) Check for an exact match
334
+ if f.func in differentiability_infos:
335
+ return differentiability_infos[f.func], True
336
+
337
+ # (2) If no exact match, check if the out-of-place variant
338
+ # of this operator has a match.
339
+ # i.e mul() for mul_() or mul_out()
340
+ f_sig = f.func.signature(strip_default=True)
341
+ if f_sig in functional_info_by_signature:
342
+ return functional_info_by_signature[f_sig], False
343
+
344
+ # (3) Some operators have a derivative explicitly defined for the mutable
345
+ # variant, but get a code-generated out-of-place variant which does *not*
346
+ # come with a derivative formula.
347
+ # For the generated out-of-place variant, use the mutable variant's formula
348
+ # if it exists.
349
+ if "generated" in f.tags and f_sig in non_functional_info_by_signature:
350
+ info_dict = non_functional_info_by_signature[f_sig]
351
+ # See https://github.com/pytorch/pytorch/pull/76320/files#r874816389
352
+ assert not any(
353
+ any("self" in str(inpt.nctype.name) for inpt in info.all_saved_inputs)
354
+ for info in info_dict.values()
355
+ ), f"""\
356
+ Attempted to convert a derivative formula for a mutable operator
357
+ to be used by automatically by its functional variant ("{str(f.func)}").
358
+ this is not currently supported (we'd need to fix up the formula in the codegen)."""
359
+ return info_dict, False
360
+
361
+ # (4) Generate derivative information of unary foreach functions if none is defined in `derivatives.yaml`
362
+ base_op_name = f.func.name.name
363
+ if (
364
+ base_op_name.base.startswith("_foreach")
365
+ and not base_op_name.inplace
366
+ and len(f.func.arguments.post_self_positional) == 0
367
+ ):
368
+ ref_native_op_name = base_op_name.base.split("_foreach_")[-1]
369
+ for function_schema in functional_info_by_signature:
370
+ if (
371
+ function_schema.name.name.base == ref_native_op_name
372
+ and not function_schema.name.name.inplace
373
+ ):
374
+ all_saved_inputs = []
375
+ all_saved_outputs = []
376
+ diff_info_dict = copy.deepcopy(
377
+ differentiability_infos[function_schema]
378
+ )
379
+ diff_info = diff_info_dict["Default"]
380
+ modified_derivative_formulas = []
381
+ for derivative in diff_info.derivatives:
382
+ saved_inputs = []
383
+ saved_outputs = []
384
+ modified_formula = (
385
+ derivative.formula.replace("grad", "grads[i]")
386
+ .replace("self", "self[i]")
387
+ .replace("result", "result[i]")
388
+ )
389
+ if "self" in modified_formula:
390
+ saved_inputs.append(
391
+ SavedAttribute(
392
+ nctype=NamedCType(
393
+ name="self", type=BaseCType(tensorListT)
394
+ ),
395
+ expr="self",
396
+ )
397
+ )
398
+ all_saved_inputs.append(saved_inputs[-1])
399
+ if "result" in modified_formula:
400
+ saved_outputs.append(
401
+ SavedAttribute(
402
+ nctype=NamedCType(
403
+ name="result", type=BaseCType(tensorListT)
404
+ ),
405
+ expr="result",
406
+ )
407
+ )
408
+ all_saved_outputs.append(saved_outputs[-1])
409
+ modified_derivative = Derivative(
410
+ formula=modified_formula,
411
+ original_formula=derivative.original_formula,
412
+ var_names=("self",),
413
+ saved_inputs=tuple(saved_inputs),
414
+ saved_outputs=tuple(saved_outputs),
415
+ named_gradients=set(),
416
+ )
417
+ modified_derivative_formulas.append(modified_derivative)
418
+ assert f.func.arguments.self_arg is not None
419
+ diff_info = DifferentiabilityInfo(
420
+ name=base_op_name.base,
421
+ func=f,
422
+ op=f"Foreach{diff_info.op}",
423
+ derivatives=modified_derivative_formulas,
424
+ forward_derivatives=[],
425
+ all_saved_inputs=tuple(set(all_saved_inputs)),
426
+ all_saved_outputs=tuple(set(all_saved_outputs)),
427
+ available_named_gradients=(),
428
+ used_named_gradients=set(),
429
+ args_with_derivatives=[
430
+ Binding(
431
+ name="self",
432
+ nctype=NamedCType(
433
+ name="self", type=BaseCType(tensorListT)
434
+ ),
435
+ argument=f.func.arguments.self_arg.argument,
436
+ default=None,
437
+ )
438
+ ],
439
+ non_differentiable_arg_names=[],
440
+ output_differentiability=None,
441
+ output_differentiability_conditions=None,
442
+ )
443
+ diff_info_dict["Default"] = diff_info
444
+ if f.func not in differentiability_infos:
445
+ differentiability_infos[f.func] = diff_info_dict
446
+ functional_info_by_signature[f.func] = diff_info_dict
447
+ return diff_info_dict, True
448
+
449
+ return None, False
450
+
451
+ result: List[NativeFunctionWithDifferentiabilityInfo] = []
452
+ for f in native_functions:
453
+ info_dict, is_exact_match = find_info(f)
454
+
455
+ # Currently, the '.strides()' to 'strides_or_error' replacement does not support
456
+ # 'self' derivatives of an inplace function, so we must check for this case.
457
+ if f.func.kind() == SchemaKind.inplace and (info_dict is not None):
458
+ for info in info_dict.values():
459
+ for derivative in info.derivatives:
460
+ if "self" in derivative.var_names:
461
+ for saved_input in derivative.saved_inputs:
462
+ assert "strides_or_error" not in saved_input.expr, (
463
+ "Calling '.strides()' in the 'self' derivative formula of an "
464
+ f"in-place function is not supported: {f.func}"
465
+ )
466
+
467
+ if not info_dict:
468
+ result.append(
469
+ NativeFunctionWithDifferentiabilityInfo(
470
+ func=f, info=None, fw_derivatives=None
471
+ )
472
+ )
473
+ continue
474
+
475
+ fw_derivative_dict: Dict[str, Sequence[ForwardDerivative]] = {}
476
+ for key, info in info_dict.items():
477
+ if not info.forward_derivatives:
478
+ fw_derivative_dict[key] = []
479
+ continue
480
+
481
+ forward_derivatives = info.forward_derivatives
482
+
483
+ # For functions that have a single def for out-of-place and inplace (like abs())
484
+ if f.func.kind() == SchemaKind.inplace:
485
+ # For inplace functions there is a little bit of work to do:
486
+ # 1) Validate the formula and make sure the input that is modified in not used:
487
+ # - If there is a formula for the inplace variant of the function (is_exact_match == True) then
488
+ # we make sure that the original value of the input that is being modified inplace (self_p) is
489
+ # not used in the formula. Note that the formula can use "original_self_p" here and that would
490
+ # trigger a clone of the original input.
491
+ # - If we are re-using the out of place formula (is_exact_match == False) then we replace every
492
+ # occurrence of self_p and self_t by original_self_p and original_self_t. These will be
493
+ # populated by cloned version of the original input (either the clone done by the backward AD
494
+ # logic if self is also used in a backward formula or a special clone that we add).
495
+ # 2) At this point, there cannot be a self_p in the formula.
496
+ # 3) Change "result" into "self_p" as by design, in the inplace function codegen, the result is
497
+ # simply called self (as it is modified inplace).
498
+ # 4) Update the required primals data in case it used to contain "result" but should now contain
499
+ # "self"
500
+ # 5) If it is not an exact match, the user formula is not modifying the existing forward grad
501
+ # inplace as it should. So add some code that makes sure that we do so if the forward grad
502
+ # already exists.
503
+
504
+ assert (
505
+ len(info.forward_derivatives) == 1
506
+ ) # Only single output inplace should exist
507
+ fw_info = info.forward_derivatives[0]
508
+ formula = fw_info.formula
509
+
510
+ def replace_self_with_original_self(formula: str, postfix: str) -> str:
511
+ def repl(m: Match[str]) -> str:
512
+ return f"{m.group(1)}original_self{postfix}{m.group(2)}"
513
+
514
+ return re.sub(IDENT_REGEX.format(f"self{postfix}"), repl, formula)
515
+
516
+ if re.search(IDENT_REGEX.format("self_p"), formula):
517
+ if is_exact_match:
518
+ # For manually defined formulas, don't allow the original value to be used
519
+ raise RuntimeError(
520
+ f'The formula for "{f.func.name}" is using the original value of self '
521
+ "that is being modified inplace. This would lead to wrong forward gradients. "
522
+ 'Please use "result" in the formula only.'
523
+ )
524
+ else:
525
+ # When the original formula is out of place, we save a clone of the primal
526
+ # value to be able to access this value if needed
527
+ # replace "self_p"/"self_t" from the formula by "original_self_p"/"original_self_t"
528
+ formula = replace_self_with_original_self(formula, "_p")
529
+ formula = replace_self_with_original_self(formula, "_t")
530
+
531
+ # replace "result" from the formula by "self_p"
532
+ def repl(m: Match[str]) -> str:
533
+ return f"{m.group(1)}self_p{m.group(2)}"
534
+
535
+ formula = re.sub(IDENT_REGEX.format("result"), repl, formula)
536
+
537
+ required_primals = fw_info.required_inputs_primal
538
+ if re.search(IDENT_REGEX.format("self_p"), formula):
539
+ required_primals = (
540
+ required_primals + ("self",) if required_primals else ("self",)
541
+ )
542
+
543
+ if not is_exact_match:
544
+ # NOTE [In-place forward AD formula Optimization]
545
+ #
546
+ # This optimization transforms the formula to directly do inplace, i.e.
547
+ # instead of self_t.copy_(self_t.op()) we do self_t.op_() when the following are met:
548
+ #
549
+ # 1) the formula satisfies the pattern: "self_t.op(*args)"
550
+ # 2) "op" in (1) needs to be the same as the op the derivative is for
551
+ #
552
+ # (2) may seem too strict, but currently the only ops that satisfy (1) also satisfy (2)
553
+ # If there is a need, we can relax (2) to allow any op that has an in-place variant
554
+ is_single_method_on_self_t = False
555
+ directly_do_inplace = False
556
+ op_name: Optional[str] = None
557
+ between_parens: Optional[str] = None
558
+ match = re.fullmatch(r"self_t.([\w]*)\((.*)\)", formula)
559
+ if match:
560
+ op_name, between_parens = match.group(1), match.group(2)
561
+
562
+ # We want to...
563
+ # Match: self_t.op1(other_p.op2(arg))
564
+ # Avoid: self_t.op1(args) + self_t.op2(args)
565
+ # Avoid: self_t.op1(other_p.op2(arg)) + self_t.op2(args)
566
+ def check_parens_nest_level_gt_zero(s: str) -> bool:
567
+ level = 1
568
+ for ch in s:
569
+ if ch == ")":
570
+ level -= 1
571
+ if level == 0:
572
+ return False
573
+ if ch == "(":
574
+ level += 1
575
+ return True
576
+
577
+ is_single_method_on_self_t = check_parens_nest_level_gt_zero(
578
+ between_parens
579
+ )
580
+ directly_do_inplace = (
581
+ is_single_method_on_self_t and op_name == info.name
582
+ )
583
+
584
+ if directly_do_inplace:
585
+ assert op_name is not None
586
+ assert between_parens is not None
587
+ formula = f"self_t_raw.defined() ? self_t_raw.{op_name}_({between_parens}) : {formula}"
588
+ else:
589
+ # Make sure that the forward grad is modified inplace when the original formula
590
+ # is out of place
591
+ formula = f"self_t_raw.defined() ? self_t_raw.copy_({formula}) : {formula}"
592
+
593
+ required_original_self_value = bool(
594
+ re.search(IDENT_REGEX.format("original_self_p"), formula)
595
+ ) or bool(re.search(IDENT_REGEX.format("original_self_t"), formula))
596
+
597
+ forward_derivatives = [
598
+ ForwardDerivative(
599
+ formula=formula,
600
+ var_names=("self",),
601
+ var_types=fw_info.var_types,
602
+ required_inputs_fw_grad=fw_info.required_inputs_fw_grad,
603
+ required_inputs_primal=required_primals,
604
+ required_original_self_value=required_original_self_value,
605
+ is_reusing_outplace_formula=not is_exact_match,
606
+ ),
607
+ ]
608
+
609
+ fw_derivative_dict[key] = forward_derivatives
610
+
611
+ result.append(
612
+ NativeFunctionWithDifferentiabilityInfo(
613
+ func=f, info=info_dict, fw_derivatives=fw_derivative_dict
614
+ )
615
+ )
616
+
617
+ return result
618
+
619
+
620
def is_differentiable(
    name: str, type: Type, info: Optional[DifferentiabilityInfo]
) -> bool:
    """Return True if a value named `name` of JIT type `type` is differentiable.

    A value is differentiable when it is tensor-like and not explicitly
    listed in `info.non_differentiable_arg_names` (absent info, any
    tensor-like value counts).
    """
    # Guard-clause formulation of: tensor-like AND not explicitly excluded.
    if not type.is_tensor_like():
        return False
    if info is None:
        return True
    return name not in info.non_differentiable_arg_names
626
+
627
+
628
def gen_differentiable_outputs(
    fn: NativeFunctionWithDifferentiabilityInfo, key: str = "Default"
) -> List[DifferentiableOutput]:
    """Compute which outputs of `fn` participate in autograd for dispatch `key`.

    If derivatives.yaml supplied an explicit `output_differentiability` mask,
    that mask is validated and applied; otherwise every tensor-like output not
    marked non-differentiable is a candidate.
    """
    f = fn.func
    # fn.info may be None when the function has no derivatives.yaml entry.
    info = fn.info[key] if fn.info else None
    outputs: List[DifferentiableOutput] = [
        DifferentiableOutput(
            name=name,
            type=ret.type,
            cpp_type=cpp.return_type(ret, symint=True).cpp_type(),
        )
        for name, ret in zip(cpp.return_names(f), f.func.returns)
    ]
    output_differentiability = info.output_differentiability if info else None
    if output_differentiability is not None:
        # Explicit per-output mask; it must cover every return exactly.
        if len(output_differentiability) != len(outputs):
            raise RuntimeError(
                f"The length of output_differentiability ({len(output_differentiability)}), "
                f"does not match the number of outputs ({len(outputs)})."
            )
        differentiable_outputs: List[DifferentiableOutput] = []
        # In-place ops must not mark any output non-differentiable: skipping one
        # would skip its version_counter update.
        if False in output_differentiability and f.func.kind() == SchemaKind.inplace:
            raise RuntimeError(
                "output_differentiability=False for inplace operation (version_counter won't get updated)"
            )
        for differentiable, output in zip(output_differentiability, outputs):
            if differentiable:
                differentiable_outputs.append(output)
        return differentiable_outputs
    # No explicit mask: keep tensor-like, non-excluded outputs.
    candidate_differentiable_outputs = list(
        filter(lambda r: is_differentiable(r.name, r.type, info), outputs)
    )
    # NOTE(review): uses_single_grad presumably indicates the backward formula
    # consumes only the first output's gradient — confirm in that helper.
    if uses_single_grad(info):
        return candidate_differentiable_outputs[:1]
    else:
        return candidate_differentiable_outputs
wemm/lib/python3.10/site-packages/torchgen/api/cpp.py ADDED
@@ -0,0 +1,460 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional, Sequence, Set, Union
2
+
3
+ from torchgen import local
4
+ from torchgen.api.types import (
5
+ ArgName,
6
+ ArrayCType,
7
+ ArrayRefCType,
8
+ BaseCType,
9
+ BaseTypeToCppMapping,
10
+ Binding,
11
+ boolT,
12
+ ConstRefCType,
13
+ CType,
14
+ dimnameListT,
15
+ intArrayRefT,
16
+ iTensorListRefT,
17
+ ListCType,
18
+ longT,
19
+ MutRefCType,
20
+ NamedCType,
21
+ OptionalCType,
22
+ optionalIntArrayRefT,
23
+ optionalSymIntArrayRefT,
24
+ scalarT,
25
+ SpecialArgName,
26
+ symIntArrayRefT,
27
+ SymIntT,
28
+ tensorListT,
29
+ tensorOptionsT,
30
+ tensorT,
31
+ TupleCType,
32
+ VectorCType,
33
+ voidT,
34
+ )
35
+ from torchgen.model import (
36
+ Argument,
37
+ Arguments,
38
+ BaseTy,
39
+ BaseType,
40
+ FunctionSchema,
41
+ ListType,
42
+ NativeFunction,
43
+ OptionalType,
44
+ Return,
45
+ SelfArgument,
46
+ TensorOptionsArguments,
47
+ Type,
48
+ )
49
+ from torchgen.utils import assert_never
50
+
51
+ # This file describes the translation of JIT schema to the public C++
52
+ # API, which is what people use when they call functions like at::add.
53
+ #
54
+ # Prominent characteristics of the C++ API:
55
+ #
56
+ # - dtype, layout, device and pin_memory are collected into
57
+ # a single C++ type TensorOptions (the native functions API
58
+ # also has this, but tensor options is really most relevant
59
+ # for the C++ API; it makes calling kwarg factory functions
60
+ # pleasant)
61
+ #
62
+ # - defaulting lives here (in fact, the dispatcher is completely
63
+ # oblivious of defaults!)
64
+ #
65
+ # BTW: policy on name collisions: we try not to have types with
66
+ # collisions, but functions are fair game to collide
67
+
68
+
69
def name(
    func: FunctionSchema,
    *,
    faithful_name_for_out_overloads: bool = False,
    symint_overload: bool = False,
) -> str:
    """Public C++ API name for a JIT schema.

    Appends `_symint` for SymInt overloads, and `_out`/`_outf` (faithful
    spelling) for out-variant functions.
    """
    pieces = [str(func.name.name)]
    if symint_overload:
        pieces.append("_symint")
    if func.is_out_fn():
        # Faithful signatures spell the out variant `_outf`; the conventional
        # C++ API spells it `_out`.
        pieces.append("_outf" if faithful_name_for_out_overloads else "_out")
    return "".join(pieces)
85
+
86
+
87
+ # Translation of "value types" in JIT schema to C++ API type. Value
88
+ # types look the same no matter if they are argument types or return
89
+ # types. Returns None if the type in question is not a value type.
90
def valuetype_type(
    t: Type,
    *,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
    symint: bool = False,
) -> Optional[NamedCType]:
    """Translate a JIT "value type" to its C++ type, or None for non-value types.

    Tensor and Scalar return None here: their C++ spelling depends on position
    (argument vs. return), so the caller handles them.
    """
    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor or t.name == BaseTy.Scalar:
            return None
        # SymInt keeps its SymIntT spelling only in SymInt-aware signatures;
        # otherwise it falls back to longT.
        elif str(t) == "SymInt":
            if symint:
                return NamedCType(binds, BaseCType(SymIntT))
            else:
                return NamedCType(binds, BaseCType(longT))
        if remove_non_owning_ref_types:
            if t.name == BaseTy.str:
                raise AssertionError(
                    "string ref->value conversion: not implemented yet"
                )
        # All other BaseType currently map directly to BaseCppTypes.
        return NamedCType(binds, BaseCType(BaseTypeToCppMapping[t.name]))
    elif isinstance(t, OptionalType):
        # Optional of a value type wraps the element's C type; optional of a
        # non-value type propagates None to the caller.
        elem = valuetype_type(t.elem, binds=binds, symint=symint)
        if elem is None:
            return None
        return NamedCType(binds, OptionalCType(elem.type))
    elif isinstance(t, ListType):
        # Only fixed-size bool lists are value types (ArrayCType of boolT).
        if str(t.elem) == "bool":
            assert t.size is not None
            return NamedCType(binds, ArrayCType(BaseCType(boolT), t.size))
        else:
            return None
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")
125
+
126
+
127
+ # Translation of types occurring in JIT arguments to a C++ argument type.
128
+ # If remove_non_owning_ref_types is set, we'll guarantee that the outputted CType is not a non-owning reference type.
129
+ # For example, we'll return std::vector<int> instead of IntArrayRef.
130
+ # See Note [translation from C++ reference to value types]
131
def argumenttype_type(
    t: Type,
    *,
    mutable: bool,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
    symint: bool = False,
) -> NamedCType:
    """Translate a JIT argument type into its public C++ API argument type.

    Args:
        t: the JIT schema type.
        mutable: whether the argument is written to.
        binds: the name this type is bound to in the C++ signature.
        remove_non_owning_ref_types: produce owning types (e.g. VectorCType)
            instead of non-owning references (e.g. intArrayRefT).
        symint: keep SymInt spellings instead of decaying them to longT.
    """
    # If it's a value type, do the value type translation
    r = valuetype_type(
        t,
        binds=binds,
        symint=symint,
        remove_non_owning_ref_types=remove_non_owning_ref_types,
    )
    if r is not None:
        return r

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            # Mutable tensors pass as Tensor& unless the file-local setting
            # requests const Tensor& for mutable tensors.
            if mutable and not local.use_const_ref_for_mutable_tensors():
                return NamedCType(binds, MutRefCType(BaseCType(tensorT)))
            else:
                return NamedCType(binds, ConstRefCType(BaseCType(tensorT)))
        elif t.name == BaseTy.Scalar:
            return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
        else:
            raise AssertionError(f"base type should have been value type {t}")
    elif isinstance(t, OptionalType):
        if str(t.elem) == "Tensor":
            if mutable and not local.use_const_ref_for_mutable_tensors():
                return NamedCType(
                    binds, MutRefCType(BaseCType(tensorT))
                )  # TODO: fix this discrepancy
            else:
                return NamedCType(
                    binds, ConstRefCType(OptionalCType(BaseCType(tensorT)))
                )
        elif str(t.elem) == "Scalar":
            return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT))))
        elif isinstance(t.elem, ListType) and str(t.elem.elem) == "int":
            return NamedCType(binds, BaseCType(optionalIntArrayRefT))
        elif isinstance(t.elem, ListType) and str(t.elem.elem) == "SymInt":
            # Optional int/SymInt lists get dedicated optional-array-ref types.
            if symint:
                return NamedCType(binds, BaseCType(optionalSymIntArrayRefT))
            else:
                return NamedCType(binds, BaseCType(optionalIntArrayRefT))
        # Fallthrough: wrap the translated element type in an OptionalCType.
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds, symint=symint)
        return NamedCType(binds, OptionalCType(elem.type))
    elif isinstance(t, ListType):
        # TODO: remove these special cases, ArrayRef fallthrough works fine
        if str(t.elem) == "int":
            if remove_non_owning_ref_types:
                return NamedCType(binds, VectorCType(BaseCType(longT)))
            else:
                return NamedCType(binds, BaseCType(intArrayRefT))
        if str(t.elem) == "SymInt":
            if remove_non_owning_ref_types:
                if symint:
                    return NamedCType(binds, VectorCType(BaseCType(SymIntT)))
                else:
                    return NamedCType(binds, VectorCType(BaseCType(longT)))
            else:
                if symint:
                    return NamedCType(binds, BaseCType(symIntArrayRefT))
                else:
                    return NamedCType(binds, BaseCType(intArrayRefT))
        if str(t.elem) == "Tensor":
            # Tensor list spelling depends on a file-local setting.
            if local.use_ilistref_for_tensor_lists():
                return NamedCType(binds, ConstRefCType(BaseCType(iTensorListRefT)))
            else:
                return NamedCType(binds, BaseCType(tensorListT))
        elif str(t.elem) == "Scalar":
            return NamedCType(binds, ArrayRefCType(BaseCType(scalarT)))
        elif str(t.elem) == "Dimname":
            return NamedCType(binds, BaseCType(dimnameListT))
        elif str(t.elem) == "Tensor?":
            return NamedCType(
                binds, ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT))))
            )
        # Generic fallthrough: ArrayRef of the translated element type.
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds, symint=symint)
        return NamedCType(binds, ArrayRefCType(elem.type))
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")
215
+
216
+
217
+ # Translate a JIT argument into its C++ type
218
def argument_type(a: Argument, *, binds: ArgName, symint: bool = False) -> NamedCType:
    """Translate a single JIT argument into its C++ API type."""
    jit_type = a.type
    return argumenttype_type(jit_type, mutable=a.is_write, binds=binds, symint=symint)
220
+
221
+
222
+ # Translation of a (non-multi) return type from JIT to C++
223
+ # N.B: returntype_type returns a CType, not a NamedCType.
224
+ # This is mostly because of the mismatch between return types and return names.
225
+ # e.g. a function with a return type of 'void' has 0 return names,
226
+ # and a function with a return type of 'std::tuple' has >1 return name.
227
def returntype_type(t: Type, *, mutable: bool, symint: bool = False) -> CType:
    """Translate a single (non-multi) JIT return type into its C++ type.

    Returns a bare CType rather than a NamedCType: return positions don't
    carry a bound name here (a `void` return has zero names, a tuple has
    several).
    """
    # placeholder is ignored
    r = valuetype_type(t, binds="__placeholder__", symint=symint)
    if r is not None:
        return r.type

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            if mutable:
                if local.use_const_ref_for_mutable_tensors():
                    return ConstRefCType(BaseCType(tensorT))
                else:
                    return MutRefCType(BaseCType(tensorT))
            else:
                # Note [Tensor Copy Returns]
                # Currently, we use "Argument.is_write" to determine
                # whether or not Tensor return types should be copies or references.
                # If that ever changes, take a look at other locations of this note!
                return BaseCType(tensorT)
        elif t.name == BaseTy.Scalar:
            return BaseCType(scalarT)
    elif isinstance(t, ListType):
        assert (
            not mutable
        ), "Native functions should never return a mutable tensor list. They should return void."
        elem = returntype_type(t.elem, mutable=False, symint=symint)
        assert t.size is None, f"fixed size list returns not supported: {t}"
        return VectorCType(elem)

    raise AssertionError(f"unrecognized return type {t}")
257
+
258
+
259
+ # Translation of a single return to its C++ type
260
def return_type(r: Return, *, symint: bool = False) -> CType:
    """Translate one JIT return into its C++ type."""
    mutable = r.is_write
    return returntype_type(r.type, mutable=mutable, symint=symint)
262
+
263
+
264
+ # Translation of a full (possibly multi) return from JIT to its C++ type
265
def returns_type(rs: Sequence[Return], *, symint: bool = False) -> CType:
    """C++ type for a full (possibly multi) return: void, a single type,
    or a tuple of types."""
    if not rs:
        return BaseCType(voidT)
    if len(rs) == 1:
        return return_type(rs[0], symint=symint)
    return TupleCType([return_type(r, symint=symint) for r in rs])
272
+
273
+
274
def return_names(f: NativeFunction, *, fallback_name: str = "result") -> Sequence[str]:
    """Compute the C++-visible names of `f`'s returns, in order.

    Precedence: inplace returns are named "self"; out-function returns take
    their out argument's name; explicitly named returns keep their name
    (suffixed "_return" on a collision with an argument name); otherwise the
    fallback is used ("result", or "result0"/"result1"/... for multi-returns).
    """
    returns: List[str] = []
    for i, r in enumerate(f.func.returns):
        # If we have an inplace function, the return argument is
        # implicitly named self.
        # TODO: Consider incorporating this into the data model
        if f.func.name.name.inplace:
            assert i == 0, "illegal inplace function with multiple returns"
            name = "self"
        # If we are out function, the name is the name of the
        # corresponding output function (r.name will get recorded
        # in field_name later.)
        elif f.func.is_out_fn():
            name = f.func.arguments.out[i].name
        # If the return argument is explicitly named...
        elif r.name:
            name_conflict = any(
                r.name == a.name for a in f.func.schema_order_arguments()
            )
            # NOTE(review): `not f.func.is_out_fn()` is always True on this
            # branch (the out-fn case was handled above), so only the
            # conflict check is load-bearing here.
            if name_conflict and not f.func.is_out_fn():
                name = f"{r.name}_return"
            else:
                name = r.name
        # If there is no explicit name and no fallback name was passed in, we just name the output result,
        # unless it's a multi-return, in which case it's result0,
        # result1, etc (zero-indexed)
        else:
            name = fallback_name if len(f.func.returns) == 1 else f"{fallback_name}{i}"
        returns.append(name)
    return returns
304
+
305
+
306
# Literal JIT schema defaults that have a direct C++ spelling. Type-directed
# defaults (e.g. "None" for an optional) are handled in default_expr before
# this table is consulted.
JIT_TO_CPP_DEFAULT = {
    "False": "false",
    "True": "true",
    "None": "c10::nullopt",  # UGH this one is type directed
    "Mean": "at::Reduction::Mean",
    "[]": "{}",
    "contiguous_format": "MemoryFormat::Contiguous",
    "long": "at::kLong",
}
315
+
316
+ # Convert a JIT default into C++ expression representing the default
317
def default_expr(d: str, t: Type, *, symint: bool) -> str:
    """Convert a JIT schema default-value string into a C++ expression.

    Handles the type-directed cases (None for optional tensors/optionals,
    quoted strings, list literals, SymInt digits), then falls back to the
    JIT_TO_CPP_DEFAULT literal table (or the string itself).
    """
    if d == "None" and str(t) == "Tensor?":
        return "{}"
    if isinstance(t, BaseType) and t.name is BaseTy.str:
        # Schema allows single quotes but C++ needs double
        if len(d) >= 2 and d[0] == "'" and d[-1] == "'":
            s = ""
            i = 1
            # Walk the quoted body, rewriting escapes: `\'` drops its
            # backslash, a bare `"` gains one, everything else is copied.
            while i + 1 < len(d):
                if d[i] != "\\":
                    if d[i] == '"':
                        s += '\\"'
                    else:
                        s += d[i]
                    i += 1
                else:
                    if d[i + 1] == "'":
                        s += "'"
                    else:
                        s += d[i : i + 2]
                    i += 2

            return f'"{s}"'

    if isinstance(t, OptionalType):
        if d == "None":
            return "c10::nullopt"

        # A non-None default for an optional defaults the element type.
        return default_expr(d, t.elem, symint=symint)

    if isinstance(t, ListType):
        if d.startswith("[") and d.endswith("]"):
            return "{" + d[1:-1] + "}"
        elif symint and d.isdigit() and str(t.elem) == "SymInt":
            return f"c10::SymInt({d})"
        elif t.size is None:
            # NOTE: Sized lists can have scalar defaults
            raise ValueError(f"Expected a list default '[...]' but found: '{d}'")

    return JIT_TO_CPP_DEFAULT.get(d, d)
357
+
358
+
359
+ # Convert an argument into its C++ API form
360
+
361
+
362
def argument(
    a: Union[Argument, TensorOptionsArguments, SelfArgument],
    *,
    cpp_no_default_args: Set[str],
    method: bool,
    faithful: bool,
    symint: bool = False,
    has_tensor_options: bool,
) -> List[Binding]:
    """Convert one schema argument into its C++ API binding(s).

    TensorOptionsArguments expands to four bindings in faithful mode but
    collapses to a single `options` binding otherwise; SelfArgument produces
    no binding in method signatures (it becomes the implicit `this`).
    """

    # Recurse with the same configuration for bundled sub-arguments.
    def sub_argument(
        a: Union[Argument, TensorOptionsArguments, SelfArgument]
    ) -> List[Binding]:
        return argument(
            a,
            cpp_no_default_args=cpp_no_default_args,
            method=method,
            faithful=faithful,
            symint=symint,
            has_tensor_options=has_tensor_options,
        )

    if isinstance(a, Argument):
        binds: ArgName
        # A memory_format argument alongside TensorOptions may be redundant
        # with options.memory_format, so it is bound to a special marker.
        if a.name == "memory_format" and has_tensor_options:
            binds = SpecialArgName.possibly_redundant_memory_format
        else:
            binds = a.name
        default: Optional[str] = None
        if a.name not in cpp_no_default_args and a.default is not None:
            default = default_expr(a.default, a.type, symint=symint)
        return [
            Binding(
                nctype=argument_type(a, binds=binds, symint=symint),
                name=a.name,
                default=default,
                argument=a,
            )
        ]
    elif isinstance(a, TensorOptionsArguments):
        if faithful:
            # Faithful signatures pass dtype/layout/device/pin_memory as
            # four separate arguments.
            return (
                sub_argument(a.dtype)
                + sub_argument(a.layout)
                + sub_argument(a.device)
                + sub_argument(a.pin_memory)
            )
        else:
            default = None
            # Enforced by NativeFunction.__post_init__
            assert "options" not in cpp_no_default_args
            if all(x.default == "None" for x in a.all()):
                default = "{}"
            elif a.dtype.default == "long":
                default = "at::kLong"  # TODO: this is wrong
            return [
                Binding(
                    nctype=NamedCType("options", BaseCType(tensorOptionsT)),
                    name="options",
                    default=default,
                    argument=a,
                )
            ]
    elif isinstance(a, SelfArgument):
        if method:
            # Caller is responsible for installing implicit this in context!
            return []
        else:
            return sub_argument(a.argument)
    else:
        assert_never(a)
432
+
433
+
434
def arguments(
    arguments: Arguments,
    *,
    faithful: bool,
    symint: bool = False,
    method: bool,
    cpp_no_default_args: Set[str],
) -> List[Binding]:
    """Bindings for the full C++ signature of a schema.

    Faithful signatures place out arguments last and never carry defaults;
    the conventional C++ API places out arguments first.
    """
    if faithful:
        ordered = list(arguments.non_out) + list(arguments.out)
    else:
        ordered = list(arguments.out) + list(arguments.non_out)
    bindings: List[Binding] = []
    for a in ordered:
        for b in argument(
            a,
            faithful=faithful,
            symint=symint,
            method=method,
            has_tensor_options=arguments.tensor_options is not None,
            cpp_no_default_args=cpp_no_default_args,
        ):
            # Faithful overloads drop every default.
            bindings.append(b.no_default() if faithful else b)
    return bindings
wemm/lib/python3.10/site-packages/torchgen/api/dispatcher.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+ from typing import List, Sequence, Union
3
+
4
+ from torchgen.api import cpp
5
+
6
+ from torchgen.api.types import ArgName, Binding, CType, NamedCType
7
+ from torchgen.model import (
8
+ Argument,
9
+ FunctionSchema,
10
+ Return,
11
+ SelfArgument,
12
+ TensorOptionsArguments,
13
+ Type,
14
+ )
15
+ from torchgen.utils import assert_never, concatMap
16
+
17
+ # This file describes the translation of JIT schema to the dispatcher
18
+ # API, the *unboxed* calling convention by which invocations through
19
+ # the dispatcher are made. Historically, the dispatcher API matched
20
+ # the C++ API, but with the establishment of the boxed API, we've
21
+ # made changes to the dispatcher API to so that the unboxed API
22
+ # better aligns with the boxed API. The dispatcher API hooks heavily
23
+ # into our template based boxing/unboxing machinery, so changes
24
+ # to this convention will usually need template updates too.
25
+ #
26
+ # Prominent characteristics of the dispatcher API:
27
+ #
28
+ # - dtype, layout, device and pin_memory are represented as separate
29
+ # arguments.
30
+ #
31
+
32
+
33
def name(func: FunctionSchema) -> str:
    """The dispatcher uses the same unqualified name as the C++ API."""
    dispatcher_name = cpp.name(func)
    return dispatcher_name
35
+
36
+
37
def argumenttype_type(
    t: Type,
    *,
    mutable: bool,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
    symint: bool = True,
) -> NamedCType:
    """Dispatcher C++ type for a JIT argument type.

    This is a false friend: it currently just delegates to the C++ API
    translation. If more special cases become necessary, add them here (or
    invert the relationship / inline the function).
    """
    return cpp.argumenttype_type(
        t,
        mutable=mutable,
        binds=binds,
        remove_non_owning_ref_types=remove_non_owning_ref_types,
        symint=symint,
    )
56
+
57
+
58
def argument_type(
    a: Argument,
    *,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
    symint: bool = True,
) -> NamedCType:
    """Dispatcher C++ type for a JIT argument (delegates on the argument's type)."""
    jit_type = a.type
    return argumenttype_type(
        jit_type,
        mutable=a.is_write,
        binds=binds,
        symint=symint,
        remove_non_owning_ref_types=remove_non_owning_ref_types,
    )
72
+
73
+
74
def returns_type(rs: Sequence[Return], *, symint: bool = True) -> CType:
    """Dispatcher return type. At present, there is no difference from the
    C++ API — but there could be!"""
    cpp_type = cpp.returns_type(rs, symint=symint)
    return cpp_type
77
+
78
+
79
def jit_arguments(func: FunctionSchema) -> List[Argument]:
    """Flatten a schema's positional, kwarg-only and out arguments into plain
    Arguments, expanding SelfArgument and TensorOptionsArguments bundles.
    """
    flat: List[Argument] = []
    for a in itertools.chain(
        func.arguments.positional, func.arguments.kwarg_only, func.arguments.out
    ):
        if isinstance(a, Argument):
            flat.append(a)
        elif isinstance(a, SelfArgument):
            flat.append(a.argument)
        elif isinstance(a, TensorOptionsArguments):
            # dtype, layout, device and pin_memory travel as four separate
            # arguments in the dispatcher calling convention.
            flat.extend([a.dtype, a.layout, a.device, a.pin_memory])
        else:
            assert_never(a)
    return flat
100
+
101
+
102
def argument(
    a: Argument, *, remove_non_owning_ref_types: bool = False, symint: bool = True
) -> Binding:
    """Bind one flattened JIT argument to its dispatcher C++ type."""
    nctype = argument_type(
        a,
        binds=a.name,
        remove_non_owning_ref_types=remove_non_owning_ref_types,
        symint=symint,
    )
    return Binding(nctype=nctype, name=a.name, argument=a)
115
+
116
+
117
def arguments(func: FunctionSchema, *, symint: bool = True) -> List[Binding]:
    """Bindings for every argument of a schema, in dispatcher order."""
    bindings: List[Binding] = []
    for a in jit_arguments(func):
        bindings.append(argument(a, symint=symint))
    return bindings
wemm/lib/python3.10/site-packages/torchgen/api/native.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional, Sequence, Union
2
+
3
+ from torchgen import local
4
+ from torchgen.api import cpp
5
+
6
+ from torchgen.api.types import (
7
+ ArgName,
8
+ BaseCType,
9
+ Binding,
10
+ boolT,
11
+ ConstRefCType,
12
+ CType,
13
+ deviceT,
14
+ layoutT,
15
+ ListCType,
16
+ MutRefCType,
17
+ NamedCType,
18
+ OptionalCType,
19
+ scalarT,
20
+ scalarTypeT,
21
+ tensorT,
22
+ )
23
+ from torchgen.model import (
24
+ Argument,
25
+ FunctionSchema,
26
+ Return,
27
+ SelfArgument,
28
+ TensorOptionsArguments,
29
+ Type,
30
+ )
31
+ from torchgen.utils import assert_never
32
+
33
+ # This file describes the translation of JIT schema to the native functions API.
34
+ # This looks a lot like the C++ API (which makes historical sense, because the
35
+ # idea was you wrote native functions to implement functions in the C++ API),
36
+ # but over time we have evolved the C++ API without actually changing our
37
+ # native:: kernels. The intention is to make native API and dispatcher API
38
+ # line up as closely as possible, since this results in the least overhead
39
+ # (no translation is needed from dispatcher API to native API).
40
+ #
41
+ # NB: this is symint aware, you will get the non-SymInt variant for some
42
+ # dispatch entries and SymInt for others.
43
+
44
+
45
def name(func: FunctionSchema) -> str:
    """Native-functions name: base name plus `_out` and overload suffixes."""
    parts = [str(func.name.name)]
    # TODO: delete this!  (the `_out` suffixing is kept for historical reasons)
    if func.is_out_fn():
        parts.append("_out")
    if func.name.overload_name:
        parts.append(f"_{func.name.overload_name}")
    return "".join(parts)
53
+
54
+
55
def argumenttype_type(
    t: Type, *, mutable: bool, binds: ArgName, symint: bool
) -> NamedCType:
    """Native-functions C++ type for a JIT argument type.

    Overrides the few cases where the native API diverges from the public
    C++ API (optional tensors/scalars and Scalar itself), then delegates the
    rest to cpp.argumenttype_type.
    """
    if str(t) == "Tensor?":
        tensor_type: OptionalCType = OptionalCType(BaseCType(tensorT))
        # Mutable optional tensors pass by mutable ref unless the file-local
        # setting requests const ref for mutable tensors.
        if mutable and not local.use_const_ref_for_mutable_tensors():
            return NamedCType(binds, MutRefCType(tensor_type))
        else:
            return NamedCType(binds, ConstRefCType(tensor_type))
    elif str(t) == "Tensor?[]":
        return NamedCType(
            binds, ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT))))
        )
    elif str(t) == "Scalar":
        return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
    elif str(t) == "Scalar?":
        return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT))))
    # Everything else follows the public C++ API translation.
    return cpp.argumenttype_type(t, mutable=mutable, binds=binds, symint=symint)
73
+
74
+
75
def returns_type(rs: Sequence[Return], *, symint: bool) -> CType:
    """Native-functions return type — identical to the C++ API's."""
    native_type = cpp.returns_type(rs, symint=symint)
    return native_type
77
+
78
+
79
def argument_type(a: Argument, *, binds: ArgName, symint: bool) -> NamedCType:
    """Native-functions C++ type for a JIT argument."""
    mutable = a.is_write
    return argumenttype_type(a.type, mutable=mutable, binds=binds, symint=symint)
81
+
82
+
83
def argument(
    a: Union[Argument, SelfArgument, TensorOptionsArguments],
    *,
    is_out: bool,
    symint: bool,
) -> List[Binding]:
    """Convert one schema argument into native-functions binding(s).

    SelfArgument is flattened away; TensorOptionsArguments always expands
    into four separate optional bindings (dtype/layout/device/pin_memory).
    """
    # Ideally, we NEVER default native functions. However, there are a number
    # of functions that call native:: directly and rely on the defaulting
    # existing. So for BC, we generate defaults for non-out variants (but not
    # for out variants, where it is impossible to generate an appropriate
    # default)
    should_default = not is_out
    if isinstance(a, Argument):
        default: Optional[str] = None
        if should_default and a.default is not None:
            default = cpp.default_expr(a.default, a.type, symint=symint)
        return [
            Binding(
                nctype=argument_type(a, binds=a.name, symint=symint),
                name=a.name,
                default=default,
                argument=a,
            )
        ]
    elif isinstance(a, SelfArgument):
        # Erase SelfArgument from the distinction
        return argument(a.argument, is_out=is_out, symint=symint)
    elif isinstance(a, TensorOptionsArguments):
        default = None
        if should_default:
            default = "{}"
        # TODO: Not sure why the arguments assigned here are for
        # TensorOptionsArguments and not the constituent pieces. It seems
        # to matter
        return [
            Binding(
                nctype=NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))),
                name="dtype",
                default=default,
                argument=a,
            ),
            Binding(
                nctype=NamedCType("layout", OptionalCType(BaseCType(layoutT))),
                name="layout",
                default=default,
                argument=a,
            ),
            Binding(
                nctype=NamedCType("device", OptionalCType(BaseCType(deviceT))),
                name="device",
                default=default,
                argument=a,
            ),
            Binding(
                nctype=NamedCType("pin_memory", OptionalCType(BaseCType(boolT))),
                name="pin_memory",
                default=default,
                argument=a,
            ),
        ]
    else:
        assert_never(a)
+
146
+
147
+ def arguments(func: FunctionSchema, *, symint: bool) -> List[Binding]:
148
+ args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
149
+ args.extend(func.arguments.non_out)
150
+ args.extend(func.arguments.out)
151
+ return [
152
+ r for arg in args for r in argument(arg, symint=symint, is_out=func.is_out_fn())
153
+ ]
wemm/lib/python3.10/site-packages/torchgen/api/structured.py ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Union
2
+
3
+ from torchgen.api import cpp
4
+
5
+ from torchgen.api.types import (
6
+ ArgName,
7
+ ArrayRefCType,
8
+ BaseCType,
9
+ Binding,
10
+ ConstRefCType,
11
+ dimnameListT,
12
+ intArrayRefT,
13
+ iOptTensorListRefT,
14
+ iTensorListRefT,
15
+ NamedCType,
16
+ OptionalCType,
17
+ optionalIntArrayRefT,
18
+ optionalScalarRefT,
19
+ optionalTensorRefT,
20
+ scalarT,
21
+ tensorT,
22
+ )
23
+ from torchgen.model import (
24
+ Argument,
25
+ BaseTy,
26
+ BaseType,
27
+ ListType,
28
+ NativeFunctionsGroup,
29
+ OptionalType,
30
+ SelfArgument,
31
+ TensorOptionsArguments,
32
+ Type,
33
+ )
34
+ from torchgen.utils import assert_never
35
+
36
+ # This file describes the translation of JIT schema to the structured functions API.
37
+ # This is similar to native API, but a number of historical problems with native
38
+ # API have been fixed.
39
+
40
+ # Translation of types occuring in JIT arguments to a C++ argument type.
41
+ # NB: For now, mutable doesn't do anything; but it could if we make
42
+ # some more nominal types
43
+ def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
44
+ # If it's a value type, do the value type translation
45
+ # NB: structured kernels ALWAYS have symint off, since they involve actual
46
+ # kernels that require real ints. The one exception is the
47
+ # CompositeExplicitAutograd and the meta function (which could
48
+ # hypothetically be SymInt), but for simplicity we plan for these to just
49
+ # be handled in Python
50
+ r = cpp.valuetype_type(t, symint=False, binds=binds)
51
+ if r is not None:
52
+ return r
53
+
54
+ if isinstance(t, BaseType):
55
+ if t.name == BaseTy.Tensor:
56
+ return NamedCType(binds, ConstRefCType(BaseCType(tensorT)))
57
+ elif t.name == BaseTy.Scalar:
58
+ return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
59
+ else:
60
+ raise AssertionError(f"base type should have been value type {t}")
61
+ elif isinstance(t, OptionalType):
62
+ if t.elem == BaseType(BaseTy.Tensor):
63
+ return NamedCType(binds, BaseCType(optionalTensorRefT))
64
+ elif t.elem == BaseType(BaseTy.Scalar):
65
+ return NamedCType(binds, BaseCType(optionalScalarRefT))
66
+ elif isinstance(t.elem, ListType) and str(t.elem.elem) == "int":
67
+ return NamedCType(binds, BaseCType(optionalIntArrayRefT))
68
+ elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
69
+ return NamedCType(binds, OptionalCType(elem.type))
70
+ elif isinstance(t, ListType):
71
+ if t.elem == BaseType(BaseTy.Tensor):
72
+ return NamedCType(binds, ConstRefCType(BaseCType(iTensorListRefT)))
73
+ elif t.elem == OptionalType(BaseType(BaseTy.Tensor)):
74
+ return NamedCType(binds, BaseCType(iOptTensorListRefT))
75
+ # TODO: delete these special cases; see torchgen.api.cpp--these
76
+ # must be changed in tandem, but there are problems; see
77
+ # https://github.com/pytorch/pytorch/pull/51485
78
+ elif str(t.elem) == "int":
79
+ return NamedCType(binds, BaseCType(intArrayRefT))
80
+ elif str(t.elem) == "Dimname":
81
+ return NamedCType(binds, BaseCType(dimnameListT))
82
+ elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
83
+ return NamedCType(binds, ArrayRefCType(elem.type))
84
+ else:
85
+ raise AssertionError(f"unrecognized type {repr(t)}")
86
+
87
+
88
+ def argument_type(a: Argument, *, binds: ArgName) -> NamedCType:
89
+ return argumenttype_type(a.type, mutable=a.is_write, binds=binds)
90
+
91
+
92
+ # returns_type intentionally omitted, because structured kernels never "return";
93
+ # instead, they always indirectly report their outputs (in the case of a meta
94
+ # function, by calling set_output; in the case of an impl function, by writing
95
+ # directly into the provided out argument).
96
+
97
+ # Structured kernels are never defaulted
98
+ def argument(a: Union[Argument, SelfArgument, TensorOptionsArguments]) -> List[Binding]:
99
+ if isinstance(a, Argument):
100
+ return [
101
+ Binding(
102
+ nctype=argument_type(a, binds=a.name),
103
+ name=a.name,
104
+ default=None,
105
+ argument=a,
106
+ )
107
+ ]
108
+ elif isinstance(a, SelfArgument):
109
+ return argument(a.argument)
110
+ elif isinstance(a, TensorOptionsArguments):
111
+ raise AssertionError("structured kernels don't support TensorOptions yet")
112
+ else:
113
+ assert_never(a)
114
+
115
+
116
+ def impl_arguments(g: NativeFunctionsGroup) -> List[Binding]:
117
+ args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
118
+
119
+ if g.out.precomputed:
120
+ # A list of parameters for the impl function with
121
+ # certain parameters replaced with precomputed counterparts
122
+ # as specified in native_functions.yaml.
123
+ non_out_args_replaced: List[
124
+ Union[Argument, TensorOptionsArguments, SelfArgument]
125
+ ] = []
126
+ for a in g.out.func.arguments.non_out:
127
+ if isinstance(a, Argument) and a.name in g.out.precomputed.replace:
128
+ # If a is in precompute.replace, append the parameters
129
+ # that should replace it onto non_out_args_replaced.
130
+ for replacement in g.out.precomputed.replace[a.name]:
131
+ non_out_args_replaced.append(replacement)
132
+ else:
133
+ # If not, push a as it is.
134
+ non_out_args_replaced.append(a)
135
+
136
+ args.extend(non_out_args_replaced)
137
+ # g.out.precomputed.add is the list of parameters that are added
138
+ # without replacement after the non out args and just before the out args
139
+ args.extend(g.out.precomputed.add)
140
+ else:
141
+ args.extend(g.out.func.arguments.non_out)
142
+
143
+ args.extend(g.out.func.arguments.out)
144
+ return [r for arg in args for r in argument(arg)]
145
+
146
+
147
+ def meta_arguments(g: NativeFunctionsGroup) -> List[Binding]:
148
+ args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
149
+ args.extend(g.functional.func.arguments.non_out)
150
+ return [r for arg in args for r in argument(arg)]
151
+
152
+
153
+ def out_arguments(g: NativeFunctionsGroup) -> List[Binding]:
154
+ args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
155
+ args.extend(g.out.func.arguments.out)
156
+ return [r for arg in args for r in argument(arg)]
wemm/lib/python3.10/site-packages/torchgen/api/translate.py ADDED
@@ -0,0 +1,431 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, List, NoReturn, Sequence, Union
2
+
3
+ from torchgen.api.types import (
4
+ ArrayRefCType,
5
+ BaseCType,
6
+ Binding,
7
+ boolT,
8
+ ConstRefCType,
9
+ deviceT,
10
+ Expr,
11
+ intArrayRefT,
12
+ iOptTensorListRefT,
13
+ layoutT,
14
+ ListCType,
15
+ longT,
16
+ memoryFormatT,
17
+ MutRefCType,
18
+ NamedCType,
19
+ opmath_t,
20
+ OptionalCType,
21
+ optionalIntArrayRefT,
22
+ optionalScalarRefT,
23
+ optionalSymIntArrayRefT,
24
+ optionalTensorRefT,
25
+ scalar_t,
26
+ scalarT,
27
+ scalarTypeT,
28
+ SpecialArgName,
29
+ symIntArrayRefT,
30
+ SymIntT,
31
+ tensorOptionsT,
32
+ tensorT,
33
+ VectorCType,
34
+ )
35
+
36
+ # This file implements a small program synthesis engine that implements
37
+ # conversions between one API to another.
38
+ #
39
+ # The key data type in this file in NamedCType, short for Named C++ semantic type. A NamedCType
40
+ # represents a C++ type, plus semantic information about what it represents.
41
+ # For example, consider the argument "bool pin_memory"; its normal C++ type is
42
+ # "bool", but its C++ semantic type also keeps track that this represents a
43
+ # "pin_memory"; you can't just use a random other boolean in a context where you
44
+ # need a "pin_memory"!
45
+ #
46
+ # The translator takes a list of needed NamedCTypes, and then figures out how
47
+ # to construct expressions with these NamedCTypes from the given bindings. Many
48
+ # of these expressions are trivial (I need a Tensor other; there's a Tensor
49
+ # other scope); others are more nontrivial and may require packing/unpacking.
50
+ # Some examples of non-trivial action:
51
+ #
52
+ # - Need the "dtype" binding? Well, maybe "dtype" isn't available
53
+ # in the context, instead, "options" is, and you need to extract
54
+ # it from there. (Gather)
55
+ #
56
+ # - Need the "context" binding? Well, maybe "context" isn't available
57
+ # in the context, and you need to construct it from "dtype", "device",
58
+ # etc. (Scatter)
59
+ #
60
+ # - Need the "memory_format" binding? Well, actually, it's available
61
+ # from both "memory_format" and "options", so you had better make sure
62
+ # they are consistent. (Join)
63
+
64
+ options_ctype = NamedCType("options", ConstRefCType(BaseCType(tensorOptionsT)))
65
+
66
+ out_tensor_ctype = NamedCType("out", ConstRefCType(BaseCType(tensorT)))
67
+
68
+ longVec_ctype = VectorCType(BaseCType(longT))
69
+ longSymVec_ctype = VectorCType(BaseCType(SymIntT))
70
+ optionalLongVec_ctype = OptionalCType(VectorCType(BaseCType(longT)))
71
+ optionalScalar_ctype = OptionalCType(BaseCType(scalarT))
72
+ optionalTensor_ctype = OptionalCType(BaseCType(tensorT))
73
+
74
+
75
+ class UnsatError(RuntimeError):
76
+ pass
77
+
78
+
79
+ # Given a set of in-scope bindings and a set of target bindings, synthesize
80
+ # a list of expressions that uses only the in-scope bindings (bindings) that
81
+ # have all of the types of goals. You may want to use this function if
82
+ # you're generating code for a function like:
83
+ #
84
+ # void f({args}) {
85
+ # g({exprs}); // g is a different API
86
+ # }
87
+ #
88
+ # and you need to generate "exprs".
89
+ #
90
+ # Typically, a list of Bindings is convenient to get (you usually call something
91
+ # like arguments() to get them); but technically you only need less information:
92
+ # for 'bindings' an (un-ordered) list of Exprs is sufficient; similarly, for
93
+ # 'goals', an (ordered) list of NamedCType goals is sufficient. If you are doing
94
+ # something more complicated, e.g., tracking the set of bindings in a context,
95
+ # you may find using these smaller types more convenient.
96
+ def translate(
97
+ bindings: Sequence[Union[Expr, Binding]],
98
+ goals: Sequence[Union[NamedCType, Binding]],
99
+ *,
100
+ method: bool = False,
101
+ allow_expensive_conversions: bool = False,
102
+ ) -> List[Expr]:
103
+
104
+ binding_exprs: List[Expr] = []
105
+ for b in bindings:
106
+ if isinstance(b, Binding):
107
+ binding_exprs.append(
108
+ Expr(
109
+ expr=b.name,
110
+ type=b.nctype,
111
+ )
112
+ )
113
+ else:
114
+ binding_exprs.append(b)
115
+
116
+ goal_ctypes: List[NamedCType] = []
117
+ for g in goals:
118
+ if isinstance(g, Binding):
119
+ goal_ctypes.append(g.nctype)
120
+ else:
121
+ goal_ctypes.append(g)
122
+
123
+ # Add all the bindings to the context
124
+ ctx: Dict[NamedCType, str] = {}
125
+ for b in binding_exprs:
126
+ ctx[b.type] = b.expr
127
+
128
+ # While we're at it, do some simple forward inference, looking through
129
+ # constructors.
130
+ #
131
+ # NB: When should you do forward inference versus backward inference?
132
+ # The general idea:
133
+ #
134
+ # - Backward inference WHEN the goal gets smaller
135
+ # - Forward inference WHEN the hypothesis gets smaller
136
+ #
137
+ # This helps ensure termination: backward inference starts with a goal
138
+ # and tries to make it simpler and simpler until it's trivial; if the
139
+ # goal can grow in size, we blow up to a really huge goal size.
140
+ # Similarly, with forward inference we take hypotheses and decompose
141
+ # them into simpler hypotheses; if hypotheses could expand in size,
142
+ # we also have potential nontermination. (In the code below, forward
143
+ # inference is only ever carried out at a single step, but you could
144
+ # imagine repeated application of forward inference being profitable.)
145
+ #
146
+ # A good starting point in the literature for exploring more about proof
147
+ # search are these lecture notes
148
+ # https://www.cs.cmu.edu/~fp/courses/oregon-m10/04-focusing.pdf
149
+ #
150
+ # TODO: My kingdom for a pattern matcher
151
+ # https://www.python.org/dev/peps/pep-0634/
152
+ #
153
+ # TODO: This could get us in recomputation trouble if b.expr is nontrivial.
154
+ # Fix this by implementing some sort of sharing so that if multiple
155
+ # goals share the same expression, we only compute it once. This seems
156
+ # to matter in practice as compiler is often unwilling to CSE nontrivial
157
+ # expressions like scalar.to<scalar_t>()
158
+ t = b.type
159
+ if (
160
+ isinstance(t, ConstRefCType)
161
+ and isinstance(t.elem, OptionalCType)
162
+ and isinstance(t.elem.elem, BaseCType)
163
+ and str(t.elem.elem.type) == "at::Tensor"
164
+ ):
165
+ ctx[
166
+ NamedCType(t.elem.elem.name, ConstRefCType(BaseCType(tensorT)))
167
+ ] = f"({b.expr}.has_value() ? *{b.expr} : at::Tensor())"
168
+
169
+ if t.type == ConstRefCType(OptionalCType(BaseCType(tensorT))):
170
+ ctx[
171
+ NamedCType(t.name, BaseCType(optionalTensorRefT))
172
+ ] = f"(({b.expr}.has_value() && (*{b.expr}).defined()) ? at::OptionalTensorRef(*{b.expr}) : at::OptionalTensorRef())"
173
+
174
+ if t.type == ConstRefCType(BaseCType(scalarT)):
175
+ ctx[NamedCType(t.name, BaseCType(opmath_t))] = f"({b.expr}).to<opmath_t>()"
176
+
177
+ if t.type == ConstRefCType(OptionalCType(BaseCType(scalarT))):
178
+ ctx[
179
+ NamedCType(t.name, BaseCType(optionalScalarRefT))
180
+ ] = f"({b.expr}.has_value() ? at::OptionalScalarRef(&({b.expr}.value())) : at::OptionalScalarRef())"
181
+
182
+ if t.type == BaseCType(scalar_t):
183
+ ctx[
184
+ NamedCType(t.name, BaseCType(opmath_t))
185
+ ] = f"static_cast<opmath_t>({b.expr})"
186
+
187
+ # [Note: IOptTensorListRef]
188
+ if t.type == ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT)))):
189
+ ctx[
190
+ NamedCType(t.name, BaseCType(iOptTensorListRefT))
191
+ ] = f"at::IOptTensorListRef({b.expr})"
192
+
193
+ # Add implicit bindings if the generated code is inside a Tensor method
194
+ if method:
195
+ ctx[
196
+ NamedCType("self", MutRefCType(BaseCType(tensorT)))
197
+ ] = "const_cast<Tensor&>(*this)"
198
+ ctx[
199
+ NamedCType("self", ConstRefCType(BaseCType(tensorT)))
200
+ ] = "const_cast<Tensor&>(*this)"
201
+ # This is better! Byte-for-byte compat
202
+ # ctx[NamedCType("self", ConstRefCType(BaseCType(tensorT)))] = "*this"
203
+
204
+ def unsat(goal: NamedCType) -> NoReturn:
205
+ ctx_desc = "\n".join(
206
+ f" {t.cpp_type()} {t.name}; // {e}" for t, e in ctx.items()
207
+ )
208
+ raise UnsatError(
209
+ f"""
210
+ Failed to synthesize the expression "{goal.cpp_type()} {goal.name}".
211
+ When I failed, the following bindings were available in the context:
212
+
213
+ {ctx_desc}
214
+
215
+ This probably means there is a missing rule in the rules of torchgen.api.translate.
216
+ Check this module for more information.
217
+ """
218
+ )
219
+
220
+ # A shitty backtracking search implementation. It's shitty because it
221
+ # does backtracking via stack (bad idea!) and for the most part tries to
222
+ # avoid backtracking. In particular, if
223
+ # direct=True, we won't try to do any fancy synthesis, just trivial
224
+ # conversions (e.g., "T a" is OK for "const T& a"). So all of the
225
+ # existing rules in this function simply try to solve immediately,
226
+ # and bail if things don't work out.
227
+ def solve(goal: NamedCType, *, direct: bool) -> str:
228
+ def direct_solve(goal: NamedCType) -> str:
229
+ return solve(goal, direct=True)
230
+
231
+ if goal in ctx:
232
+ # Trivial
233
+ return ctx[goal]
234
+
235
+ # const & is satisfied with mutable &
236
+ if isinstance(goal.type, ConstRefCType):
237
+ try:
238
+ # WARNING: not strictly decreasing; be careful not
239
+ # to add a direct conversion that goes satisfies
240
+ # mutable& with const&
241
+ return solve(
242
+ NamedCType(goal.name, MutRefCType(goal.type.elem)), direct=direct
243
+ )
244
+ except UnsatError:
245
+ pass
246
+
247
+ # mutable & is satisfied with value
248
+ if isinstance(goal.type, MutRefCType):
249
+ try:
250
+ return solve(NamedCType(goal.name, goal.type.elem), direct=direct)
251
+ except UnsatError:
252
+ pass
253
+
254
+ # TODO: These are referentially equal, shouldn't have to do this;
255
+ # ensuring we don't use type synonym IntArrayRef in codegen would
256
+ # help
257
+ if goal.type == ArrayRefCType(BaseCType(longT)):
258
+ return solve(NamedCType(goal.name, BaseCType(intArrayRefT)), direct=direct)
259
+
260
+ if direct:
261
+ unsat(goal)
262
+
263
+ # For now, all of these rules are mutually exclusive.
264
+ if goal == NamedCType("memory_format", OptionalCType(BaseCType(memoryFormatT))):
265
+ memory_format = direct_solve(
266
+ NamedCType(
267
+ SpecialArgName.possibly_redundant_memory_format,
268
+ OptionalCType(BaseCType(memoryFormatT)),
269
+ )
270
+ )
271
+ # No need to join "memory_format" and "options" if the target API takes "options" directly.
272
+ # Otherwise it will cause the redundant memory_format error.
273
+ if options_ctype in goal_ctypes:
274
+ return memory_format
275
+ try:
276
+ options = direct_solve(options_ctype)
277
+ return f"c10::impl::check_tensor_options_and_extract_memory_format({options}, {memory_format})"
278
+ except UnsatError:
279
+ return memory_format
280
+ elif goal == NamedCType("options", BaseCType(tensorOptionsT)):
281
+ dtype = direct_solve(
282
+ NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT)))
283
+ )
284
+ pin_memory = direct_solve(
285
+ NamedCType("pin_memory", OptionalCType(BaseCType(boolT)))
286
+ )
287
+ device = direct_solve(
288
+ NamedCType("device", OptionalCType(BaseCType(deviceT)))
289
+ )
290
+ layout = direct_solve(
291
+ NamedCType("layout", OptionalCType(BaseCType(layoutT)))
292
+ )
293
+ return f"TensorOptions().dtype({dtype}).layout({layout}).device({device}).pinned_memory({pin_memory})"
294
+
295
+ elif goal == NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))):
296
+ try:
297
+ options = direct_solve(options_ctype)
298
+ return f"optTypeMetaToScalarType({options}.dtype_opt())"
299
+ except UnsatError:
300
+ out_tensor = direct_solve(out_tensor_ctype)
301
+ return f"{out_tensor}.scalar_type()"
302
+
303
+ elif goal == NamedCType("layout", OptionalCType(BaseCType(layoutT))):
304
+ try:
305
+ options = direct_solve(options_ctype)
306
+ return f"{options}.layout_opt()"
307
+ except UnsatError:
308
+ out_tensor = direct_solve(out_tensor_ctype)
309
+ return f"{out_tensor}.layout()"
310
+
311
+ elif goal == NamedCType("device", OptionalCType(BaseCType(deviceT))):
312
+ try:
313
+ options = direct_solve(options_ctype)
314
+ return f"{options}.device_opt()"
315
+ except UnsatError:
316
+ out_tensor = direct_solve(out_tensor_ctype)
317
+ return f"{out_tensor}.device()"
318
+
319
+ elif goal == NamedCType("pin_memory", OptionalCType(BaseCType(boolT))):
320
+ try:
321
+ options = direct_solve(options_ctype)
322
+ return f"{options}.pinned_memory_opt()"
323
+ except UnsatError:
324
+ # If we're calling a factory op from its out= variant,
325
+ # We don't actually care about the value of pin_memory.
326
+ out_tensor = direct_solve(out_tensor_ctype)
327
+ return "c10::nullopt"
328
+
329
+ # We can always do translations from value types to reference types, like vector<int> -> IntArrayRef
330
+ elif goal.type == BaseCType(intArrayRefT):
331
+ try:
332
+ return direct_solve(NamedCType(goal.name, longVec_ctype))
333
+ except UnsatError:
334
+ # We can also go SymIntArrayRef -> IntArrayRef
335
+ symIntArrayRef_type = direct_solve(
336
+ NamedCType(goal.name, BaseCType(symIntArrayRefT))
337
+ )
338
+ return f"C10_AS_INTARRAYREF_SLOW({symIntArrayRef_type})"
339
+ elif goal.type == BaseCType(symIntArrayRefT):
340
+ try:
341
+ r = direct_solve(NamedCType(goal.name, BaseCType(intArrayRefT)))
342
+ return f"c10::fromIntArrayRefSlow({r})"
343
+ except UnsatError:
344
+ return direct_solve(NamedCType(goal.name, longSymVec_ctype))
345
+ elif goal.type == BaseCType(SymIntT):
346
+ return direct_solve(NamedCType(goal.name, BaseCType(longT)))
347
+ elif goal.type == OptionalCType(BaseCType(SymIntT)):
348
+ argname = direct_solve(
349
+ NamedCType(goal.name, OptionalCType(BaseCType(longT)))
350
+ )
351
+ return f"{argname}.has_value() ? c10::make_optional(c10::SymInt(*{argname})) : c10::nullopt"
352
+ elif goal.type == BaseCType(longT):
353
+ symInt_type = direct_solve(NamedCType(goal.name, BaseCType(SymIntT)))
354
+ return f"{symInt_type}.expect_int()"
355
+ elif goal.type == OptionalCType(BaseCType(longT)):
356
+ argname = direct_solve(
357
+ NamedCType(goal.name, OptionalCType(BaseCType(SymIntT)))
358
+ )
359
+ return f"{argname}.has_value() ? c10::make_optional({argname}->expect_int()) : c10::nullopt"
360
+ elif goal.type == BaseCType(optionalIntArrayRefT):
361
+ try:
362
+ return direct_solve(NamedCType(goal.name, optionalLongVec_ctype))
363
+ except UnsatError:
364
+ argname = direct_solve(
365
+ NamedCType(goal.name, BaseCType(optionalSymIntArrayRefT))
366
+ )
367
+ return f"{argname}.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*{argname})) : c10::nullopt"
368
+ elif goal.type == BaseCType(optionalSymIntArrayRefT):
369
+ # TODO: You might also want to solve this from longSymVec_ctype or
370
+ # an optional version of it
371
+ argname = direct_solve(
372
+ NamedCType(goal.name, BaseCType(optionalIntArrayRefT))
373
+ )
374
+ return f"{argname}.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*{argname})) : c10::nullopt"
375
+ elif goal.type == BaseCType(optionalScalarRefT):
376
+ return direct_solve(NamedCType(goal.name, optionalScalar_ctype))
377
+ elif goal.type == BaseCType(optionalTensorRefT):
378
+ return direct_solve(NamedCType(goal.name, optionalTensor_ctype))
379
+
380
+ # Note [translation from C++ reference to value types]
381
+ # The below cases are all for when we have an argument with a reference type,
382
+ # and a corresponding goal with a value type.
383
+ # These are needed when we populate the inputs to a lambda capture and we need
384
+ # to guarantee the lifetime of each captured argument.
385
+ # We guard it with an explicit kwarg because converting to a value type is expensive
386
+ # (O(n)) to convert from IntArrayRef to vector<int>),
387
+ # so the caller of translate() should be explicit that they need it.
388
+ if allow_expensive_conversions:
389
+ if goal.type == VectorCType(BaseCType(longT)):
390
+ intArrayRef_ctype = NamedCType(goal.name, BaseCType(intArrayRefT))
391
+ argname = direct_solve(intArrayRef_ctype)
392
+ return f"{argname}.vec()"
393
+ if goal.type == VectorCType(BaseCType(SymIntT)):
394
+ symIntArrayRef_ctype = NamedCType(goal.name, BaseCType(symIntArrayRefT))
395
+ argname = direct_solve(symIntArrayRef_ctype)
396
+ return f"{argname}.vec()"
397
+ elif goal.type == OptionalCType(VectorCType(BaseCType(longT))):
398
+ optionalIntArrayRef_ctype = NamedCType(
399
+ goal.name, BaseCType(optionalIntArrayRefT)
400
+ )
401
+ argname = direct_solve(optionalIntArrayRef_ctype)
402
+ return f"{argname}.has_value() ? c10::make_optional({argname}->vec()) : c10::nullopt"
403
+ elif goal.type == OptionalCType(BaseCType(scalarT)):
404
+ optionalScalarRef_ctype = NamedCType(
405
+ goal.name, BaseCType(optionalScalarRefT)
406
+ )
407
+ argname = direct_solve(optionalScalarRef_ctype)
408
+ return f"{argname}.has_value() ? c10::make_optional({argname}) : c10::nullopt"
409
+ elif goal.type == OptionalCType(BaseCType(scalarT)):
410
+ optionalTensorRef_ctype = NamedCType(
411
+ goal.name, BaseCType(optionalTensorRefT)
412
+ )
413
+ argname = direct_solve(optionalTensorRef_ctype)
414
+ return f"{argname}.has_value() ? c10::make_optional({argname}) : c10::nullopt"
415
+ # Technically, we also need to handle cases of C++ containers holding reference types.
416
+ # But there currently aren't any ops that require lambda capture codegen
417
+ # With arguments like std::vector<IntArrayRef>.
418
+ # If that changes, we'll have to add the translation here.
419
+
420
+ # We allow const casting on tensors, since const-correctness is a bit broken for at::Tensor.
421
+ # We could probably generalize this to non-tensor types too.
422
+ if goal.type == MutRefCType(BaseCType(tensorT)):
423
+ const_ref_tensor_ctype = NamedCType(
424
+ goal.name, ConstRefCType(BaseCType(tensorT))
425
+ )
426
+ argname = direct_solve(const_ref_tensor_ctype)
427
+ return f"const_cast<Tensor&>({argname})"
428
+
429
+ unsat(goal)
430
+
431
+ return [Expr(solve(g, direct=False), g) for g in goal_ctypes]
wemm/lib/python3.10/site-packages/torchgen/api/types/__pycache__/types_base.cpython-310.pyc ADDED
Binary file (9.67 kB). View file
 
wemm/lib/python3.10/site-packages/torchgen/api/types/types.py ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Where should I add a new type? `types_base.py` vs `types.py`
3
+
4
+ This file defines data model classes for torchgen typing system, as well as some base types such as int32_t.
5
+
6
+ `types.py` defines ATen Tensor type and some c10 types, along with signatures that use these types.
7
+
8
+ The difference between these two files, is `types_base.py` should be implementation-agnostic, meaning it shouldn't
9
+ contain any type definition that is tight to a specific C++ library (e.g., ATen), so that it can be easily reused
10
+ if we want to generate code for another C++ library.
11
+
12
+ Add new types to `types.py` if these types are ATen/c10 related.
13
+ Add new types to `types_base.py` if they are basic and not attached to ATen/c10.
14
+ """
15
+ from dataclasses import dataclass
16
+ from typing import Dict, TypeVar
17
+
18
+ from torchgen.model import BaseTy, ScalarType
19
+
20
+ from .types_base import (
21
+ BaseCppType,
22
+ BaseCType,
23
+ boolT,
24
+ byteT,
25
+ charT,
26
+ CType,
27
+ doubleT,
28
+ floatT,
29
+ int32T,
30
+ longT,
31
+ shortT,
32
+ )
33
+
34
+ _T = TypeVar("_T")
35
+
36
+ TENSOR_LIST_LIKE_CTYPES = [
37
+ "at::TensorList",
38
+ "const c10::List<c10::optional<at::Tensor>> &",
39
+ "const at::ITensorListRef &",
40
+ ]
41
+
42
+
43
+ halfT = BaseCppType("at", "Half")
44
+ complexHalfT = BaseCppType(
45
+ "c10", "complex<c10::Half>"
46
+ ) # stuffing template param here is an abuse
47
+ complexFloatT = BaseCppType("c10", "complex<float>")
48
+ complexDoubleT = BaseCppType("c10", "complex<double>")
49
+ bfloat16T = BaseCppType("at", "BFloat16")
50
+ stringT = BaseCppType("c10", "string_view")
51
+ generatorT = BaseCppType("at", "Generator")
52
+ scalarTypeT = BaseCppType("at", "ScalarType")
53
+ tensorT = BaseCppType("at", "Tensor")
54
+ optionalTensorRefT = BaseCppType("at", "OptionalTensorRef")
55
+ tensorListT = BaseCppType("at", "TensorList")
56
+ iTensorListRefT = BaseCppType("at", "ITensorListRef")
57
+ iOptTensorListRefT = BaseCppType("at", "IOptTensorListRef")
58
+ dimnameT = BaseCppType("at", "Dimname")
59
+ dimnameListT = BaseCppType("at", "DimnameList")
60
+ dimVectorT = BaseCppType("at", "DimVector")
61
+ layoutT = BaseCppType("at", "Layout")
62
+ deviceT = BaseCppType("at", "Device")
63
+ scalarT = BaseCppType("at", "Scalar")
64
+ optionalScalarRefT = BaseCppType("at", "OptionalScalarRef")
65
+ memoryFormatT = BaseCppType("at", "MemoryFormat")
66
+ qschemeT = BaseCppType("at", "QScheme")
67
+ storageT = BaseCppType("at", "Storage")
68
+ streamT = BaseCppType("at", "Stream")
69
+ intArrayRefT = BaseCppType("at", "IntArrayRef")
70
+ optionalIntArrayRefT = BaseCppType("at", "OptionalIntArrayRef")
71
+ optionalSymIntArrayRefT = BaseCppType("at", "OptionalSymIntArrayRef")
72
+ tensorOptionsT = BaseCppType("at", "TensorOptions")
73
+ typeAndSizeT = BaseCppType("torch::autograd::generated", "TypeAndSize")
74
+ tensorGeometryT = BaseCppType("at", "TensorGeometry")
75
+ SymIntT = BaseCppType("c10", "SymInt")
76
+ symIntArrayRefT = BaseCppType("c10", "SymIntArrayRef")
77
+
78
+ # Types representing template parameters. Technically, we probably shouldn't
79
+ # represent them this way in codegen, but it was pretty convenient.
80
+ scalar_t = BaseCppType("", "scalar_t")
81
+ opmath_t = BaseCppType("", "opmath_t")
82
+
83
+ ScalarTypeToCppMapping: Dict[ScalarType, BaseCppType] = {
84
+ ScalarType.Byte: byteT,
85
+ ScalarType.Char: charT,
86
+ ScalarType.Short: shortT,
87
+ ScalarType.Int: int32T,
88
+ ScalarType.Long: longT,
89
+ ScalarType.Half: halfT,
90
+ ScalarType.Float: floatT,
91
+ ScalarType.Double: doubleT,
92
+ ScalarType.ComplexHalf: complexHalfT,
93
+ ScalarType.ComplexFloat: complexFloatT,
94
+ ScalarType.ComplexDouble: complexDoubleT,
95
+ ScalarType.Bool: boolT,
96
+ ScalarType.BFloat16: bfloat16T,
97
+ }
98
+
99
+ BaseTypeToCppMapping: Dict[BaseTy, BaseCppType] = {
100
+ BaseTy.int: longT,
101
+ BaseTy.float: doubleT,
102
+ BaseTy.bool: boolT,
103
+ BaseTy.str: stringT,
104
+ BaseTy.Generator: generatorT,
105
+ BaseTy.ScalarType: scalarTypeT,
106
+ BaseTy.Tensor: tensorT,
107
+ BaseTy.Dimname: dimnameT,
108
+ BaseTy.DimVector: dimVectorT,
109
+ BaseTy.Layout: layoutT,
110
+ BaseTy.Device: deviceT,
111
+ BaseTy.Scalar: scalarT,
112
+ BaseTy.MemoryFormat: memoryFormatT,
113
+ BaseTy.QScheme: qschemeT,
114
+ BaseTy.Storage: storageT,
115
+ BaseTy.Stream: streamT,
116
+ BaseTy.SymInt: SymIntT,
117
+ }
118
+
119
+ # CTypes encode C++ type structure as needed for translation.
120
+
121
+
122
+ @dataclass(frozen=True)
123
+ class OptionalCType(CType):
124
+ elem: "CType"
125
+
126
+ def cpp_type(self, *, strip_ref: bool = False) -> str:
127
+ # Do not pass `strip_ref` recursively.
128
+ return f"c10::optional<{self.elem.cpp_type()}>"
129
+
130
+ def cpp_type_registration_declarations(self) -> str:
131
+ return f"c10::optional<{self.elem.cpp_type_registration_declarations()}>"
132
+
133
+ def remove_const_ref(self) -> "CType":
134
+ return OptionalCType(self.elem.remove_const_ref())
135
+
136
+
137
+ @dataclass(frozen=True)
138
+ class ListCType(CType):
139
+ elem: "CType"
140
+
141
+ def cpp_type(self, *, strip_ref: bool = False) -> str:
142
+ # Do not pass `strip_ref` recursively.
143
+ return f"c10::List<{self.elem.cpp_type()}>"
144
+
145
+ def cpp_type_registration_declarations(self) -> str:
146
+ return f"c10::List<{self.elem.cpp_type_registration_declarations()}>"
147
+
148
+ def remove_const_ref(self) -> "CType":
149
+ return ListCType(self.elem.remove_const_ref())
150
+
151
+
152
+ @dataclass(frozen=True)
153
+ class ArrayRefCType(CType):
154
+ elem: "CType"
155
+
156
+ def cpp_type(self, *, strip_ref: bool = False) -> str:
157
+ # Do not pass `strip_ref` recursively.
158
+ return f"at::ArrayRef<{self.elem.cpp_type()}>"
159
+
160
+ def cpp_type_registration_declarations(self) -> str:
161
+ return f"ArrayRef<{self.elem.cpp_type_registration_declarations()}>"
162
+
163
+ def remove_const_ref(self) -> "CType":
164
+ return ArrayRefCType(self.elem.remove_const_ref())
165
+
166
+
167
+ @dataclass(frozen=True)
168
+ class VectorizedCType(CType):
169
+ # This template is explicitly specialized, so the only valid
170
+ # elems are those we have specializations for (e.g., float, double, ...)
171
+ # scalar_t is also a common argument here (when we are codegen in
172
+ # a templated context)
173
+ elem: BaseCType
174
+
175
+ def cpp_type(self, *, strip_ref: bool = False) -> str:
176
+ return f"at::vec::Vectorized<{self.elem.cpp_type()}>"
177
+
178
+ def cpp_type_registration_declarations(self) -> str:
179
+ raise NotImplementedError
180
+
181
+ def remove_const_ref(self) -> "CType":
182
+ return self
wemm/lib/python3.10/site-packages/torchgen/api/types/types_base.py ADDED
@@ -0,0 +1,267 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Where should I add a new type? `types_base.py` vs `types.py`
3
+
4
+ This file defines data model classes for torchgen typing system, as well as some base types such as int32_t.
5
+
6
+ `types.py` defines ATen Tensor type and some c10 types, along with signatures that use these types.
7
+
8
+ The difference between these two files is that `types_base.py` should be implementation-agnostic, meaning it shouldn't
+ contain any type definition that is tied to a specific C++ library (e.g., ATen), so that it can be easily reused
+ if we want to generate code for another C++ library.
11
+
12
+ Add new types to `types.py` if these types are ATen/c10 related.
13
+ Add new types to `types_base.py` if they are basic and not attached to ATen/c10.
14
+ """
15
+ from abc import ABC
16
+ from dataclasses import dataclass
17
+ from enum import auto, Enum
18
+ from typing import List, Optional, Union
19
+
20
+ from torchgen.model import Argument, SelfArgument, TensorOptionsArguments
21
+
22
+ # An ArgName is just the str name of the argument in schema;
23
+ # but in some special circumstances, we may add a little extra
24
+ # context. The Enum SpecialArgName covers all of these cases;
25
+ # grep for their construction sites to see when they can occur.
26
+
27
+
28
class SpecialArgName(Enum):
    """Markers for arguments that need extra context beyond their schema name."""

    possibly_redundant_memory_format = auto()
30
+
31
+
32
+ ArgName = Union[str, SpecialArgName]
33
+
34
+
35
+ # This class shouldn't be created directly; instead, use/create one of the singletons below.
36
@dataclass(frozen=True)
class BaseCppType:
    """A non-templated C++ type: a name qualified by an optional namespace."""

    ns: Optional[str]
    name: str

    def __str__(self) -> str:
        # A missing or empty namespace renders the bare name (e.g. "int64_t").
        if self.ns:
            return f"{self.ns}::{self.name}"
        return self.name
45
+
46
+
47
# The set of all non-templated, valid, fully-qualified names of C++ types that are used in the codegen.
# Templated types get their own dataclass, mainly to make namespace parsing easier.
byteT = BaseCppType("", "uint8_t")
charT = BaseCppType("", "int8_t")
shortT = BaseCppType("", "int16_t")
# It would be more symmetric for this to be called intT, but it is easy to mix
# this up with JIT int (which is int64_t in C++), so we intentionally don't
# define intT to make it obvious when you've stuffed it up
int32T = BaseCppType("", "int32_t")
longT = BaseCppType("", "int64_t")
doubleT = BaseCppType("", "double")
floatT = BaseCppType("", "float")
boolT = BaseCppType("", "bool")
voidT = BaseCppType("", "void")
61
+
62
+
63
class CType(ABC):
    """Abstract base for every C++ type representation in the codegen."""

    def remove_const_ref(self) -> "CType":
        # Default: most types carry no const-ref wrapper to strip.
        return self

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # Subclasses must render themselves as C++ source syntax.
        raise NotImplementedError

    def cpp_type_registration_declarations(self) -> str:
        # Legacy rendering for RegistrationDeclarations.yaml (no at:: namespace).
        raise NotImplementedError
72
+
73
+
74
@dataclass(frozen=True)
class BaseCType(CType):
    """Leaf CType wrapping a concrete, non-templated BaseCppType."""

    type: BaseCppType

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        return str(self.type)

    # For BC reasons, we don't want to introduce at:: namespaces to RegistrationDeclarations.yaml
    # TODO: Kill this when we eventually remove it!
    def cpp_type_registration_declarations(self) -> str:
        rendered = str(self.type)
        return rendered.replace("at::", "")

    def remove_const_ref(self) -> "CType":
        return self
88
+
89
+
90
@dataclass(frozen=True)
class ConstRefCType(CType):
    """Decorator type rendering ``const T &`` around its element."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        if not strip_ref:
            return f"const {self.elem.cpp_type()} &"
        # Stripping refs erases this wrapper entirely.
        return self.elem.cpp_type(strip_ref=True)

    def cpp_type_registration_declarations(self) -> str:
        return f"const {self.elem.cpp_type_registration_declarations()} &"

    def remove_const_ref(self) -> "CType":
        return self.elem.remove_const_ref()
104
+
105
+
106
@dataclass(frozen=True)
class VectorCType(CType):
    """C++ type rendering ``::std::vector<elem>``."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` intentionally does not propagate to the element type.
        return "::std::vector<" + self.elem.cpp_type() + ">"

    def cpp_type_registration_declarations(self) -> str:
        return "::std::vector<" + self.elem.cpp_type_registration_declarations() + ">"

    def remove_const_ref(self) -> "CType":
        return VectorCType(self.elem.remove_const_ref())
119
+
120
+
121
@dataclass(frozen=True)
class ArrayCType(CType):
    """C++ type rendering ``::std::array<elem,size>``."""

    elem: "CType"
    size: int

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` intentionally does not propagate to the element type.
        return "::std::array<{},{}>".format(self.elem.cpp_type(), self.size)

    def cpp_type_registration_declarations(self) -> str:
        return "::std::array<{},{}>".format(
            self.elem.cpp_type_registration_declarations(), self.size
        )

    def remove_const_ref(self) -> "CType":
        return ArrayCType(self.elem.remove_const_ref(), self.size)
135
+
136
+
137
@dataclass(frozen=True)
class TupleCType(CType):
    """C++ type rendering ``::std::tuple<...>`` over a list of element types."""

    elems: List["CType"]

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        # `strip_ref` intentionally does not propagate to the element types.
        parts = ",".join(e.cpp_type() for e in self.elems)
        return f"::std::tuple<{parts}>"

    def cpp_type_registration_declarations(self) -> str:
        parts = ",".join(
            e.cpp_type_registration_declarations() for e in self.elems
        )
        return f"::std::tuple<{parts}>"

    def remove_const_ref(self) -> "CType":
        return TupleCType([e.remove_const_ref() for e in self.elems])
150
+
151
+
152
@dataclass(frozen=True)
class MutRefCType(CType):
    """Decorator type rendering a mutable reference ``T &``."""

    elem: "CType"

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        if not strip_ref:
            return f"{self.elem.cpp_type()} &"
        # Stripping refs erases this wrapper entirely.
        return self.elem.cpp_type(strip_ref=True)

    def cpp_type_registration_declarations(self) -> str:
        return f"{self.elem.cpp_type_registration_declarations()} &"

    def remove_const_ref(self) -> "CType":
        return self.elem.remove_const_ref()
166
+
167
+
168
+ # A NamedCType is short for Named C++ semantic type. A NamedCType represents a C++ type, plus
169
+ # semantic information about what it represents. For example, consider the
170
+ # argument "bool pin_memory"; its normal C++ type is "bool", but its C++
171
+ # semantic type also keeps track that this represents a "pin_memory"; you can't
172
+ # just use a random other boolean in a context where you need a "pin_memory"!
173
+ #
174
+
175
+
176
@dataclass(frozen=True)
class NamedCType:
    """A C++ type paired with the semantic name it binds to.

    The name carries meaning: a ``bool pin_memory`` is not interchangeable
    with any other bool, so both pieces travel together.
    """

    name: ArgName
    type: CType

    def cpp_type(self, *, strip_ref: bool = False) -> str:
        return self.type.cpp_type(strip_ref=strip_ref)

    # For BC reasons, we don't want to introduce at:: namespaces to RegistrationDeclarations.yaml
    # TODO: Kill this when we eventually remove it!
    def cpp_type_registration_declarations(self) -> str:
        return self.type.cpp_type_registration_declarations()

    def remove_const_ref(self) -> "NamedCType":
        stripped = self.type.remove_const_ref()
        return NamedCType(self.name, stripped)

    def with_name(self, name: str) -> "NamedCType":
        # Rebind the same type under a different name.
        return NamedCType(name, self.type)
194
+
195
+
196
+ # A binding represents any C++ binding site for a formal parameter.
197
+ # We don't distinguish between binding sites for different APIs;
198
+ # instead, all of the important distinctions are encoded in CType,
199
+ # which you can use to figure out if a given Binding is appropriate
200
+ # for use in another context. (See torchgen.api.translate)
201
+
202
+
203
@dataclass(frozen=True)
class Binding:
    """A C++ binding site for one formal parameter.

    Binding sites for different APIs are not distinguished here; the
    important distinctions live in the CType (see torchgen.api.translate).
    """

    name: str
    nctype: NamedCType
    argument: Union[Argument, TensorOptionsArguments, SelfArgument]
    # TODO: maybe don't represent default here
    default: Optional[str] = None

    @property
    def type(self) -> str:
        # The rendered C++ type of this binding.
        return self.nctype.cpp_type()

    def rename(self, name: str) -> "Binding":
        return Binding(
            name=name,
            nctype=self.nctype,
            argument=self.argument,
            default=self.default,
        )

    def with_name(self, name: str) -> "Binding":
        return Binding(
            name=name, nctype=self.nctype, argument=self.argument, default=self.default
        )

    def no_default(self) -> "Binding":
        # Same binding with any default value dropped.
        return Binding(
            name=self.name,
            nctype=self.nctype,
            default=None,
            argument=self.argument,
        )

    def decl(self, *, func_ptr_cast: bool = False) -> str:
        # Casting to a function pointer only needs the type, not name/default.
        if func_ptr_cast:
            return f"{self.type}"
        mb_default = "" if self.default is None else f"={self.default}"
        return f"{self.type} {self.name}{mb_default}"

    # For BC reasons, we don't want to introduce at:: namespaces to RegistrationDeclarations.yaml
    # TODO: Kill this when we eventually remove it!
    def decl_registration_declarations(self) -> str:
        type_s = self.nctype.cpp_type_registration_declarations()
        mb_default = "" if self.default is None else f"={self.default}"
        return f"{type_s} {self.name}{mb_default}"

    def defn(self) -> str:
        # Definition form: never carries a default.
        return f"{self.type} {self.name}"
258
+
259
+
260
+ # An Expr is a C++ expression. It has a C++ string representing its syntax,
261
+ # as well as a CType saying what it provides.
262
+
263
+
264
@dataclass(frozen=True)
class Expr:
    """A C++ expression: its source text plus the NamedCType it provides."""

    # C++ source text of the expression.
    expr: str
    # Semantic type (name + CType) that evaluating the expression yields.
    type: NamedCType
wemm/lib/python3.10/site-packages/torchgen/dest/lazy_ir.py ADDED
@@ -0,0 +1,710 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+ from abc import ABC
3
+ from dataclasses import dataclass
4
+ from typing import Any, Dict, List, Optional, Tuple, Union
5
+
6
+ import torchgen.api.dispatcher as dispatcher
7
+ from torchgen.api.lazy import (
8
+ getValueT,
9
+ isValueType,
10
+ LazyArgument,
11
+ LazyIrProperties,
12
+ LazyIrSchema,
13
+ tensorListValueT,
14
+ )
15
+ from torchgen.api.translate import translate
16
+ from torchgen.api.types import (
17
+ BaseCType,
18
+ Binding,
19
+ deviceT,
20
+ DispatcherSignature,
21
+ kernel_signature,
22
+ NativeSignature,
23
+ OptionalCType,
24
+ VectorCType,
25
+ )
26
+ from torchgen.context import method_with_native_function
27
+ from torchgen.dest.lazy_ts_lowering import ts_lowering_body
28
+ from torchgen.model import (
29
+ Argument,
30
+ BackendIndex,
31
+ BackendMetadata,
32
+ BaseTy,
33
+ BaseType,
34
+ FunctionSchema,
35
+ ListType,
36
+ NativeFunction,
37
+ NativeFunctionsGroup,
38
+ )
39
+
40
+
41
def node_ctor_arg_rvalue_string(arg: LazyArgument) -> str:
    """
    Given a LazyArgument,
    generate a c++ string for materializing an rvalue of that arg for passing into
    a lazy Node constructor.
    """

    # TODO: Matching on CType seems wrong; should be matching on Type
    if isValueType(arg.lazy_type):
        if isinstance(arg.lazy_type, BaseCType):
            if arg.is_wrapped_scalar:
                # Wrapped scalars were already lifted to IR values named node_<name>
                # (see GenLazyNativeFuncDefinition.lazy_tensor_decls).
                return f"node_{arg.name}"
            elif arg.lazy_type.type is tensorListValueT:
                return f"lazy_{arg.name}_tensorlist"
            elif arg.is_symint_or_list:
                return f"GetSymIntValue({arg.name})"
            return f"lazy_{arg.name}->GetIrValue()"
        elif isinstance(arg.lazy_type, OptionalCType):
            if arg.is_symint_or_list:
                # TODO: I don't understand when you should put lazy_ in the name
                # or not
                return f"{arg.name} ? c10::make_optional(GetSymIntValue(*{arg.name})) : c10::nullopt"
            elif arg.is_wrapped_scalar:
                return f"node_{arg.name}"
            return (
                f"lazy_{arg.name} ? "
                f"c10::make_optional(lazy_{arg.name}->GetIrValue()) : "
                "c10::nullopt"
            )
        else:
            raise AssertionError(
                f"TODO not sure if there are other valid types to handle here ({arg.lazy_type})"
            )
    else:
        # NB: this is here because right now we aren't treating SymInt[] as a
        # value type; when we do this needs to move above
        # NB: we cannot test arg.lazy_type as we've already specified it is an
        # int64_t and so we cannot distinguish between SymInt and int64_t
        if isinstance(arg.orig_type, ListType) and arg.orig_type.elem == BaseType(
            BaseTy.SymInt
        ):
            if arg.symint:
                return f"GetSymIntArrayRefValue({arg.name})"
            else:
                return f"std::vector<int64_t>({arg.name}.begin(), {arg.name}.end())"
        elif isinstance(arg.lazy_type, VectorCType) and isinstance(
            arg.lazy_type.elem, BaseCType
        ):
            # Copy a (presumably non-owning) range argument into an owning std::vector.
            return f"std::vector<{arg.lazy_type.elem.type}>({arg.name}.begin(), {arg.name}.end())"
        elif (
            isinstance(arg.lazy_type, OptionalCType)
            and isinstance(arg.lazy_type.elem, VectorCType)
            and isinstance(arg.lazy_type.elem.elem, BaseCType)
        ):
            return f"torch::lazy::ToOptionalVector<{arg.lazy_type.elem.elem.type}>({arg.name})"
        else:
            # Plain scalar-ish arguments pass through by name unchanged.
            return f"{arg.name}"
98
+
99
+
100
def node_ctor_inputs(schema: LazyIrSchema) -> str:
    """
    Produce a formatted string with the arguments as passed into the constructor of a node class.
    """
    return ", ".join(
        node_ctor_arg_rvalue_string(arg) for arg in schema.filtered_args()
    )
108
+
109
+
110
def gen_fallback_code(
    schema: LazyIrSchema,
    sig: Union[DispatcherSignature, NativeSignature],
    overload_name: str,
) -> str:
    """
    Generate code that falls back to eager conditioned on a predicate
    """
    # Translate from the kernel signature's bindings to dispatcher-convention
    # expressions so the fallback call site type-checks.
    dispatcher_sig = DispatcherSignature.from_schema(schema.func)
    exprs = translate(sig.arguments(), dispatcher_sig.arguments())
    fallback_args = ",\n                ".join([a.expr for a in exprs])
    # ATEN_OP2 carries the overload name; ATEN_OP is the unoverloaded form.
    if len(overload_name):
        aten_op_str = f"ATEN_OP2({schema.aten_name}, {overload_name})"
    else:
        aten_op_str = f"ATEN_OP({schema.aten_name})"
    or_has_generator = ""
    if schema.generator_arg:
        # generators are always optional and there is never more than one, at least currently
        or_has_generator = f" || ({schema.generator_arg.name}.has_value() && {schema.generator_arg.name}->defined())"
    return f"""
        if (force_eager_fallback({aten_symbol(schema)}){or_has_generator}) {{
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, {aten_op_str}>::call(
                {fallback_args}
            );
        }}
"""
136
+
137
+
138
+ def aten_symbol(schema: LazyIrSchema) -> str:
139
+ missing_interned_strings = {
140
+ "sigmoid_backward",
141
+ }
142
+ if schema.aten_name in missing_interned_strings:
143
+ return f'c10::Symbol::fromQualString("aten::{schema.aten_name}")'
144
+
145
+ if not schema.aten_name.startswith("at::"):
146
+ return f"at::aten::{schema.aten_name}"
147
+ else:
148
+ return schema.aten_name
149
+
150
+
151
+ # converts all tensor-like arguments to meta tensors. Returns:
152
+ # (1) a string containing all of the logic that does the conversions.
153
+ # (2) a context, to be used by translate(), with all of the relevant bindings.
154
def convert_to_meta_tensors(sig: DispatcherSignature) -> Tuple[str, List[Binding]]:
    """Convert all tensor-like arguments of `sig` to meta tensors.

    Returns:
    (1) a string containing all of the logic that does the conversions.
    (2) a context, to be used by translate(), with all of the relevant bindings.
    """
    context: List[Binding] = []
    unwrapped_tensor_args: List[str] = []
    for arg in sig.arguments():
        tensor_like = (
            isinstance(arg.argument, Argument) and arg.argument.type.is_tensor_like()
        )
        if not tensor_like:
            context.append(arg)
            continue
        unwrapped_name = f"{arg.name}_meta"
        unwrapped_tensor_args.append(f"auto {unwrapped_name} = to_meta({arg.name});")
        context.append(arg.with_name(unwrapped_name))
    unwrap_tensor_args_str = "\n        ".join(unwrapped_tensor_args)
    return unwrap_tensor_args_str, context
168
+
169
+
170
@dataclass(frozen=True)
class GenLazyIR(ABC):
    """Generates the C++ IR node class declaration for one native function.

    Subclasses (per backend) customize lowering/create/reuse code.
    """

    backend_index: BackendIndex
    backend_name: str
    # Name of the C++ base class every generated node derives from.
    node_base: str
    use_lazy_shape: bool

    @method_with_native_function
    def __call__(self, f: Union[NativeFunctionsGroup, NativeFunction]) -> List[str]:
        # For grouped functions, generate IR from the functional variant only.
        func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func
        metadata = self.backend_index.get_kernel(
            f.functional if isinstance(f, NativeFunctionsGroup) else f
        )
        schema = LazyIrSchema(
            func, symint=metadata is not None and metadata.supports_symint()
        )
        return self.gen(schema)

    # there is no lowering functionality generated unless this IR base class is subclassed and
    # implemented as a backend-specific node
    def lowering_function(self, schema: LazyIrSchema) -> str:
        return ""

    def create_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
        return ""

    def can_be_reused_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
        # Conservative default: nodes are never reusable.
        return f"""bool CanBeReused({node_ctor_args}) const {{
    return false;
    }}"""

    def node_base_ctor_call(self, schema: LazyIrSchema) -> str:
        value_args = schema.filtered_args(values=True, scalars=False)
        # backends can customize the way the node base class constructor is called,
        # as long as all of its arguments can be generated from information available from the schema
        base_ctor_value_args_list = []
        for arg in value_args:
            if isinstance(arg.lazy_type, BaseCType) or isinstance(
                arg.lazy_type, VectorCType
            ):
                base_ctor_value_args_list.append(f"{arg.name}")
            elif isinstance(arg.lazy_type, OptionalCType):
                # Absent optional values are represented by kNullValue.
                base_ctor_value_args_list.append(f"{arg.name}.value_or(kNullValue)")
            else:
                raise AssertionError(
                    f"Unsupported type ({arg.lazy_type}) - add support if necessary"
                )
        base_ctor_value_args = ", ".join(base_ctor_value_args_list)

        scalar_args = schema.filtered_args(values=False, scalars=True)

        # Shape constuction.
        # Conditionally build shape depending on specified shape property
        if schema.properties.ShapePrecompute:
            shape_ctor_arg = "std::move(shapes),"
        elif schema.properties.ShapeCompute:
            shape_args = [a.name for a in value_args]
            shape_args.extend(a.name for a in scalar_args)
            shape_ctor_arg = f"compute_shape_{schema.name}({', '.join(shape_args)}),"
        elif schema.properties.ShapeCache:
            shape_args = [f"operand({i})" for i in range(len(value_args))]
            shape_args.extend(a.name for a in scalar_args)
            shape_ctor_arg = f"[&](){{ return compute_shape_{schema.name}({', '.join(shape_args)})[0]; }},"
        else:
            shape_ctor_arg = ""

        scalar_hashes = ", ".join(f"{a.name}" for a in scalar_args)

        return f"""{self.node_base}(
              {schema.node_name}::ClassOpKind(),
              OpList{{{base_ctor_value_args}}},
              {shape_ctor_arg}
              /* num_outputs */ {len(schema.returns)},
              torch::lazy::MHash({scalar_hashes}))"""

    def gen(self, schema: LazyIrSchema) -> List[str]:
        opkind = schema.opkind or aten_symbol(schema)

        # for now, we just want one IR class decl and soon after also the method defs
        # and we use the functional version not out/inplace.
        all_args = schema.filtered_args()
        value_args = schema.filtered_args(values=True, scalars=False)
        scalar_args = schema.filtered_args(values=False, scalars=True)

        ctor_args = [f"const {i.lazy_type.cpp_type()}& {i.name}" for i in all_args]
        reuse_ctor_args = ", ".join(ctor_args)
        if self.use_lazy_shape and schema.properties.ShapePrecompute:
            ctor_args.append("std::vector<torch::lazy::Shape>&& shapes")
        node_ctor_args = ", ".join(ctor_args)

        scalar_initializers = ",\n        ".join(
            [
                # This code is just special casing the mapping from string_view -> strings
                f"{a.name}({a.name}.has_value() ? c10::make_optional(std::string(*{a.name})) : c10::nullopt)"
                if a.lazy_type.cpp_type() == "c10::optional<c10::string_view>"
                else f"{a.name}({a.name})"
                for a in scalar_args
            ]
        )
        if len(scalar_initializers):
            scalar_initializers = f",\n        {scalar_initializers}"
        # string_view members are stored as owning std::string copies.
        scalar_decls = "\n  ".join(
            [
                f"std::string {a.name};"
                if a.lazy_type.cpp_type() == "c10::string_view"
                else f"c10::optional<std::string> {a.name};"
                if a.lazy_type.cpp_type() == "c10::optional<c10::string_view>"
                else f"{a.lazy_type.cpp_type()} {a.name};"
                for a in scalar_args
            ]
        )
        optional_values = [
            arg.name
            for arg in schema.filtered_args(values=True, scalars=False)
            if isinstance(arg.lazy_type, OptionalCType)
        ]
        # One bitfield flag per optional value recording presence.
        has_optional_decls = "\n  ".join(
            [f"bool has_{value}: 1;" for value in optional_values]
        )
        has_optional_defs = "\n    ".join(
            [f"has_{value} = !!{value};" for value in optional_values]
        )
        members_to_string = []
        for arg in scalar_args:
            if isinstance(arg.lazy_type, OptionalCType):
                members_to_string.append(
                    f"""if ({arg.name}.has_value()) {{
      ss << ", {arg.name}=" << {arg.name}.value();
    }} else {{
      ss << ", {arg.name}=null";
    }}"""
                )
            else:
                members_to_string.append(f'ss << ", {arg.name}=" << {arg.name};')
        members_to_string_str = "\n    ".join(members_to_string)

        return [
            f"""\
class {schema.node_name} : public {self.node_base} {{
 public:
  static torch::lazy::OpKind ClassOpKind() {{
    return torch::lazy::OpKind({opkind});
  }}

  {schema.node_name}({node_ctor_args})
      : {self.node_base_ctor_call(schema)}{scalar_initializers}
  {{
    {has_optional_defs}
  }}

  std::string ToString() const override {{
    std::stringstream ss;
    ss << {self.node_base}::ToString();
    {members_to_string_str}
    return ss.str();
  }}

  {self.create_function(schema, reuse_ctor_args)}

  {self.can_be_reused_function(schema, reuse_ctor_args)}

  {self.lowering_function(schema)}

  {scalar_decls}
  {has_optional_decls}

}};

""",
        ]
340
+
341
+
342
@dataclass(frozen=True)
class GenTSLazyIR(GenLazyIR):
    """TorchScript-backend specialization: emits Lower/Create/CanBeReused."""

    def lowering_function(self, schema: LazyIrSchema) -> str:
        signature = """
  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override"""

        # Properties control whether we emit a declaration only, a full
        # definition, or nothing at all.
        if schema.properties.LowerDeclOnly:
            return f"{signature};"
        elif schema.properties.Lower:
            return f"""{signature} {{
    {ts_lowering_body(schema)}
  }}
            """
        else:
            return ""

    def create_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
        signature = f"static NodePtr Create({node_ctor_args})"
        if schema.properties.CreateFnDeclOnly:
            return f"{signature};"
        elif not schema.properties.CreateFn:
            return ""
        return f"""{signature} {{
    return ReuseOrMakeNode<{schema.node_name}>(data);
  }}"""

    def can_be_reused_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
        signature = f"bool CanBeReused({node_ctor_args}) const"
        if schema.properties.CanBeReusedDeclOnly:
            return f"{signature};"
        elif not schema.properties.CanBeReused:
            return ""
        value_comparison = []
        # Value operands are compared positionally via operand(i++);
        # optional values compare against kNullValue when absent.
        for arg in itertools.chain(schema.positional_values, schema.keyword_values):
            if isinstance(arg.lazy_type, OptionalCType):
                value_comparison.append(
                    f"nullable_operand(i++) == {arg.name}.value_or(kNullValue)"
                )
            else:
                value_comparison.append(f"operand(i++) == {arg.name}")
        # Scalars compare by value; optionals must agree on presence AND value.
        for arg in itertools.chain(schema.positional_scalars, schema.keyword_scalars):
            if isinstance(arg.lazy_type, OptionalCType):
                value_comparison.append(
                    f"((!this->{arg.name}&&!{arg.name}) || (this->{arg.name}&&{arg.name} && *(this->{arg.name}) == *{arg.name}))"
                )
            else:
                value_comparison.append(f"this->{arg.name} == {arg.name}")
        value_comparison_str = " &&\n        ".join(value_comparison)

        return f"""{signature} {{
    size_t i = 0;
    return ({value_comparison_str});
  }}"""
397
+
398
+
399
+ @dataclass(frozen=True)
400
+ class GenLazyNativeFuncDefinition:
401
+ class_method_name: str
402
+ backend_index: BackendIndex
403
+ tensor_class: str
404
+ gen_forced_fallback_code: bool
405
+ backend_namespace: str
406
+ get_tensorlist: str
407
+ get_tensor_or_wrap_number: str
408
+ try_get_tensor: str
409
+ metrics_counter: str
410
+ create_tensor: str
411
+ create_from_first_tensor: bool
412
+ create_aten_from_ltc_tensor: str
413
+ tuple_aten_from_ltc_tensors: str
414
+ lazy_tensor_ptr: str
415
+ get_device_fn: str
416
+
417
    def lazy_tensor_decls(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        value_args = schema.filtered_args(values=True, scalars=False)
        # Generates lazy_{name} variables for LazyTensors wrapping input tensors
        lazy_tensor_decls: List[str] = []
        for arg in value_args:
            if arg.is_wrapped_scalar:
                # Wrapped scalars become IR values directly (node_<name>),
                # preserving optionality.
                if isinstance(arg.lazy_type, OptionalCType):
                    lazy_tensor_decls.append(
                        f"""auto node_{arg.name} = {arg.name} ?
                c10::make_optional(torch::lazy::LazyGraphExecutor::Get()->
                    GetIrValueForScalarFromCodegen(*{arg.name}, *common_device)):
                c10::nullopt;"""
                    )
                else:
                    lazy_tensor_decls.append(
                        f"""auto node_{arg.name} = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen({arg.name}, *common_device);"""
                    )
            elif arg.is_symint_or_list:
                continue  # values are extracted in isValueType
            elif isinstance(arg.lazy_type, BaseCType):
                if arg.lazy_type.type is tensorListValueT:
                    lazy_tensor_decls.append(
                        f"auto lazy_{arg.name}_tensorlist = "
                        f"{self.backend_namespace}::{self.get_tensorlist}({arg.name});"
                    )
                else:
                    lazy_tensor_decls.append(
                        f"{self.lazy_tensor_ptr} lazy_{arg.name} = "
                        f"{self.backend_namespace}::{self.get_tensor_or_wrap_number}({arg.name}, *common_device);"
                    )
            elif isinstance(arg.lazy_type, OptionalCType):
                assert arg.lazy_type.elem == BaseCType(getValueT()), arg.lazy_type.elem
                # TODO(alanwaketan): Maybe we want to apply GetLtcTensorOrCreateForWrappedNumber here, but hold it
                # until we encounter a real world example.
                lazy_tensor_decls.append(
                    f"{self.lazy_tensor_ptr} lazy_{arg.name} = "
                    f"{self.backend_namespace}::{self.try_get_tensor}({arg.name}.value_or(at::Tensor()));"
                )
            else:
                raise AssertionError(
                    f"TODO not sure if there are other valid types to handle here ({arg.lazy_type})"
                )
        return ("\n        ").join(lazy_tensor_decls)
461
+
462
+ def force_eager_fallback(
463
+ self,
464
+ func: NativeFunction,
465
+ schema: LazyIrSchema,
466
+ metadata: BackendMetadata,
467
+ sig: Union[DispatcherSignature, NativeSignature],
468
+ ) -> str:
469
+ if self.gen_forced_fallback_code:
470
+ return gen_fallback_code(
471
+ schema, sig, overload_name=func.func.name.overload_name
472
+ )
473
+ return ""
474
+
475
    def metrics(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        # Emit the backend-provided counter statement (self.metrics_counter is
        # configured at construction time).
        return f"{self.metrics_counter};"
477
+
478
    def get_device(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        # Emit code computing the common device from tensor-valued args and/or
        # any optional<Device> scalar args; at least one source must exist.
        value_args = schema.filtered_args(values=True, scalars=False)
        scalar_args = schema.filtered_args(values=False, scalars=True)
        # Wrapped scalars don't carry a device; exclude them.
        value_types_names = [f"{a.name}" for a in value_args if not a.is_wrapped_scalar]
        optional_device = OptionalCType(BaseCType(deviceT))
        optional_devices = [
            a.name for a in scalar_args if a.lazy_type == optional_device
        ]
        assert (
            len(value_types_names) > 0 or len(optional_devices) > 0
        ), "Expected at least one Value or Device type"
        get_device_str = (
            f"{self.get_device_fn}({', '.join(value_types_names + optional_devices)})"
        )
        return f"""auto common_device = {get_device_str};
        TORCH_INTERNAL_ASSERT(common_device);
        """
495
+
496
    def shape_inference(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        metadata = self.backend_index.get_kernel(func)
        assert metadata is not None
        all_args = schema.filtered_args()
        returns_length = len(schema.returns)
        # call the meta kernel if it exists, to compute output shape/dtype for our IR
        # Note [Generated LTC Shape Functions]
        # LTC uses meta tensors from core to do shape inference when possible, and otherwise
        # we generate a shape function declaration that needs to be manually implemented.
        # How do we detect which ops are eligible to use meta tensors?
        # In general we should be able to use meta tensors not just on structured operators,
        # but also on composite operators that are implemented in terms of structured kernels.
        # We don't currently have a way of knowing at codegen time which ops are implemented that way.
        # This is the case for all view and view_copy operators however, so we're going to
        # use them specifically for all of the view_copy ops (instead of manually writing shape rules for all of them).
        is_view_copy_op = "view_copy" in func.tags
        is_structured = func.structured or func.structured_delegate is not None
        if is_structured or is_view_copy_op:
            meta_out = """
        std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};"""
            if returns_length > 1:
                # Multi-return meta kernels yield a tuple; pull each element's shape.
                def this_shape(i: int) -> str:
                    return f"torch::lazy::Shape(std::get<{i}>(out_meta).scalar_type(), std::get<{i}>(out_meta).sizes().vec())"

                shapes_str = ",".join([this_shape(i) for i in range(returns_length)])
                meta_out = "std::vector<torch::lazy::Shape> shapes{" + shapes_str + "};"

            # Convert tensor args to the meta device and call it.
            # (We can't pass in the input tensors directly, because they are "functional wrappers".
            # If any of the meta kernels call a tensor op and redispatch, we don't want to hit the functionalize kernels.)
            # Even at::meta:: functions might redispatch, e.g. if they call into view ops.
            dispatcher_sig = DispatcherSignature.from_schema(func.func)
            meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
            meta_call_args = [
                e.expr
                for e in translate(
                    meta_call_ctx, dispatcher_sig.arguments(), method=False
                )
            ]
            if is_view_copy_op:
                # view_copy ops always have a CompositeExplicitAutogradNonFunctional kernel
                assert func.has_composite_explicit_autograd_non_functional_kernel
                dispatch_ns = "compositeexplicitautogradnonfunctional"
            else:
                dispatch_ns = "meta"
            aten_name = schema.aten_name
            # TODO: this is trolling
            if func.func.has_symint() and metadata.supports_symint():
                aten_name += "_symint"
            shape_str = f"""\
        {meta_conversion_str}
        auto out_meta = at::{dispatch_ns}::{aten_name}({', '.join(meta_call_args)});
        {meta_out}"""
        else:
            # No meta kernel available: call a hand-written compute_shape_* fn.
            shape_sig = ComputeShapeSignature(
                metadata.kernel, func, symint=metadata.supports_symint()
            )
            shape_str = f"""
            auto shapes = {shape_sig.shape_call};"""

        shape_str += f"""
            TORCH_INTERNAL_ASSERT(shapes.size() == {returns_length});"""

        # Calculating which dimensions are symbolic
        func_schema_str = "aten::" + str(func.func)
        shape_str += f"""
            if(torch::lazy::symbolicShapeEnabled()){{
                std::vector<torch::jit::IValue> inputs = {{ {', '.join(str(a.name) for a in all_args)} }};
                const char* schema_str = "{func_schema_str}";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }}
        """
        return shape_str
570
+
571
    def build_ir_node(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        """Emit C++ that obtains the lazy IR node for this op.

        The generated code first tries to reuse a cached node
        (torch::lazy::ReuseNode); on a cache miss it runs shape inference
        (which defines a local `shapes` vector — see shape_inference above),
        builds a fresh node, and caches it.
        """
        node_ctor_input_str = node_ctor_inputs(schema)
        # NOTE: the literal indentation below is emitted verbatim into the
        # generated .cpp file, so it must stay exactly as written.
        return f"""torch::lazy::NodePtr node = torch::lazy::ReuseNode<{schema.node_name}>({node_ctor_input_str});
        if (!node) {{
            {self.shape_inference(func, schema)}
            node = torch::lazy::MakeNode<{schema.node_name}>({node_ctor_input_str}, std::move(shapes));
            CacheNode(node);
        }}
        """
580
+
581
+ def create_lazy_tensor(self, first_tensor_name: Optional[str] = None) -> str:
582
+ # xla uses an instance method for tensor creation, for the time being
583
+ if self.create_from_first_tensor:
584
+ # TODO(whc) remove this if XLA switches to using static method for creation
585
+ assert (
586
+ first_tensor_name is not None
587
+ ), "Requires first tensor to create lazy tensor"
588
+ return f"{first_tensor_name}.{self.create_tensor}"
589
+ return f"{self.backend_namespace}::{self.create_tensor}"
590
+
591
    def return_aten_tensor(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        """Emit C++ that wraps the IR node back into at::Tensor result(s).

        Three shapes of bridge code, chosen in increasing priority:
        1. single functional output: wrap node output 0;
        2. multiple outputs: wrap each node output and pack into a tuple;
        3. inplace/out variants: set the node on the existing lazy tensor and
           alias `result` to the input (asserted single-output).
        """
        returns_length = len(schema.returns)
        value_args = schema.filtered_args(values=True, scalars=False)
        # Wrapped scalars are not real tensor args, so they can't seed creation.
        value_types_names = [f"{a.name}" for a in value_args if not a.is_wrapped_scalar]
        first_tensor_name = value_types_names[0] if len(value_types_names) > 0 else None
        bridge_str = f"""auto result = {self.create_aten_from_ltc_tensor}(
                {self.create_lazy_tensor(first_tensor_name)}(std::move(node), *common_device));"""

        if returns_length > 1:
            assert (
                len(value_types_names) > 0
            ), "Code below assumes there is at least one tensor arg"
            bridge_str = f"""std::vector<{self.lazy_tensor_ptr}> lazy_tensors;
        for (int i = 0; i < {returns_length}; i++) {{
            lazy_tensors.push_back({self.create_lazy_tensor(first_tensor_name)}({getValueT()}(node, i), *common_device));
        }}
        auto result = {self.tuple_aten_from_ltc_tensors}<{returns_length}>(lazy_tensors);"""

        if schema.name.name.inplace or func.func.is_out_fn():
            assert returns_length == 1, (
                "We assumed there was no such case where an op is an in-place variant "
                f"and has tuple outputs, but got tuple of len {returns_length}."
            )
            # `lazy_<name>` is the lazy-tensor local declared by lazy_tensor_decls.
            bridge_str = f"""lazy_{first_tensor_name}->SetInPlaceIrValue(node);
        auto& result = {first_tensor_name};"""

        bridge_str += """
        return result;"""
        return bridge_str
620
+
621
    @method_with_native_function
    def __call__(self, func: NativeFunction) -> List[str]:
        """Generate the full native-function kernel definition for one lazy op
        by stitching together the fallback / metrics / device / tensor-decl /
        IR-node / return fragments produced by the sibling methods."""
        sig = kernel_signature(func, self.backend_index)
        metadata = self.backend_index.get_kernel(func)
        assert metadata is not None
        schema = LazyIrSchema(func.func, symint=metadata.supports_symint())
        return [
            f"""\
    {sig.decl(name=f"{self.class_method_name}::{metadata.kernel}")} {{
        {self.force_eager_fallback(func, schema, metadata, sig)}
        {self.metrics(func, schema)}
        {self.get_device(func, schema)}
        {self.lazy_tensor_decls(func, schema)}
        {self.build_ir_node(func, schema)}
        {self.return_aten_tensor(func, schema)}
    }}\n
    """
        ]
639
+
640
+
641
class ComputeShapeSignature:
    """Builds the C++ declaration and call expression for a hand-written
    ``compute_shape_*`` function.

    Here we use the base name as the suffix of the signature to avoid
    generating for in-place variants.
    """

    def __init__(self, kernel_name: str, f: NativeFunction, *, symint: bool):
        schema = LazyIrSchema(f.func, symint=symint)
        self.__schema = schema
        self.__kernel_name = kernel_name
        # Full parameter list for the declaration ...
        self.__dispatch_args = ", ".join(
            a.decl() for a in dispatcher.arguments(f.func, symint=symint)
        )
        # ... and the bare argument names for the call site.
        self.__call_args = ", ".join(
            str(arg.name) for arg in schema.filtered_args(generator=True)
        )

    def __decl_suffix(self) -> str:
        return f"{self.__kernel_name}({self.__dispatch_args})"

    def __call_suffix(self) -> str:
        return f"{self.__kernel_name}({self.__call_args})"

    @property
    def shape_decl(self) -> str:
        """Forward declaration, e.g. for LazyShapeInference.h."""
        suffix = self.__decl_suffix()
        return f"TORCH_API std::vector<torch::lazy::Shape> compute_shape_{suffix}"

    @property
    def shape_call(self) -> str:
        """Call expression used inside generated kernels."""
        suffix = self.__call_suffix()
        return f"torch::lazy::compute_shape_{suffix}"
669
+
670
+
671
@dataclass(frozen=True)
class GenLazyShapeInferenceDefinition:
    """Generates forward declarations for hand-written shape-inference
    functions (``compute_shape_*``) required by unstructured lazy ops.

    Structured ops and view_copy ops infer shapes via meta tensors instead
    (see Note [Generated LTC Shape Functions]), so they yield no declaration.
    """

    backend_index: BackendIndex
    tensor_class: str

    @method_with_native_function
    def __call__(self, f: NativeFunction) -> List[str]:
        # NOTE(review): `sig` is never used below; kept because
        # kernel_signature may validate the function — TODO confirm it is
        # safe to drop.
        sig = kernel_signature(f, self.backend_index)
        metadata = self.backend_index.get_kernel(f)
        assert metadata is not None

        # See Note [Generated LTC Shape Functions]
        is_view_copy_op = "view_copy" in f.tags
        is_structured = f.structured or f.structured_delegate is not None
        if is_structured or is_view_copy_op:
            return []
        shape_sig = ComputeShapeSignature(
            metadata.kernel, f, symint=metadata.supports_symint()
        )
        # Fix: the original wrapped the single declaration in a pointless
        # "\n".join([...]) over a one-element list; emit it directly.
        return [f"{shape_sig.shape_decl};"]
692
+
693
+
694
def generate_non_native_lazy_ir_nodes(
    non_native: List[Dict[str, Any]], gen_lazy_ir: GenLazyIR
) -> List[str]:
    """Generate the non-native lazy IR node classes from their yaml entries."""
    results: List[str] = []
    for entry in non_native:
        # Every non-native IR gets these properties by default; the yaml entry
        # may switch on additional ones.
        props = LazyIrProperties("ShapeCache", "CanBeReused", "LowerDeclOnly")
        for prop_name in entry.get("properties", []):
            setattr(props, prop_name, True)

        # non-native is assumed to want symint bindings if you wrote symint
        ir_schema = LazyIrSchema(
            FunctionSchema.parse(entry["func"]), props, symint=True
        )
        ir_schema.opkind = entry.get("opkind")
        results.append(gen_lazy_ir.gen(ir_schema)[0])

    return results
wemm/lib/python3.10/site-packages/torchgen/dest/register_dispatch_key.py ADDED
@@ -0,0 +1,983 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+ import textwrap
3
+ from dataclasses import dataclass
4
+ from typing import List, Optional, Tuple, Union
5
+
6
+ from typing_extensions import Literal # Python 3.8+
7
+
8
+ import torchgen.api.cpp as cpp
9
+ import torchgen.api.meta as meta
10
+ import torchgen.api.structured as structured
11
+ from torchgen.api.translate import translate
12
+ from torchgen.api.types import (
13
+ BaseCType,
14
+ Binding,
15
+ ConstRefCType,
16
+ CppSignature,
17
+ CppSignatureGroup,
18
+ DispatcherSignature,
19
+ Expr,
20
+ kernel_signature,
21
+ MutRefCType,
22
+ NamedCType,
23
+ NativeSignature,
24
+ tensorT,
25
+ )
26
+
27
+ from torchgen.context import method_with_native_function, native_function_manager
28
+ from torchgen.model import (
29
+ Argument,
30
+ BackendIndex,
31
+ DeviceCheckType,
32
+ DispatchKey,
33
+ gets_generated_out_inplace_wrapper,
34
+ is_cuda_dispatch_key,
35
+ NativeFunction,
36
+ NativeFunctionsGroup,
37
+ SchemaKind,
38
+ TensorOptionsArguments,
39
+ )
40
+ from torchgen.selective_build.selector import SelectiveBuilder
41
+ from torchgen.utils import assert_never, mapMaybe, Target
42
+
43
+
44
def gen_registration_headers(
    backend_index: BackendIndex,
    per_operator_headers: bool,
    rocm: bool,
) -> List[str]:
    """Return the #include lines needed at the top of Register{Key}.cpp
    for this backend (per-operator vs monolithic header mode)."""
    if per_operator_headers:
        headers = ["#include <ATen/ops/as_strided_native.h>"]
    else:
        headers = ["#include <ATen/NativeFunctions.h>"]

    key = backend_index.dispatch_key
    if key in (DispatchKey.CPU, DispatchKey.Meta):
        headers.append("#include <ATen/EmptyTensor.h>")
    elif key == DispatchKey.CUDA:
        # ROCm builds hipify the header path.
        headers.append(
            "#include <ATen/hip/EmptyTensor.h>"
            if rocm
            else "#include <ATen/cuda/EmptyTensor.h>"
        )
    elif key == DispatchKey.MPS:
        headers.append("#include <ATen/mps/EmptyTensor.h>")
    elif per_operator_headers:
        headers.extend(
            [
                "#include <ATen/ops/empty.h>",
                "#include <ATen/ops/empty_strided.h>",
                "#include <ATen/ops/_copy_from_and_resize.h>",
                "#include <ATen/ops/_copy_from.h>",
            ]
        )
    else:
        headers.append("#include <ATen/Functions.h>")

    return headers
74
+
75
+
76
def gen_empty_impl_names(
    backend_index: BackendIndex,
) -> Tuple[Optional[str], Optional[str]]:
    """Return the (empty, empty_strided) C++ factory names this backend's
    generated code should call, or (None, None) when the key has neither."""
    key = backend_index.dispatch_key

    # Dense backends have dedicated at::detail factories keyed by name.
    if key in (
        DispatchKey.Meta,
        DispatchKey.CPU,
        DispatchKey.CUDA,
        DispatchKey.MPS,
    ):
        suffix = str(key).lower()
        return f"at::detail::empty_{suffix}", f"at::detail::empty_strided_{suffix}"

    # These keys go through the dispatcher-level factories instead.
    if key in (
        DispatchKey.CompositeExplicitAutogradNonFunctional,
        DispatchKey.QuantizedCPU,
        DispatchKey.QuantizedCUDA,
    ):
        return "at::empty", "at::empty_strided"

    return None, None
100
+
101
+
102
def gen_create_out_helper(backend_index: BackendIndex) -> List[str]:
    """Emit the per-file create_out() helper used by structured kernels to
    allocate a functional output, or [] when the backend has no empty impl."""
    if backend_index.dispatch_key == DispatchKey.Meta:
        # Meta kernels must allocate on the meta device regardless of the
        # requested options.
        empty_options = "options.device(at::kMeta)"
    else:
        empty_options = "options"

    empty_impl, empty_strided_impl = gen_empty_impl_names(backend_index)
    if empty_impl is None:
        return []

    return [
        f"""
Tensor create_out(IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {{
  if (strides.empty()) {{
      return {empty_impl}(sizes, {empty_options});
  }} else {{
      return {empty_strided_impl}(sizes, strides, {empty_options});
  }}
}}
"""
    ]
123
+
124
+
125
def gen_maybe_create_proxy_helper(backend_index: BackendIndex) -> List[str]:
    """Emit the maybe_create_proxy() helper: when an out= tensor's strides
    don't match the meta function's, allocate a proxy to compute into.
    Returns [] for backends without an empty_strided implementation."""
    _, empty_strided_impl = gen_empty_impl_names(backend_index)
    if empty_strided_impl is None:
        return []
    return [
        f"""
c10::optional<Tensor> maybe_create_proxy(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {{
  if (out.strides() != strides) {{
    return {empty_strided_impl}(sizes, strides, options);
  }}
  return c10::nullopt;
}}
"""
    ]
141
+
142
+
143
def gen_resize_out_helper(backend_index: BackendIndex) -> List[str]:
    """Emit the resize_out() helper used by structured out= kernels."""
    if backend_index.dispatch_key == DispatchKey.CompositeExplicitAutogradNonFunctional:
        # The function isn't used by this key (since only functional ops have a kernel for this key),
        # so we need to not include it to avoid a defined-but-not-used error.
        return []
    return [
        """
void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  TORCH_CHECK(options.dtype() == out.dtype(),
      "Expected out tensor to have dtype ", options.dtype(), ", but got ", out.dtype(), " instead");
  TORCH_CHECK(options.device() == out.device(),
      "Expected out tensor to have device ", options.device(), ", but got ", out.device(), " instead");
  const bool resized = at::native::resize_output(out, sizes);
  // Only restride if a resize occurred; otherwise we ignore the (advisory)
  // strides from the meta function and directly use the output tensor's
  // preexisting strides
  if (resized) {
    if (!strides.empty()) {
      TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
      // TODO: avoid the redispatch here
      out.as_strided_(sizes, strides);
    } else if (options.memory_format_opt().has_value()) {
      out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
    }
  }
}
"""
    ]
171
+
172
+
173
def gen_check_inplace_helper(backend_index: BackendIndex) -> List[str]:
    """Emit the check_inplace() helper used by structured inplace kernels.

    ``backend_index`` is unused; it is accepted for signature symmetry with
    the sibling gen_*_helper functions.
    """
    helper = """
void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
  // These checks are needed on those operators that:
  // 1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm')
  // 2) have particular typing rules (e.g. 'cumsum' and 'cumprod')
  // For other operators (e.g. 'add'), 'TensorIterator' already checks
  // these things separately.
  TORCH_CHECK(options.dtype() == self.dtype(),
      "Bad in-place call: ",
      "input tensor dtype ", self.dtype(), " and output tensor dtype ", options.dtype(), " should match");
  TORCH_CHECK(options.device() == self.device(),
      "Bad in-place call: ",
      "input tensor device ", self.device(), " and output tensor device ", options.device(), " should match");
  TORCH_CHECK(sizes == self.sizes(),
      "Bad in-place call: ",
      "input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
}
"""
    return [helper]
194
+
195
+
196
def gen_registration_helpers(backend_index: BackendIndex) -> List[str]:
    """Collect every per-file helper definition (create_out, resize_out,
    check_inplace, maybe_create_proxy) applicable to this backend."""
    helpers: List[str] = []
    helpers.extend(gen_create_out_helper(backend_index))
    helpers.extend(gen_resize_out_helper(backend_index))
    helpers.extend(gen_check_inplace_helper(backend_index))
    helpers.extend(gen_maybe_create_proxy_helper(backend_index))
    return helpers
203
+
204
+
205
+ # Generates Register{dispatch}.cpp (e.g., RegisterCPU.cpp).
206
+ #
207
+ # - The primary function of this file is to register all of the
208
+ # implementations for the given dispatch key to the dispatcher,
209
+ # so they are available for use in PyTorch. If dispatch is
210
+ # None, we generate schema (def) registrations and catchall
211
+ # registrations.
212
+ # - The secondary function of this file is to generate a wrapper
213
+ # around functions. In CPUType these wrappers do nothing
214
+ # (and should be removed), but in other cases they handle
215
+ # DeviceGuard. A small extra benefit of wrappers is they
216
+ # are not overloaded, so they can be used in the registration
217
+ # API without having to disambiguate which overload you want
218
+ # (as would be the case if you directly registered native::
219
+ # functions).
220
+ # - The tertiary function of this file is to generate *static*
221
+ # cpp API bindings which can be used to bypass dispatcher
222
+ # directly to kernels, but with user-friendly cpp-style API
223
@dataclass(frozen=True)
class RegisterDispatchKey:
    """Generates the contents of Register{DispatchKey}.cpp for one operator
    (or structured operator group).

    Exactly one artifact is produced per call, selected by ``target``:
    dispatcher registrations, anonymous wrapper definitions, namespaced
    definitions, or namespaced declarations.
    """

    backend_index: BackendIndex

    target: Union[
        Literal[Target.ANONYMOUS_DEFINITION],
        Literal[Target.NAMESPACED_DEFINITION],
        Literal[Target.NAMESPACED_DECLARATION],
        Literal[Target.REGISTRATION],
    ]

    # Selector object to determine which operators to generate
    # registration code for.
    selector: SelectiveBuilder

    # Whether or not we are actually code-genning for ROCm
    rocm: bool

    # Whether or not to generate symint registrations or not. External users
    # of codegen who don't care about symints can set this to false to get
    # non-SymInt codegen
    symint: bool

    # The class that all unstructured native functions live under. This is used to improve
    # compiler error messages when a kernel writer adds a native function with the wrong signature.
    # This is only used in unstructured kernels, since structured kernels already live in a class.
    # Finally, this field is currently Optional because it is only used by external backends.
    # It would be nice if we can add the same logic to in-tree kernels too, but that requires updating
    # all of the existing kernel signatures scattered across aten/src/ATen/native.
    class_method_name: Optional[str]

    # Only set to true in lightweight dispatch. If lightweight dispatch is enabled we are registering
    # operators into JIT op registry, thus we need to avoid generating code to register into the dispatcher.
    skip_dispatcher_op_registration: bool

    @staticmethod
    def gen_device_check(
        type: DeviceCheckType, args: List[Argument], method_name: str
    ) -> str:
        """Emit C++ asserting that all tensor-like args share one common
        device (or a no-op comment when the op opts out of the check)."""
        if type == DeviceCheckType.NoCheck:
            return "  // No device check\n"

        device_check = "c10::optional<Device> common_device = nullopt;\n"
        device_check += "(void)common_device; // Suppress unused variable warning\n"
        for arg in args:
            # Only tensor like arguments are eligible
            if arg.type.is_tensor_like():
                device_check += f"""
  c10::impl::check_and_update_common_device(common_device, {arg.name}, "{method_name}", "{arg.name}");"""
        return device_check

    @method_with_native_function
    def __call__(self, f: Union[NativeFunctionsGroup, NativeFunction]) -> List[str]:
        """Entry point: route groups to structured codegen and lone functions
        to unstructured codegen."""
        if isinstance(f, NativeFunctionsGroup):
            g: NativeFunctionsGroup = f
            # Note: We call gen_structured() if the operator is marked structured, regardless of the backend.
            # gen_structured() has special logic to handle auto-generated kernels.
            if g.structured:
                return self.gen_structured(g)
            else:
                return list(
                    mapMaybe(lambda f: self.gen_unstructured(f, g), g.functions())
                )
        elif isinstance(f, NativeFunction):
            r = self.gen_unstructured(f)
            return [] if r is None else [r]
        else:
            assert_never(f)

    def wrapper_kernel_sig(
        self, f: NativeFunction
    ) -> Union[NativeSignature, DispatcherSignature]:
        """Signature of the generated anonymous wrapper for ``f``."""
        # The prefix is just to ensure uniqueness. The Dispatcher API doesn't guarantee unique kernel names.
        return DispatcherSignature.from_schema(
            f.func,
            prefix=f"wrapper_{self.backend_index.dispatch_key}_{f.func.name.overload_name}_",
            symint=self.symint,
        )

    def gen_out_inplace_wrapper(
        self, f: NativeFunction, g: Optional[NativeFunctionsGroup]
    ) -> Optional[str]:
        """Emit an inplace/out wrapper that calls the group's functional
        kernel and copies the result back (used by backends without native
        inplace/out kernels, e.g. functional-only backends)."""
        if g is None:
            return None
        k = f.func.kind()
        if k is SchemaKind.inplace:
            copy_op = "at::_copy_from"
        elif k is SchemaKind.out:
            copy_op = "at::_copy_from_and_resize"
        else:
            raise AssertionError("gen_out_inplace_wrapper called on a functional op")

        sig = self.wrapper_kernel_sig(f)
        name = sig.name()

        func_res = f"{name}_tmp"
        return_names = cpp.return_names(f)
        if len(return_names) > 1:
            updates = "\n  ".join(
                f"{copy_op}(std::get<{i}>({func_res}), {ret_name});"
                for i, ret_name in enumerate(return_names)
            )
            returns = f'{sig.returns_type().cpp_type()}({", ".join(return_names)})'
        else:
            ret_name = return_names[0]
            updates = f"{copy_op}({func_res}, {ret_name});"
            returns = ret_name

        functional_sig = self.wrapper_kernel_sig(g.functional)
        wrapper_name = sig.name()

        return f"""\
{sig.defn(name=wrapper_name)} {{
  auto {func_res} = {functional_sig.name()}({", ".join(e.expr for e in translate(sig.arguments(), functional_sig.arguments()))});
  {updates}
  return {returns};
}}
"""

    def gen_structured(self, g: NativeFunctionsGroup) -> List[str]:
        """Codegen for a structured operator group; falls back to
        unstructured codegen when this backend has no structured kernel."""
        metadata = self.backend_index.get_kernel(g)
        if self.backend_index.dispatch_key == DispatchKey.Meta:
            assert not self.backend_index.has_kernel(g.out), (
                "Do not explicitly specify Meta dispatch key on structured "
                "functions, they will be automatically generated for you"
            )
        elif (
            self.backend_index.dispatch_key
            == DispatchKey.CompositeExplicitAutogradNonFunctional
        ):
            assert not self.backend_index.has_kernel(g.out), (
                "Do not explicitly specify CompositeExplicitAutograd dispatch key on structured "
                "functions, they will be automatically generated for you"
            )
        elif metadata is None or not metadata.structured:
            return list(mapMaybe(lambda f: self.gen_unstructured(f, g), g.functions()))
        structured_gen = StructuredRegisterDispatchKey(
            self.backend_index,
            self.target,
            self.selector,
            self.rocm,
            self.symint,
            self.class_method_name,
            self.skip_dispatcher_op_registration,
            g,
        )
        return list(mapMaybe(structured_gen.gen_one, g.functions()))

    def gen_unstructured(
        self, f: NativeFunction, g: Optional[NativeFunctionsGroup] = None
    ) -> Optional[str]:
        """Codegen for one unstructured native function under the current
        ``target``; returns None when nothing should be emitted for ``f``."""
        with native_function_manager(f):
            inplace_meta = False
            gets_out_inplace_wrapper = False
            if not self.backend_index.has_kernel(f):
                if (
                    self.backend_index.dispatch_key == DispatchKey.Meta
                    and f.func.kind() is SchemaKind.inplace
                    and
                    # Defer to composites for meta implementation
                    not f.has_composite_kernel
                    and
                    # Inplace list operations are not supported
                    len(f.func.returns) == 1
                ):
                    inplace_meta = True
                elif (
                    not self.backend_index.use_out_as_primary
                    and g is not None
                    and gets_generated_out_inplace_wrapper(f, g, self.backend_index)
                ):
                    # We want to generate inplace/out wrappers, that don't have a kernel for the backend.
                    gets_out_inplace_wrapper = True
                else:
                    return None
            if f.manual_kernel_registration:
                return None

            if (
                self.target is Target.REGISTRATION
                and not self.selector.is_native_function_selected(f)
            ):
                return None

            sig = self.wrapper_kernel_sig(f)

            name = sig.name()
            returns_type = sig.returns_type().cpp_type()
            args = sig.arguments()
            args_str = ", ".join(a.defn() for a in args)

            # See Note [Direct dispatch bindings]
            cpp_sig_group = CppSignatureGroup.from_native_function(
                f, method=False, fallback_binding=False
            )

            # TODO: dedupe this with the structured codegen
            if self.target is Target.NAMESPACED_DECLARATION:
                result = ""
                for cpp_sig in cpp_sig_group.signatures(symint=self.symint):
                    result += f"TORCH_API {cpp_sig.decl()};\n"
                return result
            elif self.target is Target.NAMESPACED_DEFINITION:

                def generate_defn(cpp_sig: CppSignature) -> str:
                    # Namespaced definition just forwards to the anonymous wrapper.
                    return f"""
{cpp_sig.defn()} {{
return {sig.name()}({', '.join(e.expr for e in translate(cpp_sig.arguments(), sig.arguments()))});
}}
"""

                result = ""
                for cpp_sig in cpp_sig_group.signatures(symint=self.symint):
                    result += generate_defn(cpp_sig)
                return result

            elif self.target is Target.ANONYMOUS_DEFINITION:
                # short circuit for inplace_meta
                if inplace_meta:
                    assert f.func.arguments.self_arg is not None
                    self_arg_name = f.func.arguments.self_arg.argument.name
                    # TODO: handle in place on tensor list
                    return f"""
{returns_type} {name}({args_str}) {{
  TORCH_CHECK_NOT_IMPLEMENTED({self_arg_name}.is_meta(),
    "Cannot inplace into non-meta tensor with meta tensor argument");
  return {self_arg_name};
}}
"""

                # short circuit for generated inplace/out wrappers
                if gets_out_inplace_wrapper:
                    return self.gen_out_inplace_wrapper(f, g)

                metadata = self.backend_index.get_kernel(f)
                if metadata is None:
                    return None
                if self.class_method_name is None:
                    impl_name = f"{metadata.cpp_namespace}::{metadata.kernel}"
                else:
                    impl_name = f"{metadata.cpp_namespace}::{self.class_method_name}::{metadata.kernel}"

                kernel_sig = kernel_signature(f, self.backend_index)

                args_exprs_str = ", ".join(
                    e.expr
                    for e in translate(
                        sig.arguments(), kernel_sig.arguments(), method=False
                    )
                )

                device_check = "  // No device check\n"
                # Backends that require device guards presumably also require device checks.
                if self.backend_index.device_guard:
                    device_check_args = itertools.chain(
                        f.func.arguments.out, f.func.arguments.flat_positional
                    )
                    device_check = RegisterDispatchKey.gen_device_check(
                        f.device_check, list(device_check_args), name
                    )

                device_guard = "// DeviceGuard omitted"  # default
                if f.device_guard and self.backend_index.device_guard:
                    has_tensor_options = any(
                        isinstance(a, TensorOptionsArguments)
                        for a in f.func.arguments.non_out
                    )
                    if has_tensor_options:
                        # kernel is creating a tensor
                        device_guard = """
  const DeviceGuard device_guard(device_or_default(device));"""

                        # CUDA requires special handling
                        if is_cuda_dispatch_key(self.backend_index.dispatch_key):
                            device_guard = (
                                f"globalContext().lazyInitCUDA();\n{device_guard}"
                            )
                    else:
                        # kernel is operating on existing tensors

                        # There is precedence for which argument we use to do
                        # device guard. This describes the precedence order.
                        self_arg = (
                            [f.func.arguments.self_arg.argument]
                            if f.func.arguments.self_arg is not None
                            else []
                        )
                        candidate_args = itertools.chain(
                            self_arg,
                            f.func.arguments.out,
                            f.func.arguments.flat_positional,
                        )

                        # Only tensor like arguments are eligible
                        device_of = next(
                            (
                                f"{a.name}"
                                for a in candidate_args
                                if a.type.is_tensor_like()
                            ),
                            None,
                        )
                        if device_of is not None:
                            device_guard = f"const OptionalDeviceGuard device_guard(device_of({device_of}));"

                return f"""\
namespace {{

{returns_type} {name}({args_str}) {{
  {device_check}

  {device_guard}
  return {impl_name}({args_exprs_str});
}}

}} // anonymous namespace
"""

            elif self.target is Target.REGISTRATION:
                if f.manual_kernel_registration or self.skip_dispatcher_op_registration:
                    return None
                else:
                    payload = f"TORCH_FN({name})"
                    return f'm.impl("{f.func.name}",\n{payload});\n'
            else:
                assert_never(self.target)
549
+
550
+
551
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
552
+ #
553
+ # STRUCTURED
554
+ #
555
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
556
+
557
+
558
+ @dataclass(frozen=True)
559
+ class StructuredRegisterDispatchKey(RegisterDispatchKey):
560
+ g: NativeFunctionsGroup
561
+
562
    def gen_class_set_output_functions(
        self, k: SchemaKind, parent_class: str, generate_super: bool
    ) -> str:
        """Emit the set_output_strided / set_output_raw_strided overrides for
        the generated structured-kernel class.

        Only the strided variant may allocate a restrided proxy output; the
        raw_strided variant writes directly.
        """
        if generate_super:
            set_output_super = f"{parent_class}::set_output_raw_strided(output_idx, sizes, strides, options, names);"
        else:
            set_output_super = ""

        def gen_set_output_function(name: str, maybe_create_proxy: bool) -> str:
            # Functional kernels store ExclusivelyOwned<Tensor>, so deref first.
            maybe_star = "*" if k is SchemaKind.functional else ""
            return f"""
void set_output_{name}(
    int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
    TensorOptions options, DimnameList names
) override {{
{textwrap.indent(self.gen_class_set_output_body(k, maybe_create_proxy), "    ")}
    if (!names.empty()) {{
      namedinference::propagate_names({maybe_star}outputs_[output_idx], names);
    }}
    // super must happen after, so that downstream can use maybe_get_output
    // to retrieve the output
{textwrap.indent(set_output_super, "    ")}
}}
"""

        return f"""
{gen_set_output_function("strided", maybe_create_proxy=True)}
{gen_set_output_function("raw_strided", maybe_create_proxy=False)}
"""
591
+
592
    def gen_class_set_output_body(self, k: SchemaKind, maybe_create_proxy: bool) -> str:
        """Emit the body of a set_output_* override: optional device-guard
        setup, then output allocation/validation appropriate for ``k``
        (functional → create_out, inplace → check_inplace, out → resize_out)."""
        if self.backend_index.dispatch_key in [
            DispatchKey.CUDA,
            DispatchKey.MPS,
            DispatchKey.CompositeExplicitAutogradNonFunctional,
        ]:
            # Device-guarded backends must pin the guard to the output device
            # (and reject multi-device outputs).
            maybe_set_guard = """
auto current_device = guard_.current_device();
if (C10_UNLIKELY(current_device.has_value())) {
  TORCH_INTERNAL_ASSERT(*current_device == options.device(),
    "structured kernels don't support multi-device outputs");
} else {
  guard_.reset_device(options.device());
}
"""
            maybe_set_guard_line = maybe_set_guard + "\n"
        else:
            maybe_set_guard_line = maybe_set_guard = ""

        if maybe_create_proxy:
            create_proxy = """
auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
if (C10_UNLIKELY(maybe_proxy.has_value())) {
    proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
}
"""
        else:
            create_proxy = ""

        if k is SchemaKind.functional:
            assert self.backend_index.dispatch_key in (
                DispatchKey.Meta,
                DispatchKey.CPU,
                DispatchKey.CUDA,
                DispatchKey.MPS,
                DispatchKey.CompositeExplicitAutogradNonFunctional,
            )
            return f"""{maybe_set_guard_line}
outputs_[output_idx] = create_out(sizes, strides, options);"""
        elif k is SchemaKind.inplace:
            return f"""{maybe_set_guard_line}
const auto& out = outputs_[output_idx].get();
check_inplace(out, sizes, options);
{create_proxy}"""
        elif k is SchemaKind.out:
            return f"""{maybe_set_guard_line}
const auto& out = outputs_[output_idx].get();
resize_out(out, sizes, strides, options);
{create_proxy}"""
        elif k is SchemaKind.mutable or k is SchemaKind.scratch:
            raise AssertionError(
                f"{k} structured operators are currently not supported"
            )
        else:
            assert_never(k)
647
+
648
+ # returns the definition of a ctor, as well as how to construct
649
+ # this class to a variable named op
650
+ def gen_class_ctor(self, k: SchemaKind, class_name: str, returns: int) -> str:
651
+ if k is SchemaKind.functional:
652
+ return ""
653
+ elif k is SchemaKind.inplace:
654
+ # TODO: Make sure out argument is guaranteed to be self
655
+ return f"{class_name}(Tensor& self) : outputs_{{std::ref(self)}} {{}}"
656
+ elif k is SchemaKind.out:
657
+ out_args = ", ".join(f"Tensor& out{i}" for i in range(returns))
658
+ out_refs = ", ".join(f"std::ref(out{i})" for i in range(returns))
659
+ return f"{class_name}({out_args}) : outputs_{{ {out_refs} }} {{}}"
660
+ elif k is SchemaKind.mutable or k is SchemaKind.scratch:
661
+ raise AssertionError(
662
+ f"{k} structured operators are currently not supported"
663
+ )
664
+ else:
665
+ assert_never(k)
666
+
667
+ def gen_class(
668
+ self,
669
+ f: NativeFunction,
670
+ k: SchemaKind,
671
+ *,
672
+ class_name: str,
673
+ parent_class: str,
674
+ generate_super: bool,
675
+ ) -> str:
676
+ if k is SchemaKind.functional:
677
+ output_type = "c10::ExclusivelyOwned<Tensor>"
678
+ output_value = "*outputs_[output_idx]"
679
+ proxy_field = ""
680
+ elif k is SchemaKind.inplace:
681
+ output_type = "std::reference_wrapper<Tensor>"
682
+ output_value = "proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get()"
683
+ proxy_field = f"std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, {len(f.func.returns)}> proxy_outputs_;"
684
+ elif k is SchemaKind.out:
685
+ output_type = "std::reference_wrapper<Tensor>"
686
+ output_value = "proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get()"
687
+ proxy_field = f"std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, {len(f.func.returns)}> proxy_outputs_;"
688
+
689
+ if self.backend_index.dispatch_key == DispatchKey.CUDA:
690
+ if self.rocm:
691
+ guard_field = "c10::hip::OptionalHIPGuardMasqueradingAsCUDA guard_;"
692
+ else:
693
+ guard_field = "c10::cuda::OptionalCUDAGuard guard_;"
694
+ elif (
695
+ self.backend_index.dispatch_key
696
+ == DispatchKey.CompositeExplicitAutogradNonFunctional
697
+ ):
698
+ guard_field = "c10::OptionalDeviceGuard guard_;"
699
+ elif self.backend_index.dispatch_key == DispatchKey.MPS:
700
+ # TODO: Move to OptionalMPSGuard.
701
+ guard_field = "c10::OptionalDeviceGuard guard_;"
702
+ else:
703
+ guard_field = ""
704
+
705
+ indent = " " * 4
706
+ class_ctor_str = self.gen_class_ctor(k, class_name, len(f.func.returns))
707
+ lines = (
708
+ f"struct {class_name} final : public {parent_class} {{",
709
+ f"{textwrap.indent(class_ctor_str, indent)}",
710
+ f"{textwrap.indent(self.gen_class_set_output_functions(k, parent_class, generate_super), indent)}",
711
+ " const Tensor& maybe_get_output(int64_t output_idx) override {",
712
+ f" return {output_value};\n",
713
+ " }",
714
+ f" std::array<{output_type}, {len(f.func.returns)}> outputs_;",
715
+ f"{textwrap.indent(proxy_field, indent)}",
716
+ f"{textwrap.indent(guard_field, indent)}",
717
+ "};",
718
+ )
719
+ return "\n".join(line for line in lines if line)
720
+
721
+ @method_with_native_function
722
+ def gen_one(self, f: NativeFunction) -> Optional[str]:
723
+ assert not f.manual_kernel_registration
724
+
725
+ if (
726
+ self.target is Target.REGISTRATION
727
+ and not self.selector.is_native_function_selected(f)
728
+ ):
729
+ return None
730
+
731
+ # TODO: Now, there is something interesting going on here. In the code below,
732
+ # we generate CompositeExplicitAutogradNonFunctional implementations of functional and inplace
733
+ # based on the out implementation. But in fact, out is definable by
734
+ # functional too (just not very efficiently), and this is honestly the
735
+ # MORE likely situation for a backend implementor. How do we pick?
736
+ # Well, taking a page from Haskell type classes and default methods,
737
+ # we could conceivably register a circular definition (out in terms
738
+ # of functional, and functional in terms of out) and just require
739
+ # someone to implement one or the other. We'd have to do a little bit
740
+ # of work to not register one of these "weak" definitions unless there
741
+ # is a strong definition somewhere in the DAG! So it's not implemented yet.
742
+ if (
743
+ self.backend_index.dispatch_key
744
+ == DispatchKey.CompositeExplicitAutogradNonFunctional
745
+ and f.func.kind() is SchemaKind.out
746
+ ):
747
+ # Never generate a default implementation for out, that's what you
748
+ # have to define as a backend implementor
749
+ return None
750
+
751
+ # Note [Direct dispatch bindings]
752
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
753
+ # Signature of the non-dispatched function we'll expose in a header
754
+ # (e.g., at::cpu::add). We don't generate methods (TODO: do this
755
+ # when CPUTensor class is a thing); nor do we generate fallback
756
+ # bindings for manual_cpp_binding functions.
757
+ cpp_sig_group = CppSignatureGroup.from_native_function(
758
+ f, method=False, fallback_binding=False
759
+ )
760
+
761
+ # Signature of the wrapper function we'll register to the dispatcher
762
+ kern = self.backend_index.get_kernel(f)
763
+ sig = NativeSignature(
764
+ f.func,
765
+ prefix=f"wrapper_{self.backend_index.dispatch_key}_",
766
+ symint=kern is not None and kern.supports_symint(),
767
+ )
768
+
769
+ if self.target is Target.NAMESPACED_DECLARATION:
770
+ result = ""
771
+ for cpp_sig in cpp_sig_group.signatures(symint=self.symint):
772
+ result += f"TORCH_API {cpp_sig.decl()};\n"
773
+ return result
774
+
775
+ elif self.target is Target.NAMESPACED_DEFINITION:
776
+
777
+ def generate_defn(cpp_sig: CppSignature) -> str:
778
+ return f"""
779
+ {cpp_sig.defn()} {{
780
+ return {sig.name()}({', '.join(e.expr for e in translate(cpp_sig.arguments(), sig.arguments()))});
781
+ }}
782
+ """
783
+
784
+ result = ""
785
+ for cpp_sig in cpp_sig_group.signatures(symint=self.symint):
786
+ result += generate_defn(cpp_sig)
787
+ return result
788
+
789
+ elif self.target is Target.ANONYMOUS_DEFINITION:
790
+
791
+ k = f.func.kind()
792
+
793
+ # Construct the body of the wrapper function with signature sig
794
+ sig_body = []
795
+ # We'll use context to keep track of any variables we've brought
796
+ # into scope while generating code
797
+ context: List[Union[Binding, Expr]] = list(sig.arguments())
798
+
799
+ # Initialize the class corresponding to this structured
800
+ # operator; feeding it the output argument(s) if it is known
801
+ if self.backend_index.dispatch_key is DispatchKey.Meta:
802
+ class_name = f"structured_{meta.name(self.g)}_meta_{k.name}"
803
+ parent_class = f"at::meta::structured_{meta.name(self.g)}"
804
+ elif (
805
+ self.backend_index.dispatch_key
806
+ is DispatchKey.CompositeExplicitAutogradNonFunctional
807
+ ):
808
+ # TODO: dedup this branch
809
+ class_name = f"structured_{meta.name(self.g)}_default_backend_{k.name}"
810
+ parent_class = f"at::meta::structured_{meta.name(self.g)}"
811
+ else:
812
+ metadata = self.backend_index.get_kernel(self.g)
813
+ assert metadata is not None
814
+ class_name = f"structured_{metadata.kernel}_{k.name}"
815
+ parent_class = f"{metadata.cpp_namespace}::structured_{metadata.kernel}"
816
+
817
+ if self.backend_index.device_guard:
818
+ device_check_args = itertools.chain(
819
+ f.func.arguments.out, f.func.arguments.flat_positional
820
+ )
821
+ sig_body.append(
822
+ RegisterDispatchKey.gen_device_check(
823
+ f.device_check, list(device_check_args), sig.name()
824
+ )
825
+ )
826
+
827
+ if k is SchemaKind.functional:
828
+ sig_body.append(f"{class_name} op;")
829
+ elif k is SchemaKind.inplace:
830
+ sig_body.append(f"{class_name} op(self);")
831
+ elif k is SchemaKind.out:
832
+ out_args_str = ", ".join(a.name for a in f.func.arguments.out)
833
+ sig_body.append(f"{class_name} op({out_args_str});")
834
+
835
+ # Translate the input native arguments into structured
836
+ # arguments for the meta call
837
+ meta_exprs = ", ".join(
838
+ e.expr
839
+ for e in translate(
840
+ context, structured.meta_arguments(self.g), method=False
841
+ )
842
+ )
843
+
844
+ if self.g.out.precomputed:
845
+ # If this function group has precomputed elements, the meta function
846
+ # returns a struct containing them which must be saved so that it
847
+ # can be unpacked when generating code to call the impl.
848
+ sig_body.append(f"auto precompute = op.meta({meta_exprs});")
849
+
850
+ # Put all of the contents of the precompute struct into the context
851
+ # so that translate will be able to return the correct args for the
852
+ # call to the impl.
853
+ precomputed_values = [
854
+ *self.g.out.precomputed.replace.values(),
855
+ self.g.out.precomputed.add,
856
+ ]
857
+ for precomputed_elems in precomputed_values:
858
+ for arg in precomputed_elems:
859
+ context.append(
860
+ Expr(
861
+ expr=f"precompute.{arg.name}",
862
+ type=structured.argument_type(arg, binds=arg.name),
863
+ )
864
+ )
865
+
866
+ # Add a use of the precompute struct so FB internal compilers don't
867
+ # complain that there is an unused variable.
868
+ sig_body.append("(void)precompute;")
869
+ else:
870
+ sig_body.append(f"op.meta({meta_exprs});")
871
+
872
+ # After running meta, op.outputs_ is guaranteed to be valid;
873
+ # add it to the context
874
+ out_args = structured.out_arguments(self.g)
875
+ for i, out_arg in enumerate(out_args):
876
+ assert ConstRefCType(BaseCType(tensorT)) == out_arg.nctype.type
877
+
878
+ if k is SchemaKind.out:
879
+ expr = f"op.maybe_get_output({i})"
880
+ else:
881
+ maybe_star = "*" if k is SchemaKind.functional else ""
882
+ expr = f"{maybe_star}op.outputs_[{i}]"
883
+
884
+ context.append(
885
+ Expr(
886
+ expr=expr,
887
+ # TODO: Stop hardcoding that the output type is a Tensor. Note
888
+ # that for the codegen here this is fine because outputs_ is
889
+ # hardcoded to be tensor already
890
+ type=NamedCType(
891
+ out_arg.nctype.name, MutRefCType(BaseCType(tensorT))
892
+ ),
893
+ )
894
+ )
895
+
896
+ # With the expanded context, do the impl call (if not a meta
897
+ # function)
898
+ if (
899
+ self.backend_index.dispatch_key
900
+ == DispatchKey.CompositeExplicitAutogradNonFunctional
901
+ ):
902
+ # TODO: https://github.com/pytorch/pytorch/issues/53023
903
+ out_sig_group = CppSignatureGroup.from_native_function(
904
+ self.g.out, method=False, fallback_binding=f.manual_cpp_binding
905
+ )
906
+ out_sig = out_sig_group.most_faithful_signature()
907
+ api_name = out_sig.name()
908
+ out_exprs = ", ".join(
909
+ e.expr
910
+ for e in translate(context, out_sig.arguments(), method=False)
911
+ )
912
+ # TODO: I think this means structured won't work with method
913
+ # only functions (but maybe you're saved by faithful? iunno.)
914
+ # NB: Originally I wrote this as an at::redispatch call, but
915
+ # I got in trouble because that meant I needed a DispatchKeySet
916
+ # in the wrapper function, which meant I needed a DispatchKeySet
917
+ # in the DispatchKeyFunctions declarations, but the defined API
918
+ # there does NOT permit a dispatch key set. I think you can
919
+ # probably unwind this by calling some function to do the TLS
920
+ # fetch and get the DispatchKeySet when you don't have it, but
921
+ # I didn't do it for this version
922
+ sig_body.append(f"at::{api_name}({out_exprs});")
923
+ elif self.backend_index.dispatch_key != DispatchKey.Meta:
924
+ impl_exprs = ", ".join(
925
+ e.expr
926
+ for e in translate(
927
+ context, structured.impl_arguments(self.g), method=False
928
+ )
929
+ )
930
+ sig_body.append(f"op.impl({impl_exprs});")
931
+
932
+ # Go over each output, and check if there is a proxy created for it.
933
+ # If so, copy it over to the original output.
934
+ if k is SchemaKind.out or k is SchemaKind.inplace:
935
+ for i in range(len(f.func.returns)):
936
+ sig_body.append(
937
+ f"if (op.proxy_outputs_[{i}].has_value()) op.outputs_[{i}].get().copy_(**op.proxy_outputs_[{i}]);"
938
+ )
939
+
940
+ # Destructively return the final tensors
941
+ # TODO: Do this in translate instead
942
+ if k is SchemaKind.functional:
943
+ if len(f.func.returns) == 1:
944
+ ret_expr = "std::move(op.outputs_[0]).take()" # small optimization
945
+ else:
946
+ moved = ", ".join(
947
+ f"std::move(op.outputs_[{i}]).take()"
948
+ for i in range(len(f.func.returns))
949
+ )
950
+ ret_expr = f"std::make_tuple({moved})"
951
+ elif k is SchemaKind.inplace:
952
+ ret_expr = "self"
953
+ elif k is SchemaKind.out:
954
+ if len(f.func.returns) == 1:
955
+ ret_expr = f.func.arguments.out[0].name
956
+ else:
957
+ refs = ", ".join(a.name for a in f.func.arguments.out)
958
+ ret_expr = f"std::forward_as_tuple({refs})"
959
+ sig_body.append(f"return {ret_expr};")
960
+
961
+ sig_body_str = "\n".join(sig_body)
962
+
963
+ # For an overview of what this template code looks like, see
964
+ # https://github.com/pytorch/rfcs/pull/9
965
+ return f"""\
966
+ {self.gen_class(
967
+ f, k,
968
+ class_name=class_name,
969
+ parent_class=parent_class,
970
+ generate_super=self.g.out.structured_inherits is not None
971
+ )}
972
+
973
+ {sig.defn()} {{
974
+ {sig_body_str}
975
+ }}
976
+ """
977
+
978
+ elif self.target is Target.REGISTRATION:
979
+ return f'm.impl("{f.func.name}", TORCH_FN({sig.name()}));'
980
+ else:
981
+ assert_never(self.target)
982
+ # Silence mypy's "Missing return statement" error
983
+ return None
wemm/lib/python3.10/site-packages/torchgen/executorch/api/__pycache__/et_cpp.cpython-310.pyc ADDED
Binary file (7.43 kB). View file
 
wemm/lib/python3.10/site-packages/torchgen/model.py ADDED
The diff for this file is too large to render. See raw diff
 
wemm/lib/python3.10/site-packages/torchgen/operator_versions/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (176 Bytes). View file
 
wemm/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/FunctionalInverses.h ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // ${generated_comment}
4
+
5
+ #include <ATen/Tensor.h>
6
+
7
+ namespace at {
8
+ namespace functionalization {
9
+
10
+ struct FunctionalInverses {
11
+
12
+ ${view_inverse_declarations}
13
+
14
+ };
15
+ }
16
+ }
wemm/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/MethodOperators.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // ${generated_comment}
4
+
5
+ #ifdef TORCH_ASSERT_NO_OPERATORS
6
+ #error This change adds a dependency on native_functions.yaml, \
7
+ meaning the file will need to be re-compiled every time an operator \
8
+ is changed or added. Consider if your change would be better placed in \
9
+ another file, or if a more specific header might achieve the same goal. \
10
+ See NOTE: [Tensor vs. TensorBase]
11
+ #endif
12
+
13
+ // Forward declarations of any types needed in the operator signatures.
14
+ // We can't directly include these classes because it will cause circular include dependencies.
15
+ // This file is included by TensorBody.h, which defines the Tensor class.
16
+ #include <ATen/core/ATen_fwd.h>
17
+
18
+ ${MethodOperators_includes}
19
+
20
+ namespace at {
21
+ namespace _ops {
22
+ ${MethodOperators_declarations}
23
+ } // namespace _ops
24
+ } // namespace at
wemm/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Operators.cpp ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <ATen/Tensor.h>
2
+ #include <ATen/core/dispatch/Dispatcher.h>
3
+
4
+ // ${generated_comment}
5
+ // NOTE See [Sharded File] comment in VariableType
6
+
7
+ #ifndef AT_PER_OPERATOR_HEADERS
8
+ #include <ATen/Operators.h>
9
+ #else
10
+ ${operator_headers}
11
+ #endif
12
+
13
+ ${static_dispatch_extra_headers}
14
+
15
+ namespace at { namespace _ops {
16
+
17
+ ${definitions}
18
+
19
+ }} // namespace at::_ops
wemm/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterBackendSelect.cpp ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // We register ops with a higher priority dispatch key (BackendSelect) than the usual backend-specific keys (e.g. CPU)
2
+ // which makes calls to the factory functions dispatch to here.
3
+ // We then 'manually' compute a lower-priority to re-dispatch to (e.g. CPU) to get to the eventually correct backend.
4
+ // ${generated_comment}
5
+
6
+ #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
7
+ #include <ATen/core/Tensor.h>
8
+ #include <ATen/core/dispatch/DispatchKeyExtractor.h>
9
+ #include <torch/library.h>
10
+
11
+ #ifndef AT_PER_OPERATOR_HEADERS
12
+ #include <ATen/Operators.h>
13
+ #else
14
+ #include <ATen/ops/is_pinned_ops.h>
15
+ #include <ATen/ops/_pin_memory_ops.h>
16
+
17
+ ${ops_headers}
18
+ #endif
19
+
20
+ namespace at {
21
+
22
+ namespace {
23
+
24
+ ${backend_select_method_definitions}
25
+
26
+ bool is_pinned(const Tensor& self, c10::optional<at::Device> device) {
27
+ // Only CPU tensors can be pinned
28
+ if (!self.is_cpu()) {
29
+ return false;
30
+ }
31
+ // TODO: fetch scalar type from Tensor? But it doesn't really matter...
32
+ DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(c10::nullopt, self.layout(), device.value_or(at::kCUDA)));
33
+ return at::_ops::is_pinned::redispatch(_dk, self, device);
34
+ }
35
+
36
+ at::Tensor _pin_memory(const Tensor& self, c10::optional<at::Device> device) {
37
+ TORCH_CHECK(self.device().is_cpu(), "cannot pin '", self.toString(), "' only dense CPU tensors can be pinned");
38
+ DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(c10::nullopt, self.layout(), device.value_or(at::kCUDA)));
39
+ return at::_ops::_pin_memory::redispatch(_dk, self, device);
40
+ }
41
+
42
+ TORCH_LIBRARY_IMPL(aten, BackendSelect, m) {
43
+ ${backend_select_function_registrations};
44
+ m.impl(TORCH_SELECTIVE_NAME("aten::is_pinned"), TORCH_FN(is_pinned));
45
+ m.impl(TORCH_SELECTIVE_NAME("aten::_pin_memory"), TORCH_FN(_pin_memory));
46
+ }
47
+
48
+ } // namespace
49
+ } // at
wemm/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UnboxingFunctions.cpp ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <ATen/UnboxingFunctions.h>
2
+ #include <ATen/Functions.h>
3
+
4
+ #include <ATen/Tensor.h>
5
+ #include <ATen/core/functional.h>
6
+ #include <ATen/core/interned_strings.h>
7
+ #include <ATen/core/ivalue.h>
8
+ #include <ATen/core/stack.h>
9
+
10
+ #include <algorithm>
11
+ #include <array>
12
+ #include <cstddef>
13
+ #include <cstring>
14
+ #include <sstream>
15
+ #include <stdexcept>
16
+ #include <tuple>
17
+ #include <unordered_map>
18
+ #include <unordered_set>
19
+ #include <utility>
20
+ #include <vector>
21
+ namespace at {
22
+ namespace unboxing {
23
+
24
+ using ::c10::fmap;
25
+ using ::c10::filter;
26
+ using torch::jit::peek;
27
+ using torch::jit::drop;
28
+ using torch::jit::pack;
29
+ using torch::jit::pop;
30
+
31
+ // Generated function declaration
32
+ ${definitions}
33
+
34
+ } // namespace unboxing
35
+ } // namespace at
wemm/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/config.cpython-310.pyc ADDED
Binary file (7.72 kB). View file
 
wemm/lib/python3.10/site-packages/torchgen/static_runtime/__pycache__/gen_static_runtime_ops.cpython-310.pyc ADDED
Binary file (7.33 kB). View file
 
wemm/lib/python3.10/site-packages/torchgen/static_runtime/config.py ADDED
@@ -0,0 +1,388 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, Union
2
+
3
+ from torchgen.model import NativeFunctionsGroup, NativeFunctionsViewGroup
4
+
5
+
6
+ def func_name_base_str(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> str:
7
+ if isinstance(g, NativeFunctionsGroup):
8
+ return str(g.functional.func.name.name.base)
9
+ else:
10
+ return str(g.view.root_name)
11
+
12
+
13
+ is_hand_written_ops_ = frozenset(
14
+ (
15
+ "abs",
16
+ "add",
17
+ "addmm",
18
+ "all",
19
+ "any",
20
+ "argmin",
21
+ "bmm",
22
+ "clamp",
23
+ "clamp_min",
24
+ "cumsum",
25
+ "div",
26
+ "fmod",
27
+ "index_select",
28
+ "leaky_relu",
29
+ "linear",
30
+ "log",
31
+ "matmul",
32
+ "mul",
33
+ "narrow_copy",
34
+ "nonzero",
35
+ "pow",
36
+ "remainder",
37
+ "sigmoid",
38
+ "sign",
39
+ "sub",
40
+ "tanh",
41
+ "detach",
42
+ "expand_as",
43
+ "flatten",
44
+ "narrow",
45
+ "reshape_as",
46
+ "select",
47
+ "slice",
48
+ "softmax",
49
+ "split",
50
+ "squeeze",
51
+ "transpose",
52
+ "view",
53
+ "where",
54
+ )
55
+ )
56
+
57
+
58
+ def is_hand_written(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> bool:
59
+ name_base = func_name_base_str(g)
60
+ return name_base in is_hand_written_ops_
61
+
62
+
63
+ def override_test_values(arg_map: Dict[str, str], op_name: str, index: int) -> None:
64
+ assert index == 0 or index == 1
65
+ if op_name == "addr":
66
+ if index == 0:
67
+ arg_map["self"] = "at::rand({6, 6})"
68
+ arg_map["vec1"] = "at::rand({6})"
69
+ arg_map["vec2"] = "at::rand({6})"
70
+ else:
71
+ arg_map["self"] = "at::rand({22, 22})"
72
+ arg_map["vec1"] = "at::rand({22})"
73
+ arg_map["vec2"] = "at::rand({22})"
74
+ return
75
+ if op_name == "mv":
76
+ if index == 0:
77
+ arg_map["self"] = "at::rand({6, 6})"
78
+ arg_map["vec"] = "at::rand({6})"
79
+ else:
80
+ arg_map["self"] = "at::rand({22, 22})"
81
+ arg_map["vec"] = "at::rand({22})"
82
+ return
83
+ if op_name == "addbmm":
84
+ if index == 0:
85
+ arg_map["self"] = "at::rand({6, 6})"
86
+ else:
87
+ arg_map["self"] = "at::rand({22, 22})"
88
+ return
89
+ if op_name == "cross":
90
+ if index == 0:
91
+ arg_map["self"] = "at::rand({3, 3, 3})"
92
+ arg_map["other"] = "at::rand({3, 3, 3})"
93
+ else:
94
+ arg_map["self"] = "at::rand({22, 3, 22})"
95
+ arg_map["other"] = "at::rand({22, 3, 22})"
96
+ return
97
+ if op_name == "take":
98
+ if index == 0:
99
+ arg_map["index"] = "at::randint(0, 216, {20}, torch::kInt64)"
100
+ else:
101
+ arg_map["index"] = "at::randint(0, 1000, {100}, torch::kInt64)"
102
+ return
103
+ if op_name == "take_along_dim":
104
+ if index == 0:
105
+ arg_map["indices"] = "at::argsort(self0, 1, true)"
106
+ else:
107
+ arg_map["indices"] = "at::argsort(self1, 1, true)"
108
+ return
109
+ if op_name == "masked_select":
110
+ if index == 0:
111
+ arg_map["mask"] = "at::randn({6, 6, 6}) > 0.5"
112
+ else:
113
+ arg_map["mask"] = "at::rand({22, 22, 22}) > 0.5"
114
+ return
115
+ if op_name == "orgqr":
116
+ if index == 0:
117
+ arg_map["input2"] = "at::rand({6, 6})"
118
+ else:
119
+ arg_map["input2"] = "at::rand({22, 22})"
120
+ return
121
+ if op_name == "ormqr":
122
+ if index == 0:
123
+ arg_map["input2"] = "at::rand({6, 6})"
124
+ else:
125
+ arg_map["input2"] = "at::rand({22, 22})"
126
+ return
127
+ if op_name == "quantile":
128
+ if index == 0:
129
+ arg_map["q"] = "at::rand({6})"
130
+ arg_map["interpolation"] = '"linear"'
131
+ else:
132
+ arg_map["q"] = "at::rand({22})"
133
+ arg_map["interpolation"] = '"linear"'
134
+ return
135
+ if op_name == "nanquantile":
136
+ if index == 0:
137
+ arg_map["q"] = "at::rand({6})"
138
+ arg_map["interpolation"] = '"linear"'
139
+ else:
140
+ arg_map["q"] = "at::rand({22})"
141
+ arg_map["interpolation"] = '"linear"'
142
+ return
143
+ if op_name == "multi_margin_loss":
144
+ if index == 0:
145
+ arg_map["self"] = "at::rand({6, 6})"
146
+ arg_map["target"] = "at::randint(6, {6}, torch::kInt64)"
147
+ arg_map["weight"] = "at::rand({6})"
148
+ else:
149
+ arg_map["self"] = "at::rand({22, 22})"
150
+ arg_map["target"] = "at::randint(22, {22}, torch::kInt64)"
151
+ arg_map["weight"] = "at::rand({22})"
152
+ return
153
+ if op_name == "multilabel_margin_loss":
154
+ if index == 0:
155
+ arg_map["self"] = "at::rand({6, 6})"
156
+ arg_map["target"] = "at::randint(6, {6, 6}, torch::kInt64)"
157
+ else:
158
+ arg_map["self"] = "at::rand({22, 22})"
159
+ arg_map["target"] = "at::randint(22, {22, 22}, torch::kInt64)"
160
+ return
161
+ if op_name == "nll_loss":
162
+ if index == 0:
163
+ arg_map["self"] = "at::rand({6, 6})"
164
+ arg_map["target"] = "at::randint(6, {6}, torch::kInt64)"
165
+ arg_map["weight"] = "at::rand({6})"
166
+ else:
167
+ arg_map["self"] = "at::rand({22, 22})"
168
+ arg_map["target"] = "at::randint(22, {22}, torch::kInt64)"
169
+ arg_map["weight"] = "at::rand({22})"
170
+ return
171
+ if op_name == "nll_loss2d":
172
+ if index == 0:
173
+ arg_map["self"] = "at::rand({6, 6, 6, 6})"
174
+ arg_map["target"] = "at::randint(6, {6, 6, 6}, torch::kInt64)"
175
+ arg_map["weight"] = "at::rand({6})"
176
+ else:
177
+ arg_map["self"] = "at::rand({22, 22, 22, 22})"
178
+ arg_map["target"] = "at::randint(22, {22, 22, 22}, torch::kInt64)"
179
+ arg_map["weight"] = "at::rand({22})"
180
+ return
181
+ if op_name in (
182
+ "fft_fft",
183
+ "fft_ifft",
184
+ "fft_rfft",
185
+ "fft_irfft",
186
+ "fft_hfft",
187
+ "fft_ihfft",
188
+ ):
189
+ arg_map["norm"] = '"forward"'
190
+ return
191
+ if op_name == "linalg_tensorinv":
192
+ if index == 0:
193
+ arg_map["self"] = "at::rand({6, 6, 6, 6})"
194
+ arg_map["ind"] = "2"
195
+ else:
196
+ arg_map["self"] = "at::rand({22, 22, 22, 22})"
197
+ arg_map["ind"] = "2"
198
+ return
199
+ if op_name == "addmv":
200
+ if index == 0:
201
+ arg_map["self"] = "at::rand({2})"
202
+ arg_map["mat"] = "at::rand({2, 2})"
203
+ arg_map["vec"] = "at::rand({2})"
204
+ else:
205
+ arg_map["self"] = "at::rand({35})"
206
+ arg_map["mat"] = "at::rand({35, 35})"
207
+ arg_map["vec"] = "at::rand({35})"
208
+ return
209
+ if op_name == "acosh":
210
+ if index == 0:
211
+ arg_map["self"] = "at::rand({2, 2, 2}) + at::ones({2, 2, 2})"
212
+ else:
213
+ arg_map["self"] = "at::rand({5, 5, 5}) + at::ones({5, 5, 5})"
214
+ return
215
+ if op_name == "adaptive_max_pool2d_backward":
216
+ if index == 0:
217
+ arg_map["grad_output"] = "at::rand({2, 2, 2}, at::kFloat)"
218
+ arg_map["self"] = "at::rand({2, 2, 2}, at::kFloat)"
219
+ arg_map["indices"] = "at::randint(0, 1, {2, 2, 2}, at::kLong)"
220
+ else:
221
+ arg_map["grad_output"] = "at::rand({3, 3, 3}, at::kFloat)"
222
+ arg_map["self"] = "at::rand({3, 3, 3}, at::kFloat)"
223
+ arg_map["indices"] = "at::randint(0, 1, {3, 3, 3}, at::kLong)"
224
+ return
225
+ if op_name == "adaptive_max_pool3d_backward":
226
+ if index == 0:
227
+ arg_map["grad_output"] = "at::rand({2, 2, 2, 2}, at::kFloat)"
228
+ arg_map["self"] = "at::rand({2, 2, 2, 2}, at::kFloat)"
229
+ arg_map["indices"] = "at::randint(0, 1, {2, 2, 2, 2}, at::kLong)"
230
+ else:
231
+ arg_map["grad_output"] = "at::rand({3, 3, 3, 3}, at::kFloat)"
232
+ arg_map["self"] = "at::rand({3, 3, 3, 3}, at::kFloat)"
233
+ arg_map["indices"] = "at::randint(0, 1, {3, 3, 3, 3}, at::kLong)"
234
+ return
235
+ if op_name == "bitwise_left_shift":
236
+ if index == 0:
237
+ arg_map["self"] = "at::randint(1, 1 << 4, {6, 6, 6}, at::kInt)"
238
+ arg_map["other"] = "at::randint(1, 26, {6, 6, 6}, at::kInt)"
239
+ else:
240
+ arg_map["self"] = "at::randint(1, 1 << 4, {22, 22, 22}, at::kInt)"
241
+ arg_map["other"] = "at::randint(1, 26, {22, 22, 22}, at::kInt)"
242
+ return
243
+ if op_name == "bitwise_right_shift":
244
+ if index == 0:
245
+ arg_map["self"] = "at::randint(1 << 21, 1 << 30, {6, 6, 6}, at::kInt)"
246
+ arg_map["other"] = "at::randint(1, 22, {6, 6, 6}, at::kInt)"
247
+ else:
248
+ arg_map["self"] = "at::randint(1 << 21, 1 << 30, {22, 22, 22}, at::kInt)"
249
+ arg_map["other"] = "at::randint(1, 22, {22, 22, 22}, at::kInt)"
250
+ return
251
+ if op_name == "gather":
252
+ if index == 0:
253
+ arg_map["self"] = "at::randint(1, 100, {2,2,2}, at::kInt)"
254
+ arg_map["dim"] = "1"
255
+ arg_map["index"] = "at::randint(0, 1, {2,2,2}, torch::kInt64)"
256
+ arg_map["sparse_grad"] = "false"
257
+ else:
258
+ arg_map["self"] = "at::randint(1, 100, {5,5,5}, at::kInt)"
259
+ arg_map["dim"] = "1"
260
+ arg_map["index"] = "at::randint(0, 4, {5,5,5}, torch::kInt64)"
261
+ arg_map["sparse_grad"] = "false"
262
+ return
263
+ if op_name == "gelu":
264
+ if index == 0:
265
+ arg_map["self"] = "at::rand({6, 6, 6})"
266
+ arg_map["approximate"] = '"tanh"'
267
+ else:
268
+ arg_map["self"] = "at::rand({22, 22, 22})"
269
+ arg_map["approximate"] = '"tanh"'
270
+ return
271
+ if op_name == "gelu_backward":
272
+ if index == 0:
273
+ arg_map["grad_output"] = "at::rand({6, 6, 6})"
274
+ arg_map["self"] = "at::rand({6, 6, 6})"
275
+ arg_map["approximate"] = '"tanh"'
276
+ else:
277
+ arg_map["grad_output"] = "at::rand({22, 22, 22})"
278
+ arg_map["self"] = "at::rand({22, 22, 22})"
279
+ arg_map["approximate"] = '"tanh"'
280
+ return
281
+ if op_name == "index_add":
282
+ if index == 0:
283
+ arg_map["self"] = "at::rand({2})"
284
+ arg_map["dim"] = "0"
285
+ arg_map["index"] = "at::randint(0, 1, {2}, at::kInt)"
286
+ arg_map["source"] = "at::rand({2})"
287
+ arg_map["alpha"] = "2"
288
+ else:
289
+ arg_map["self"] = "at::rand({16})"
290
+ arg_map["dim"] = "0"
291
+ arg_map["index"] = "at::randint(0, 10, {16}, at::kInt)"
292
+ arg_map["source"] = "at::rand({16})"
293
+ arg_map["alpha"] = "2"
294
+ return
295
+ if op_name == "index_copy":
296
+ if index == 0:
297
+ arg_map["self"] = "at::rand({2})"
298
+ arg_map["dim"] = "0"
299
+ arg_map["index"] = "at::randint(0, 1, {2}, at::kLong)"
300
+ arg_map["source"] = "at::rand({2})"
301
+ else:
302
+ arg_map["self"] = "at::rand({32})"
303
+ arg_map["dim"] = "0"
304
+ arg_map["index"] = "at::randint(0, 10, {32}, at::kLong)"
305
+ arg_map["source"] = "at::rand({32})"
306
+ return
307
+ if op_name == "linalg_cross":
308
+ if index == 0:
309
+ arg_map["self"] = "at::rand({6, 3, 6})"
310
+ arg_map["other"] = "at::rand({6, 3, 6})"
311
+ arg_map["dim"] = "1"
312
+ else:
313
+ arg_map["self"] = "at::rand({22, 3, 22})"
314
+ arg_map["other"] = "at::rand({22, 3, 22})"
315
+ arg_map["dim"] = "1"
316
+ return
317
+ if op_name == "nll_loss_backward":
318
+ if index == 0:
319
+ arg_map["grad_output"] = "at::rand({})"
320
+ arg_map["self"] = "at::rand({6})"
321
+ arg_map["target"] = "at::randint(0, 5, {6}, torch::kInt64)"
322
+ arg_map["weight"] = "at::rand({6})"
323
+ arg_map["reduction"] = "1"
324
+ arg_map["ignore_index"] = "1"
325
+ arg_map["total_weight"] = "at::rand({})"
326
+ else:
327
+ arg_map["grad_output"] = "at::rand({})"
328
+ arg_map["self"] = "at::rand({36})"
329
+ arg_map["target"] = "at::randint(0, 11, {36}, torch::kInt64)"
330
+ arg_map["weight"] = "at::rand({36})"
331
+ arg_map["reduction"] = "1"
332
+ arg_map["ignore_index"] = "1"
333
+ arg_map["total_weight"] = "at::rand({})"
334
+ return
335
+ if op_name in ["scatter", "scatter_add", "_scatter_reduce"]:
336
+ if index == 0:
337
+ arg_map["self"] = "at::randint(1, 100, {2,2,2}, torch::kInt64)"
338
+ arg_map["index"] = "at::randint(0, 1, {2,2,2}, torch::kInt64)"
339
+ arg_map["src"] = "at::randint(1, 100, {2,2,2}, torch::kInt64)"
340
+ else:
341
+ arg_map["self"] = "at::randint(1, 100, {5,5,5}, torch::kInt64)"
342
+ arg_map["index"] = "at::randint(0, 1, {5,5,5}, torch::kInt64)"
343
+ arg_map["src"] = "at::randint(1, 100, {5,5,5}, torch::kInt64)"
344
+ if "reduce" in arg_map:
345
+ arg_map["reduce"] = '"sum"' if op_name == "_scatter_reduce" else '"add"'
346
+ return
347
+ if op_name == "scatter_reduce":
348
+ arg_map["reduce"] = '"mean"'
349
+ if index == 0:
350
+ arg_map["index"] = "at::randint(6, {6, 6, 6}, torch::kInt64)"
351
+ else:
352
+ arg_map["index"] = "at::randint(22, {22, 22, 22}, torch::kInt64)"
353
+ return
354
+ if op_name == "special_zeta":
355
+ if index == 0:
356
+ arg_map["self"] = "at::rand({2,2,2}, at::kDouble) + at::ones({2,2,2})"
357
+ arg_map["other"] = "at::rand({2,2,2}, at::kDouble) + at::ones({2,2,2})"
358
+ else:
359
+ arg_map["self"] = "at::rand({5,5,5}, at::kDouble) + at::ones({5,5,5})"
360
+ arg_map["other"] = "at::rand({5,5,5}, at::kDouble) + at::ones({5,5,5})"
361
+ return
362
+ if op_name == "_convert_indices_from_csr_to_coo":
363
+ if index == 0:
364
+ arg_map["crow_indices"] = "torch::tensor({1}, torch::kInt32)"
365
+ arg_map["col_indices"] = "torch::tensor({0, 1, 0}, torch::kInt32)"
366
+ arg_map["out_int32"] = "false"
367
+ else:
368
+ arg_map["crow_indices"] = "torch::tensor({0}, torch::kInt32)"
369
+ arg_map[
370
+ "col_indices"
371
+ ] = "torch::tensor({0, 1, 0, 2, 1, 2, 0, 1, 0, 2, 1, 2}, torch::kInt32)"
372
+ arg_map["out_int32"] = "false"
373
+ return
374
+ if op_name == "_convert_indices_from_coo_to_csr":
375
+ if index == 0:
376
+ arg_map["self"] = "at::randint(0, 3, {2}, at::kInt)"
377
+ arg_map["size"] = "10"
378
+ arg_map["out_int32"] = "false"
379
+ else:
380
+ arg_map["self"] = "at::randint(0, 3, {12}, at::kInt)"
381
+ arg_map["size"] = "24"
382
+ arg_map["out_int32"] = "false"
383
+ return
384
+ if op_name in ("diagonal", "linalg_diagonal"):
385
+ arg_map["offset"] = "0"
386
+ arg_map["dim0"] = "1"
387
+ arg_map["dim1"] = "2"
388
+ return
wemm/lib/python3.10/site-packages/triton/__pycache__/utils.cpython-310.pyc ADDED
Binary file (2.3 kB). View file