Commit 0fc3db1 (verified), committed by ZTWHHH · 1 Parent(s): f1afe86

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. llava/lib/python3.10/site-packages/pip/_vendor/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llava/lib/python3.10/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llava/lib/python3.10/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-310.pyc +0 -0
  4. llava/lib/python3.10/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-310.pyc +0 -0
  5. llava/lib/python3.10/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-310.pyc +0 -0
  6. llava/lib/python3.10/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-310.pyc +0 -0
  7. llava/lib/python3.10/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-310.pyc +0 -0
  8. llava/lib/python3.10/site-packages/pip/_vendor/distro/__init__.py +54 -0
  9. llava/lib/python3.10/site-packages/pip/_vendor/distro/__main__.py +4 -0
  10. minigpt2/lib/python3.10/site-packages/Crypto/SelfTest/PublicKey/__pycache__/test_ECC_Ed25519.cpython-310.pyc +0 -0
  11. minigpt2/lib/python3.10/site-packages/Crypto/SelfTest/PublicKey/__pycache__/test_import_ECC.cpython-310.pyc +0 -0
  12. minigpt2/lib/python3.10/site-packages/Crypto/SelfTest/PublicKey/test_RSA.py +324 -0
  13. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_sparse_mm_search.h +30 -0
  14. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward_native.h +24 -0
  15. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_set_plan_cache_max_size_native.h +21 -0
  16. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_dirichlet_grad_native.h +23 -0
  17. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sign_native.h +25 -0
  18. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_compositeexplicitautograd_dispatch.h +23 -0
  19. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_backward_ops.h +28 -0
  20. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_has_compatible_shallow_copy_type.h +30 -0
  21. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_has_same_storage_numel_ops.h +28 -0
  22. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dep_token.h +34 -0
  23. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_transpose_native.h +23 -0
  24. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_from_padded_and_nested_example_native.h +22 -0
  25. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_tensor_list_ops.h +39 -0
  26. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_jagged.h +30 -0
  27. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward_native.h +21 -0
  28. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_unique2_compositeexplicitautograd_dispatch.h +24 -0
  29. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_native.h +24 -0
  30. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/addmm_ops.h +50 -0
  31. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_cpu_dispatch.h +24 -0
  32. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_backward_cpu_dispatch.h +25 -0
  33. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/broadcast_tensors.h +30 -0
  34. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cos_native.h +24 -0
  35. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_backward_ops.h +39 -0
  36. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/float_power_ops.h +105 -0
  37. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward.h +39 -0
  38. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gather_meta.h +27 -0
  39. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_cpu_dispatch.h +25 -0
  40. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj.h +30 -0
  41. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_ops.h +28 -0
  42. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_cuda_dispatch.h +23 -0
  43. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_cuda_dispatch.h +24 -0
  44. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_ops.h +39 -0
  45. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal.h +53 -0
  46. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
  47. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_rank_ops.h +105 -0
  48. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_ops.h +39 -0
  49. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp.h +53 -0
  50. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/lt_cuda_dispatch.h +30 -0
llava/lib/python3.10/site-packages/pip/_vendor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.04 kB)
 
llava/lib/python3.10/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (741 Bytes)
 
llava/lib/python3.10/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-310.pyc ADDED
Binary file (1.81 kB)
 
llava/lib/python3.10/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-310.pyc ADDED
Binary file (4.39 kB)
 
llava/lib/python3.10/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-310.pyc ADDED
Binary file (3.25 kB)
 
llava/lib/python3.10/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-310.pyc ADDED
Binary file (3.19 kB)
 
llava/lib/python3.10/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-310.pyc ADDED
Binary file (5.38 kB)
 
llava/lib/python3.10/site-packages/pip/_vendor/distro/__init__.py ADDED
@@ -0,0 +1,54 @@
+from .distro import (
+    NORMALIZED_DISTRO_ID,
+    NORMALIZED_LSB_ID,
+    NORMALIZED_OS_ID,
+    LinuxDistribution,
+    __version__,
+    build_number,
+    codename,
+    distro_release_attr,
+    distro_release_info,
+    id,
+    info,
+    like,
+    linux_distribution,
+    lsb_release_attr,
+    lsb_release_info,
+    major_version,
+    minor_version,
+    name,
+    os_release_attr,
+    os_release_info,
+    uname_attr,
+    uname_info,
+    version,
+    version_parts,
+)
+
+__all__ = [
+    "NORMALIZED_DISTRO_ID",
+    "NORMALIZED_LSB_ID",
+    "NORMALIZED_OS_ID",
+    "LinuxDistribution",
+    "build_number",
+    "codename",
+    "distro_release_attr",
+    "distro_release_info",
+    "id",
+    "info",
+    "like",
+    "linux_distribution",
+    "lsb_release_attr",
+    "lsb_release_info",
+    "major_version",
+    "minor_version",
+    "name",
+    "os_release_attr",
+    "os_release_info",
+    "uname_attr",
+    "uname_info",
+    "version",
+    "version_parts",
+]
+
+__version__ = __version__
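For orientation, this vendored copy re-exports the query API of the standalone distro project. A minimal usage sketch (the pip._vendor import path is inferred from the file location above):

# Hedged usage sketch of the vendored module added above.
from pip._vendor import distro

print(distro.id())       # e.g. "ubuntu"
print(distro.version())  # e.g. "22.04"
print(distro.info())     # dict with id, version, codename, ...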
llava/lib/python3.10/site-packages/pip/_vendor/distro/__main__.py ADDED
@@ -0,0 +1,4 @@
+from .distro import main
+
+if __name__ == "__main__":
+    main()
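The __main__ guard above makes the vendored package runnable as a module. A sketch using the standard library, under the same path assumption as before:

# Equivalent to running `python -m pip._vendor.distro` from a shell.
import runpy

runpy.run_module("pip._vendor.distro", run_name="__main__")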
minigpt2/lib/python3.10/site-packages/Crypto/SelfTest/PublicKey/__pycache__/test_ECC_Ed25519.cpython-310.pyc ADDED
Binary file (10.1 kB)
 
minigpt2/lib/python3.10/site-packages/Crypto/SelfTest/PublicKey/__pycache__/test_import_ECC.cpython-310.pyc ADDED
Binary file (54.1 kB)
 
minigpt2/lib/python3.10/site-packages/Crypto/SelfTest/PublicKey/test_RSA.py ADDED
@@ -0,0 +1,324 @@
+# -*- coding: utf-8 -*-
+#
+# SelfTest/PublicKey/test_RSA.py: Self-test for the RSA primitive
+#
+# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
+#
+# ===================================================================
+# The contents of this file are dedicated to the public domain. To
+# the extent that dedication to the public domain is not available,
+# everyone is granted a worldwide, perpetual, royalty-free,
+# non-exclusive license to exercise all rights associated with the
+# contents of this file for any purpose whatsoever.
+# No rights are reserved.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+# ===================================================================
+
+"""Self-test suite for Crypto.PublicKey.RSA"""
+
+__revision__ = "$Id$"
+
+import os
+import pickle
+from pickle import PicklingError
+from Crypto.Util.py3compat import *
+
+import unittest
+from Crypto.SelfTest.st_common import list_test_cases, a2b_hex, b2a_hex
+
+class RSATest(unittest.TestCase):
+    # Test vectors from "RSA-OAEP and RSA-PSS test vectors (.zip file)"
+    # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
+    # See RSADSI's PKCS#1 page at
+    # http://www.rsa.com/rsalabs/node.asp?id=2125
+
+    # from oaep-int.txt
+
+    # TODO: PyCrypto treats the message as starting *after* the leading "00"
+    # TODO: That behaviour should probably be changed in the future.
+    plaintext = """
+           eb 7a 19 ac e9 e3 00 63 50 e3 29 50 4b 45 e2
+        ca 82 31 0b 26 dc d8 7d 5c 68 f1 ee a8 f5 52 67
+        c3 1b 2e 8b b4 25 1f 84 d7 e0 b2 c0 46 26 f5 af
+        f9 3e dc fb 25 c9 c2 b3 ff 8a e1 0e 83 9a 2d db
+        4c dc fe 4f f4 77 28 b4 a1 b7 c1 36 2b aa d2 9a
+        b4 8d 28 69 d5 02 41 21 43 58 11 59 1b e3 92 f9
+        82 fb 3e 87 d0 95 ae b4 04 48 db 97 2f 3a c1 4f
+        7b c2 75 19 52 81 ce 32 d2 f1 b7 6d 4d 35 3e 2d
+    """
+
+    ciphertext = """
+        12 53 e0 4d c0 a5 39 7b b4 4a 7a b8 7e 9b f2 a0
+        39 a3 3d 1e 99 6f c8 2a 94 cc d3 00 74 c9 5d f7
+        63 72 20 17 06 9e 52 68 da 5d 1c 0b 4f 87 2c f6
+        53 c1 1d f8 23 14 a6 79 68 df ea e2 8d ef 04 bb
+        6d 84 b1 c3 1d 65 4a 19 70 e5 78 3b d6 eb 96 a0
+        24 c2 ca 2f 4a 90 fe 9f 2e f5 c9 c1 40 e5 bb 48
+        da 95 36 ad 87 00 c8 4f c9 13 0a de a7 4e 55 8d
+        51 a7 4d df 85 d8 b5 0d e9 68 38 d6 06 3e 09 55
+    """
+
+    modulus = """
+        bb f8 2f 09 06 82 ce 9c 23 38 ac 2b 9d a8 71 f7
+        36 8d 07 ee d4 10 43 a4 40 d6 b6 f0 74 54 f5 1f
+        b8 df ba af 03 5c 02 ab 61 ea 48 ce eb 6f cd 48
+        76 ed 52 0d 60 e1 ec 46 19 71 9d 8a 5b 8b 80 7f
+        af b8 e0 a3 df c7 37 72 3e e6 b4 b7 d9 3a 25 84
+        ee 6a 64 9d 06 09 53 74 88 34 b2 45 45 98 39 4e
+        e0 aa b1 2d 7b 61 a5 1f 52 7a 9a 41 f6 c1 68 7f
+        e2 53 72 98 ca 2a 8f 59 46 f8 e5 fd 09 1d bd cb
+    """
+
+    e = 0x11    # public exponent
+
+    prime_factor = """
+        c9 7f b1 f0 27 f4 53 f6 34 12 33 ea aa d1 d9 35
+        3f 6c 42 d0 88 66 b1 d0 5a 0f 20 35 02 8b 9d 86
+        98 40 b4 16 66 b4 2e 92 ea 0d a3 b4 32 04 b5 cf
+        ce 33 52 52 4d 04 16 a5 a4 41 e7 00 af 46 15 03
+    """
+
+    def setUp(self):
+        global RSA, Random, bytes_to_long
+        from Crypto.PublicKey import RSA
+        from Crypto import Random
+        from Crypto.Util.number import bytes_to_long, inverse
+        self.n = bytes_to_long(a2b_hex(self.modulus))
+        self.p = bytes_to_long(a2b_hex(self.prime_factor))
+
+        # Compute q, d, and u from n, e, and p
+        self.q = self.n // self.p
+        self.d = inverse(self.e, (self.p-1)*(self.q-1))
+        self.u = inverse(self.p, self.q)    # u = p**-1 (mod q)
+
+        self.rsa = RSA
+
+    def test_generate_1arg(self):
+        """RSA (default implementation) generated key (1 argument)"""
+        rsaObj = self.rsa.generate(1024)
+        self._check_private_key(rsaObj)
+        self._exercise_primitive(rsaObj)
+        pub = rsaObj.public_key()
+        self._check_public_key(pub)
+        self._exercise_public_primitive(rsaObj)
+
+    def test_generate_2arg(self):
+        """RSA (default implementation) generated key (2 arguments)"""
+        rsaObj = self.rsa.generate(1024, Random.new().read)
+        self._check_private_key(rsaObj)
+        self._exercise_primitive(rsaObj)
+        pub = rsaObj.public_key()
+        self._check_public_key(pub)
+        self._exercise_public_primitive(rsaObj)
+
+    def test_generate_3args(self):
+        rsaObj = self.rsa.generate(1024, Random.new().read, e=65537)
+        self._check_private_key(rsaObj)
+        self._exercise_primitive(rsaObj)
+        pub = rsaObj.public_key()
+        self._check_public_key(pub)
+        self._exercise_public_primitive(rsaObj)
+        self.assertEqual(65537, rsaObj.e)
+
+    def test_construct_2tuple(self):
+        """RSA (default implementation) constructed key (2-tuple)"""
+        pub = self.rsa.construct((self.n, self.e))
+        self._check_public_key(pub)
+        self._check_encryption(pub)
+
+    def test_construct_3tuple(self):
+        """RSA (default implementation) constructed key (3-tuple)"""
+        rsaObj = self.rsa.construct((self.n, self.e, self.d))
+        self._check_encryption(rsaObj)
+        self._check_decryption(rsaObj)
+
+    def test_construct_4tuple(self):
+        """RSA (default implementation) constructed key (4-tuple)"""
+        rsaObj = self.rsa.construct((self.n, self.e, self.d, self.p))
+        self._check_encryption(rsaObj)
+        self._check_decryption(rsaObj)
+
+    def test_construct_5tuple(self):
+        """RSA (default implementation) constructed key (5-tuple)"""
+        rsaObj = self.rsa.construct((self.n, self.e, self.d, self.p, self.q))
+        self._check_private_key(rsaObj)
+        self._check_encryption(rsaObj)
+        self._check_decryption(rsaObj)
+
+    def test_construct_6tuple(self):
+        """RSA (default implementation) constructed key (6-tuple)"""
+        rsaObj = self.rsa.construct((self.n, self.e, self.d, self.p, self.q, self.u))
+        self._check_private_key(rsaObj)
+        self._check_encryption(rsaObj)
+        self._check_decryption(rsaObj)
+
+    def test_construct_bad_key2(self):
+        tup = (self.n, 1)
+        self.assertRaises(ValueError, self.rsa.construct, tup)
+
+        # An even modulus is wrong
+        tup = (self.n+1, self.e)
+        self.assertRaises(ValueError, self.rsa.construct, tup)
+
+    def test_construct_bad_key3(self):
+        tup = (self.n, self.e, self.d+1)
+        self.assertRaises(ValueError, self.rsa.construct, tup)
+
+    def test_construct_bad_key5(self):
+        tup = (self.n, self.e, self.d, self.p, self.p)
+        self.assertRaises(ValueError, self.rsa.construct, tup)
+
+        tup = (self.p*self.p, self.e, self.p, self.p)
+        self.assertRaises(ValueError, self.rsa.construct, tup)
+
+        tup = (self.p*self.p, 3, self.p, self.q)
+        self.assertRaises(ValueError, self.rsa.construct, tup)
+
+    def test_construct_bad_key6(self):
+        tup = (self.n, self.e, self.d, self.p, self.q, 10)
+        self.assertRaises(ValueError, self.rsa.construct, tup)
+
+        from Crypto.Util.number import inverse
+        tup = (self.n, self.e, self.d, self.p, self.q, inverse(self.q, self.p))
+        self.assertRaises(ValueError, self.rsa.construct, tup)
+
+    def test_factoring(self):
+        rsaObj = self.rsa.construct([self.n, self.e, self.d])
+        self.assertTrue(rsaObj.p == self.p or rsaObj.p == self.q)
+        self.assertTrue(rsaObj.q == self.p or rsaObj.q == self.q)
+        self.assertTrue(rsaObj.q * rsaObj.p == self.n)
+
+        self.assertRaises(ValueError, self.rsa.construct, [self.n, self.e, self.n-1])
+
+    def test_repr(self):
+        rsaObj = self.rsa.construct((self.n, self.e, self.d, self.p, self.q))
+        repr(rsaObj)
+
+    def test_serialization(self):
+        """RSA keys are unpicklable"""
+
+        rsa_key = self.rsa.generate(1024)
+        self.assertRaises(PicklingError, pickle.dumps, rsa_key)
+
+    def test_raw_rsa_boundary(self):
+        # The argument of every RSA raw operation (encrypt/decrypt) must be
+        # non-negative and no larger than the modulus
+        rsa_obj = self.rsa.generate(1024)
+
+        self.assertRaises(ValueError, rsa_obj._decrypt, rsa_obj.n)
+        self.assertRaises(ValueError, rsa_obj._decrypt_to_bytes, rsa_obj.n)
+        self.assertRaises(ValueError, rsa_obj._encrypt, rsa_obj.n)
+
+        self.assertRaises(ValueError, rsa_obj._decrypt, -1)
+        self.assertRaises(ValueError, rsa_obj._decrypt_to_bytes, -1)
+        self.assertRaises(ValueError, rsa_obj._encrypt, -1)
+
+    def test_size(self):
+        pub = self.rsa.construct((self.n, self.e))
+        self.assertEqual(pub.size_in_bits(), 1024)
+        self.assertEqual(pub.size_in_bytes(), 128)
+
+    def _check_private_key(self, rsaObj):
+        from Crypto.Math.Numbers import Integer
+
+        # Check capabilities
+        self.assertEqual(1, rsaObj.has_private())
+
+        # Sanity check key data
+        self.assertEqual(rsaObj.n, rsaObj.p * rsaObj.q)     # n = pq
+        lcm = int(Integer(rsaObj.p-1).lcm(rsaObj.q-1))
+        self.assertEqual(1, rsaObj.d * rsaObj.e % lcm)      # ed = 1 (mod LCM(p-1, q-1))
+        self.assertEqual(1, rsaObj.p * rsaObj.u % rsaObj.q) # pu = 1 (mod q)
+        self.assertEqual(1, rsaObj.p > 1)                   # p > 1
+        self.assertEqual(1, rsaObj.q > 1)                   # q > 1
+        self.assertEqual(1, rsaObj.e > 1)                   # e > 1
+        self.assertEqual(1, rsaObj.d > 1)                   # d > 1
+
+        self.assertEqual(rsaObj.u, rsaObj.invp)
+        self.assertEqual(1, rsaObj.q * rsaObj.invq % rsaObj.p)
+
+    def _check_public_key(self, rsaObj):
+        ciphertext = a2b_hex(self.ciphertext)
+
+        # Check capabilities
+        self.assertEqual(0, rsaObj.has_private())
+
+        # Check rsaObj.[ne] -> rsaObj.[ne] mapping
+        self.assertEqual(rsaObj.n, rsaObj.n)
+        self.assertEqual(rsaObj.e, rsaObj.e)
+
+        # Check that private parameters are all missing
+        self.assertEqual(0, hasattr(rsaObj, 'd'))
+        self.assertEqual(0, hasattr(rsaObj, 'p'))
+        self.assertEqual(0, hasattr(rsaObj, 'q'))
+        self.assertEqual(0, hasattr(rsaObj, 'u'))
+
+        # Sanity check key data
+        self.assertEqual(1, rsaObj.e > 1)   # e > 1
+
+        # Public keys should not be able to sign or decrypt
+        self.assertRaises(TypeError, rsaObj._decrypt,
+                          bytes_to_long(ciphertext))
+        self.assertRaises(TypeError, rsaObj._decrypt_to_bytes,
+                          bytes_to_long(ciphertext))
+
+        # Check __eq__ and __ne__
+        self.assertEqual(rsaObj.public_key() == rsaObj.public_key(), True)   # assert_
+        self.assertEqual(rsaObj.public_key() != rsaObj.public_key(), False)  # assertFalse
+
+        self.assertEqual(rsaObj.publickey(), rsaObj.public_key())
+
+    def _exercise_primitive(self, rsaObj):
+        # Since we're using a randomly-generated key, we can't check the test
+        # vector, but we can make sure encryption and decryption are inverse
+        # operations.
+        ciphertext = bytes_to_long(a2b_hex(self.ciphertext))
+
+        # Test decryption
+        plaintext = rsaObj._decrypt(ciphertext)
+
+        # Test encryption (2 arguments)
+        new_ciphertext2 = rsaObj._encrypt(plaintext)
+        self.assertEqual(ciphertext, new_ciphertext2)
+
+    def _exercise_public_primitive(self, rsaObj):
+        plaintext = a2b_hex(self.plaintext)
+
+        # Test encryption (2 arguments)
+        new_ciphertext2 = rsaObj._encrypt(bytes_to_long(plaintext))
+
+    def _check_encryption(self, rsaObj):
+        plaintext = a2b_hex(self.plaintext)
+        ciphertext = a2b_hex(self.ciphertext)
+
+        # Test encryption
+        new_ciphertext2 = rsaObj._encrypt(bytes_to_long(plaintext))
+        self.assertEqual(bytes_to_long(ciphertext), new_ciphertext2)
+
+    def _check_decryption(self, rsaObj):
+        plaintext = bytes_to_long(a2b_hex(self.plaintext))
+        ciphertext = bytes_to_long(a2b_hex(self.ciphertext))
+
+        # Test plain decryption
+        new_plaintext = rsaObj._decrypt(ciphertext)
+        self.assertEqual(plaintext, new_plaintext)
+
+
+def get_tests(config={}):
+    tests = []
+    tests += list_test_cases(RSATest)
+    return tests
+
+if __name__ == '__main__':
+    suite = lambda: unittest.TestSuite(get_tests())
+    unittest.main(defaultTest='suite')
+
+# vim:set ts=4 sw=4 sts=4 expandtab:
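Since the module exposes its cases through get_tests(), the suite can be driven directly with unittest; a minimal sketch (assumes pycryptodome is installed):

# Run the RSA self-tests defined in the file above.
import unittest
from Crypto.SelfTest.PublicKey import test_RSA

unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(test_RSA.get_tests()))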
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_sparse_mm_search.h ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_cslt_sparse_mm_search_ops.h>
+
+namespace at {
+
+
+// aten::_cslt_sparse_mm_search(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False) -> int
+inline int64_t _cslt_sparse_mm_search(const at::Tensor & compressed_A, const at::Tensor & dense_B, const ::std::optional<at::Tensor> & bias={}, const ::std::optional<at::Tensor> & alpha={}, ::std::optional<at::ScalarType> out_dtype=::std::nullopt, bool transpose_result=false) {
+    return at::_ops::_cslt_sparse_mm_search::call(compressed_A, dense_B, bias, alpha, out_dtype, transpose_result);
+}
+
+}
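These generated Function.h wrappers simply forward to at::_ops::<op>::call, and the same schema is visible from Python through the dispatcher. A hedged sketch (assumes a recent PyTorch that registers this private cuSPARSELt op; actually running it would also need a CUDA build and a compressed operand):

# Inspect the registered op without executing it.
import torch

print(torch.ops.aten._cslt_sparse_mm_search)  # resolves against the aten schema above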
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward_native.h ADDED
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _ctc_loss_backward_out(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity, at::Tensor & out);
+TORCH_API at::Tensor ctc_loss_backward_cpu(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false);
+TORCH_API at::Tensor ctc_loss_backward_gpu(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false);
+TORCH_API at::Tensor ctc_loss_backward_tensor(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false);
+} // namespace native
+} // namespace at
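These declarations back the autograd path of the public CTC loss; a sketch that exercises them indirectly:

import torch
import torch.nn.functional as F

log_probs = torch.randn(50, 16, 20).log_softmax(2).requires_grad_()  # (T, N, C)
targets = torch.randint(1, 20, (16, 30), dtype=torch.long)           # blank=0 excluded
input_lengths = torch.full((16,), 50, dtype=torch.long)
target_lengths = torch.randint(10, 30, (16,), dtype=torch.long)

loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths, zero_infinity=True)
loss.backward()  # autograd routes the gradient through the backward kernels above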
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_set_plan_cache_max_size_native.h ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _cufft_set_plan_cache_max_size(at::DeviceIndex device_index, int64_t max_size);
+} // namespace native
+} // namespace at
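This native function is what the Python-level cuFFT plan-cache knob ultimately calls; a sketch (assumes a CUDA build):

import torch

if torch.cuda.is_available():
    cache = torch.backends.cuda.cufft_plan_cache[0]  # plan cache for device 0
    cache.max_size = 32  # ends up in _cufft_set_plan_cache_max_size
    print(cache.max_size)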
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_dirichlet_grad_native.h ADDED
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _dirichlet_grad_out(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total, at::Tensor & out);
+TORCH_API at::Tensor _dirichlet_grad_cpu(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total);
+TORCH_API at::Tensor _dirichlet_grad_cuda(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total);
+} // namespace native
+} // namespace at
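_dirichlet_grad supplies the pathwise derivative used by reparameterized Dirichlet sampling; a sketch that reaches it through torch.distributions:

import torch

conc = torch.tensor([2.0, 3.0, 4.0], requires_grad=True)
sample = torch.distributions.Dirichlet(conc).rsample()
sample[0].backward()  # gradient flows through aten::_dirichlet_grad
print(conc.grad)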
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sign_native.h ADDED
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_sign_slow(at::TensorList self);
+TORCH_API void _foreach_sign_out(at::TensorList self, at::TensorList out);
+TORCH_API void foreach_tensor_sign_slow_(at::TensorList self);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_sign_cuda(at::TensorList self);
+TORCH_API void foreach_tensor_sign_cuda_(at::TensorList self);
+} // namespace native
+} // namespace at
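The foreach kernels apply sign across a whole tensor list in one dispatch instead of a Python loop; a sketch using the private torch._foreach_sign binding (assumed available in recent PyTorch):

import torch

tensors = [torch.tensor([-2.0, 0.0, 3.0]), torch.tensor([0.5, -0.5])]
print(torch._foreach_sign(tensors))  # tuple of elementwise-sign tensors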
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _functional_sym_constrain_range(const at::Scalar & size, ::std::optional<int64_t> min, ::std::optional<int64_t> max, const at::Tensor & dep_token);
+
+} // namespace compositeexplicitautograd
+} // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_backward_ops.h ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _grid_sampler_2d_cpu_fallback_backward {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_grid_sampler_2d_cpu_fallback_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+};
+
+}} // namespace at::_ops
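This op is the CPU fallback backward for grid sampling; a sketch that can reach it through the public API:

import torch
import torch.nn.functional as F

inp = torch.randn(1, 3, 8, 8, requires_grad=True)
grid = torch.rand(1, 4, 4, 2) * 2 - 1  # normalized sampling coords in [-1, 1]
out = F.grid_sample(inp, grid, mode='bilinear', align_corners=False)
out.sum().backward()  # on CPU this may hit the fallback backward declared above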
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_has_compatible_shallow_copy_type.h ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_has_compatible_shallow_copy_type_ops.h>
+
+namespace at {
+
+
+// aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool
+inline bool _has_compatible_shallow_copy_type(const at::Tensor & self, const at::Tensor & from) {
+    return at::_ops::_has_compatible_shallow_copy_type::call(self, from);
+}
+
+}
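A sketch probing this predicate from Python through the dispatcher (the torch.ops.aten path is an assumption; the op itself is private):

import torch

a, b = torch.randn(3), torch.randn(5)
# True for two ordinary dense CPU tensors: same TensorImpl type.
print(torch.ops.aten._has_compatible_shallow_copy_type(a, b))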
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_has_same_storage_numel_ops.h ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _has_same_storage_numel {
+  using schema = bool (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_has_same_storage_numel")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_has_same_storage_numel(Tensor self, Tensor other) -> bool")
+  static bool call(const at::Tensor & self, const at::Tensor & other);
+  static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dep_token.h ADDED
@@ -0,0 +1,34 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_make_dep_token_ops.h>
+
+namespace at {
+
+
+// aten::_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor _make_dep_token(at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+    return at::_ops::_make_dep_token::call(c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+// aten::_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor _make_dep_token(::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::_make_dep_token::call(dtype, layout, device, pin_memory, memory_format);
+}
+
+}
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_transpose_native.h ADDED
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _mkldnn_transpose_out(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out);
+TORCH_API at::Tensor mkldnn_transpose(const at::Tensor & self, int64_t dim0, int64_t dim1);
+TORCH_API at::Tensor & mkldnn_transpose_(at::Tensor & self, int64_t dim0, int64_t dim1);
+} // namespace native
+} // namespace at
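These kernels implement transpose for opaque MKL-DNN tensors; a hedged sketch (assumes a oneDNN-enabled CPU build):

import torch

x = torch.randn(2, 3)
if torch.backends.mkldnn.is_available():
    y = x.to_mkldnn().transpose(0, 1)  # dispatches to mkldnn_transpose
    print(y.to_dense().shape)          # torch.Size([3, 2])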
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_from_padded_and_nested_example_native.h ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _nested_from_padded_and_nested_example_out(const at::Tensor & padded, const at::Tensor & nt_example, at::Tensor & out);
+TORCH_API at::Tensor NestedTensor_from_padded_and_nested_example(const at::Tensor & padded, const at::Tensor & nt_example);
+} // namespace native
+} // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_tensor_list_ops.h ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _nested_tensor_from_tensor_list {
+  using schema = at::Tensor (at::TensorList, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_tensor_from_tensor_list")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(at::TensorList list, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList list, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+};
+
+struct TORCH_API _nested_tensor_from_tensor_list_out {
+  using schema = at::Tensor & (at::TensorList, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_tensor_from_tensor_list")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(at::TensorList list, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList list, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, at::Tensor & out);
+};
+
+}} // namespace at::_ops
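The schema corresponds to the strided-layout constructor in the public nested-tensor API; a sketch:

import torch

nt = torch.nested.nested_tensor([torch.randn(2, 3), torch.randn(4, 3)])
print(nt.is_nested)  # True: one nested tensor holding two ragged components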
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_jagged.h ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_nested_view_from_jagged_ops.h>
+
+namespace at {
+
+
+// aten::_nested_view_from_jagged(Tensor(a) self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1) -> Tensor(a)
+inline at::Tensor _nested_view_from_jagged(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const ::std::optional<at::Tensor> & lengths={}, int64_t ragged_idx=1) {
+    return at::_ops::_nested_view_from_jagged::call(self, offsets, dummy, lengths, ragged_idx);
+}
+
+}
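The jagged layout builds nested tensors as views over one flat values buffer via this op; a hedged sketch (assumes a PyTorch version with layout=torch.jagged support):

import torch

nt = torch.nested.nested_tensor(
    [torch.randn(2, 3), torch.randn(4, 3)], layout=torch.jagged
)
print(nt.values().shape)  # flat (6, 3) buffer viewed through _nested_view_from_jagged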
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward_native.h ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_cpu_backward(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & attn_mask={}, ::std::optional<double> scale=::std::nullopt);
+} // namespace native
+} // namespace at
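This is the CPU flash-attention backward; a sketch that exercises it through the public SDPA entry point:

import torch
import torch.nn.functional as F

q = torch.randn(1, 2, 8, 16, requires_grad=True)  # (N, heads, seq, head_dim)
k = torch.randn(1, 2, 8, 16, requires_grad=True)
v = torch.randn(1, 2, 8, 16, requires_grad=True)
out = F.scaled_dot_product_attention(q, k, v)
out.sum().backward()  # on CPU this can use the backward declared above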
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_unique2_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, bool sorted=true, bool return_inverse=false, bool return_counts=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_outf(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+
+} // namespace compositeexplicitautograd
+} // namespace at
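_unique2 backs torch.unique when inverse indices and counts are requested; a sketch:

import torch

x = torch.tensor([1, 2, 2, 3, 1])
values, inverse, counts = torch.unique(x, return_inverse=True, return_counts=True)
print(values, inverse, counts)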
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_native.h ADDED
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor adaptive_avg_pool3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size);
+TORCH_API at::Tensor & adaptive_avg_pool3d_out_cpu(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out);
+TORCH_API at::Tensor & adaptive_avg_pool3d_out_cuda(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out);
+TORCH_API at::Tensor & adaptive_avg_pool3d_out_quantized_cpu(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out);
+} // namespace native
+} // namespace at
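A sketch of the public entry point these kernels implement:

import torch
import torch.nn.functional as F

x = torch.randn(1, 4, 8, 8, 8)  # (N, C, D, H, W)
print(F.adaptive_avg_pool3d(x, (2, 2, 2)).shape)  # torch.Size([1, 4, 2, 2, 2])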
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/addmm_ops.h ADDED
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API addmm_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::addmm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+};
+
+struct TORCH_API addmm {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::addmm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha);
+};
+
+struct TORCH_API addmm_ {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::addmm_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha);
+};
+
+}} // namespace at::_ops
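The three structs map onto the out=, functional, and in-place spellings in Python; a sketch:

import torch

M = torch.randn(2, 4)
mat1, mat2 = torch.randn(2, 3), torch.randn(3, 4)

y = torch.addmm(M, mat1, mat2, beta=1, alpha=1)  # aten::addmm
out = torch.empty(2, 4)
torch.addmm(M, mat1, mat2, out=out)              # aten::addmm.out
M.addmm_(mat1, mat2)                             # aten::addmm_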
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_cpu_dispatch.h ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor as_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional<int64_t> storage_offset=::std::nullopt);
+TORCH_API at::Tensor as_strided_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset=::std::nullopt);
+
+} // namespace cpu
+} // namespace at
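A sketch of the corresponding Python call, with the same size/stride/offset semantics:

import torch

x = torch.arange(6.)
# View the 1-D buffer as 2x2 with stride (2, 1), starting at element 1.
print(torch.as_strided(x, (2, 2), (2, 1), storage_offset=1))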
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_backward_cpu_dispatch.h ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override);
+TORCH_API at::Tensor & avg_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override);
+TORCH_API at::Tensor & avg_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
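A sketch reaching the declared backward through autograd:

import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 4, 4, requires_grad=True)
y = F.avg_pool2d(x, kernel_size=2)
y.sum().backward()  # CPU backward kernel declared above computes x.grad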
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/broadcast_tensors.h ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/broadcast_tensors_ops.h>
+
+namespace at {
+
+
+// aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]
+inline ::std::vector<at::Tensor> broadcast_tensors(at::TensorList tensors) {
+    return at::_ops::broadcast_tensors::call(tensors);
+}
+
+}
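A sketch of the matching Python call:

import torch

a = torch.randn(3, 1)
b = torch.randn(1, 4)
a2, b2 = torch.broadcast_tensors(a, b)
print(a2.shape, b2.shape)  # torch.Size([3, 4]) torch.Size([3, 4])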
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cos_native.h ADDED
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/cos_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_cos_out : public at::meta::structured_cos {
+  void impl(const at::Tensor & self, const at::Tensor & out);
+};
+TORCH_API at::Tensor cos_nested(const at::Tensor & self);
+} // namespace native
+} // namespace at
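cos is a structured kernel, so the functional and out= variants share the implementation above; a sketch:

import torch

x = torch.tensor([0.0, 3.141592653589793])
out = torch.empty(2)
torch.cos(x, out=out)  # the structured_cos_out kernel fills `out`
print(out)             # tensor([ 1., -1.]) up to float rounding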
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_backward_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API cudnn_batch_norm_backward {
+ using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, double, const at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_batch_norm_backward")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor)")
+ static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace);
+ static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace);
+ };
+
+ struct TORCH_API cudnn_batch_norm_backward_out {
+ using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, double, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_batch_norm_backward")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")
+ static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+ static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+ };
+
+ }} // namespace at::_ops
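Each generated at::_ops struct like the two above pairs a call entry point (full dispatch) with a redispatch entry point (dispatch restricted to a caller-supplied c10::DispatchKeySet), and carries the operator's name, overload name, and schema as static strings. A minimal sketch of using that metadata to look the operator up in the c10 dispatcher at runtime; this assumes a linked libtorch build and is illustrative, not part of the diffed files:

#include <ATen/core/dispatch/Dispatcher.h>
#include <iostream>

int main() {
  // Look up aten::cudnn_batch_norm_backward by (name, overload_name),
  // mirroring the STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA members above.
  auto op = c10::Dispatcher::singleton()
                .findSchema({"aten::cudnn_batch_norm_backward", ""});
  if (op.has_value()) {
    std::cout << op->schema() << "\n";  // prints the registered schema string
  }
  return 0;
}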
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/float_power_ops.h ADDED
@@ -0,0 +1,105 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API float_power_Tensor_Tensor_out {
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::float_power")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Tensor_out")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)")
+ static at::Tensor & call(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out);
+ };
+
+ struct TORCH_API float_power_Tensor_Tensor {
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::float_power")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Tensor")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor")
+ static at::Tensor call(const at::Tensor & self, const at::Tensor & exponent);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent);
+ };
+
+ struct TORCH_API float_power_Scalar_out {
+ using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::float_power")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)")
+ static at::Tensor & call(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out);
+ };
+
+ struct TORCH_API float_power_Scalar {
+ using schema = at::Tensor (const at::Scalar &, const at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::float_power")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "float_power.Scalar(Scalar self, Tensor exponent) -> Tensor")
+ static at::Tensor call(const at::Scalar & self, const at::Tensor & exponent);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent);
+ };
+
+ struct TORCH_API float_power_Tensor_Scalar_out {
+ using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::float_power")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar_out")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)")
+ static at::Tensor & call(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out);
+ };
+
+ struct TORCH_API float_power_Tensor_Scalar {
+ using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::float_power")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor")
+ static at::Tensor call(const at::Tensor & self, const at::Scalar & exponent);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent);
+ };
+
+ struct TORCH_API float_power__Scalar {
+ using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::float_power_")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)")
+ static at::Tensor & call(at::Tensor & self, const at::Scalar & exponent);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & exponent);
+ };
+
+ struct TORCH_API float_power__Tensor {
+ using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::float_power_")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)")
+ static at::Tensor & call(at::Tensor & self, const at::Tensor & exponent);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & exponent);
+ };
+
+ }} // namespace at::_ops
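The structs above are the dispatcher-facing half of float_power; user code normally reaches them through the public at:: and Tensor wrappers, which promote the computation to double (or complex double) precision. A short usage sketch under that assumption:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::arange(1, 5);        // int64 tensor [1, 2, 3, 4]
  at::Tensor a = at::float_power(x, 2);   // float_power.Tensor_Scalar -> double result
  at::Tensor b = at::float_power(x, x);   // float_power.Tensor_Tensor
  at::Tensor out = at::empty_like(a);     // double tensor for the out= variant
  at::float_power_out(out, x, 3);         // float_power.Tensor_Scalar_out
  out.float_power_(2);                    // float_power_.Scalar, in-place (dtype already double)
  return 0;
}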
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/fractional_max_pool3d_backward_ops.h>
+
+ namespace at {
+
+
+ // aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & fractional_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
+ return at::_ops::fractional_max_pool3d_backward_grad_input::call(grad_output, self, kernel_size, output_size, indices, grad_input);
+ }
+ // aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & fractional_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
+ return at::_ops::fractional_max_pool3d_backward_grad_input::call(grad_output, self, kernel_size, output_size, indices, grad_input);
+ }
+
+ // aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor
+ inline at::Tensor fractional_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
+ return at::_ops::fractional_max_pool3d_backward::call(grad_output, self, kernel_size, output_size, indices);
+ }
+
+ }
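As in every generated Function.h header, the out-variant comes in two spellings: *_out takes the result buffer first (the C++-style signature), while *_outf keeps the schema's argument order with the buffer last; both forward to the same _ops::…::call. A brief sketch of the convention using the simpler aten::add for readability (illustrative; the pattern is identical for fractional_max_pool3d_backward_out/_outf above):

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::rand({2, 3});
  at::Tensor b = at::rand({2, 3});
  at::Tensor out = at::empty({2, 3});
  at::add_out(out, a, b);                // "_out": out parameter leads
  at::add_outf(a, b, /*alpha=*/1, out);  // "_outf": schema order, out parameter trails
  return 0;
}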
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gather_meta.h ADDED
@@ -0,0 +1,27 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/TensorIterator.h>
+ #include <ATen/TensorMeta.h>
+ #include <tuple>
+ #include <vector>
+
+ namespace at {
+ namespace meta {
+
+ struct TORCH_API structured_gather : public at::impl::MetaBase {
+
+
+ void meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad);
+ };
+
+ } // namespace meta
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_cpu_dispatch.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cpu {
+
+ TORCH_API at::Tensor hardshrink(const at::Tensor & self, const at::Scalar & lambd=0.5);
+ TORCH_API at::Tensor & hardshrink_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5);
+ TORCH_API at::Tensor & hardshrink_outf(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out);
+
+ } // namespace cpu
+ } // namespace at
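Headers in the *_cpu_dispatch.h family expose the backend kernel directly in an at::cpu:: namespace, skipping the dispatcher. A minimal sketch; this assumes the input really lives on CPU, since unlike at::hardshrink nothing here re-routes to another device, and autograd recording is bypassed as well:

#include <ATen/ATen.h>
#include <ATen/ops/hardshrink_cpu_dispatch.h>

int main() {
  at::Tensor x = at::randn({5});            // CPU tensor
  at::Tensor y = at::cpu::hardshrink(x);    // default lambd = 0.5
  at::Tensor z = at::cpu::hardshrink(x, /*lambd=*/1.0);
  return 0;
}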
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj.h ADDED
@@ -0,0 +1,30 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/is_conj_ops.h>
+
+ namespace at {
+
+
+ // aten::is_conj(Tensor self) -> bool
+ inline bool __dispatch_is_conj(const at::Tensor & self) {
+ return at::_ops::is_conj::call(self);
+ }
+
+ }
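__dispatch_is_conj is the manually dispatched function behind Tensor::is_conj(), which reports whether a tensor carries the lazy conjugate bit. A small sketch of the behavior it exposes:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({3}, at::kComplexFloat);
  at::Tensor y = x.conj();          // lazy view: sets the conjugate bit, copies no data
  bool a = x.is_conj();             // false
  bool b = y.is_conj();             // true
  at::Tensor z = y.resolve_conj();  // materializes the conjugation; z.is_conj() is false
  return 0;
}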
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_ops.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API is_set_to {
+ using schema = bool (const at::Tensor &, const at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::is_set_to")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "is_set_to(Tensor self, Tensor tensor) -> bool")
+ static bool call(const at::Tensor & self, const at::Tensor & tensor);
+ static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor);
+ };
+
+ }} // namespace at::_ops
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_cuda_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cuda {
+
+ TORCH_API at::Tensor isnan(const at::Tensor & self);
+
+ } // namespace cuda
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_cuda_dispatch.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cuda {
+
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false);
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+
+ } // namespace cuda
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_ops.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API leaky_relu_backward_grad_input {
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, bool, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::leaky_relu_backward")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)")
+ static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input);
+ };
+
+ struct TORCH_API leaky_relu_backward {
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, bool);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::leaky_relu_backward")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor")
+ static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result);
+ };
+
+ }} // namespace at::_ops
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal.h ADDED
@@ -0,0 +1,53 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/less_equal_ops.h>
+
+ namespace at {
+
+
+ // aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & less_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+ return at::_ops::less_equal_Scalar_out::call(self, other, out);
+ }
+ // aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & less_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+ return at::_ops::less_equal_Scalar_out::call(self, other, out);
+ }
+
+ // aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor
+ inline at::Tensor less_equal(const at::Tensor & self, const at::Scalar & other) {
+ return at::_ops::less_equal_Scalar::call(self, other);
+ }
+
+ // aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & less_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+ return at::_ops::less_equal_Tensor_out::call(self, other, out);
+ }
+ // aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & less_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+ return at::_ops::less_equal_Tensor_out::call(self, other, out);
+ }
+
+ // aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor
+ inline at::Tensor less_equal(const at::Tensor & self, const at::Tensor & other) {
+ return at::_ops::less_equal_Tensor::call(self, other);
+ }
+
+ }
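less_equal is the NumPy-style alias of aten::le; the wrappers above produce boolean tensors, with the out= variants writing into a caller-provided buffer. Usage sketch:

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::arange(5);                         // [0, 1, 2, 3, 4]
  at::Tensor m1 = at::less_equal(a, 2);                 // Scalar overload -> bool tensor
  at::Tensor m2 = at::less_equal(a, at::flip(a, {0}));  // Tensor overload, elementwise
  at::Tensor out = at::empty({5}, at::kBool);
  at::less_equal_out(out, a, 2);                        // Scalar_out overload
  return 0;
}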
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautogradnonfunctional {
+
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false);
+
+ } // namespace compositeexplicitautogradnonfunctional
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_rank_ops.h ADDED
@@ -0,0 +1,105 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API linalg_matrix_rank_atol_rtol_tensor {
+ using schema = at::Tensor (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, bool);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_matrix_rank")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "atol_rtol_tensor")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor")
+ static at::Tensor call(const at::Tensor & input, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian);
+ };
+
+ struct TORCH_API linalg_matrix_rank_atol_rtol_tensor_out {
+ using schema = at::Tensor & (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, bool, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_matrix_rank")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "atol_rtol_tensor_out")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)")
+ static at::Tensor & call(const at::Tensor & input, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out);
+ };
+
+ struct TORCH_API linalg_matrix_rank_atol_rtol_float {
+ using schema = at::Tensor (const at::Tensor &, ::std::optional<double>, ::std::optional<double>, bool);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_matrix_rank")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "atol_rtol_float")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor")
+ static at::Tensor call(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian);
+ };
+
+ struct TORCH_API linalg_matrix_rank_atol_rtol_float_out {
+ using schema = at::Tensor & (const at::Tensor &, ::std::optional<double>, ::std::optional<double>, bool, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_matrix_rank")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "atol_rtol_float_out")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)")
+ static at::Tensor & call(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian, at::Tensor & out);
+ };
+
+ struct TORCH_API linalg_matrix_rank {
+ using schema = at::Tensor (const at::Tensor &, double, bool);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_matrix_rank")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor")
+ static at::Tensor call(const at::Tensor & self, double tol, bool hermitian);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double tol, bool hermitian);
+ };
+
+ struct TORCH_API linalg_matrix_rank_out {
+ using schema = at::Tensor & (const at::Tensor &, double, bool, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_matrix_rank")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)")
+ static at::Tensor & call(const at::Tensor & self, double tol, bool hermitian, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double tol, bool hermitian, at::Tensor & out);
+ };
+
+ struct TORCH_API linalg_matrix_rank_tol_tensor {
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_matrix_rank")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tol_tensor")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor")
+ static at::Tensor call(const at::Tensor & input, const at::Tensor & tol, bool hermitian);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tol, bool hermitian);
+ };
+
+ struct TORCH_API linalg_matrix_rank_out_tol_tensor {
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_matrix_rank")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out_tol_tensor")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)")
+ static at::Tensor & call(const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out);
+ };
+
+ }} // namespace at::_ops
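The four overload families above reflect two generations of the API: the atol_rtol_* overloads accept optional absolute/relative tolerances (as floats or tensors), while the plain tol overloads are the older single-tolerance form. A sketch of both, with all arguments spelled out to keep C++ overload resolution unambiguous:

#include <ATen/ATen.h>

int main() {
  at::Tensor A = at::randn({4, 4});
  // atol_rtol_float overload: optional absolute and relative tolerances.
  at::Tensor r1 = at::linalg_matrix_rank(A, /*atol=*/1e-5, /*rtol=*/1e-5,
                                         /*hermitian=*/false);
  // Legacy single-tolerance overload.
  at::Tensor r2 = at::linalg_matrix_rank(A, /*tol=*/1e-6, /*hermitian=*/false);
  return 0;
}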
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_ops.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API linalg_solve {
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_solve")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor")
+ static at::Tensor call(const at::Tensor & A, const at::Tensor & B, bool left);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left);
+ };
+
+ struct TORCH_API linalg_solve_out {
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_solve")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)")
+ static at::Tensor & call(const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out);
+ };
+
+ }} // namespace at::_ops
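The left flag in the schema above selects which side the unknown sits on: left=True solves A X = B, left=False solves X A = B. Usage sketch via the public wrapper:

#include <ATen/ATen.h>

int main() {
  at::Tensor A = at::randn({3, 3});
  at::Tensor B = at::randn({3, 2});
  at::Tensor X = at::linalg_solve(A, B, /*left=*/true);   // solves A X = B

  at::Tensor C = at::randn({2, 3});
  at::Tensor Y = at::linalg_solve(A, C, /*left=*/false);  // solves Y A = C
  return 0;
}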
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp.h ADDED
@@ -0,0 +1,53 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/logsumexp_ops.h>
+
+ namespace at {
+
+
+ // aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
+ inline at::Tensor logsumexp(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
+ return at::_ops::logsumexp::call(self, dim, keepdim);
+ }
+
+ // aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & logsumexp_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
+ return at::_ops::logsumexp_out::call(self, dim, keepdim, out);
+ }
+ // aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & logsumexp_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
+ return at::_ops::logsumexp_out::call(self, dim, keepdim, out);
+ }
+
+ // aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
+ inline at::Tensor logsumexp(const at::Tensor & self, at::DimnameList dim, bool keepdim=false) {
+ return at::_ops::logsumexp_names::call(self, dim, keepdim);
+ }
+
+ // aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & logsumexp_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false) {
+ return at::_ops::logsumexp_names_out::call(self, dim, keepdim, out);
+ }
+ // aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & logsumexp_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out) {
+ return at::_ops::logsumexp_names_out::call(self, dim, keepdim, out);
+ }
+
+ }
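logsumexp computes log(sum(exp(x))) over the given dimensions in a numerically stable way (the maximum is factored out before exponentiating), with a named-dimension Dimname variant alongside the integer-dim one. Usage sketch:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({4, 5});
  at::Tensor a = at::logsumexp(x, /*dim=*/{1});                    // shape [4]
  at::Tensor b = at::logsumexp(x, /*dim=*/{1}, /*keepdim=*/true);  // shape [4, 1]
  at::Tensor out = at::empty({4});
  at::logsumexp_out(out, x, /*dim=*/{1});                          // out= variant
  return 0;
}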
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/lt_cuda_dispatch.h ADDED
@@ -0,0 +1,30 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cuda {
+
+ TORCH_API at::Tensor lt(const at::Tensor & self, const at::Scalar & other);
+ TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
+ TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+ TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Scalar & other);
+ TORCH_API at::Tensor lt(const at::Tensor & self, const at::Tensor & other);
+ TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+ TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+ TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Tensor & other);
+
+ } // namespace cuda
+ } // namespace at