ZTWHHH committed
Commit ccf2005 · verified · 1 Parent(s): dd82874

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/functools/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/__init__.py +624 -0
  3. llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/__pycache__/__init__.cpython-310.pyc +0 -0
  4. llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/__pycache__/layouts.cpython-310.pyc +0 -0
  5. llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/__pycache__/show-newlines.cpython-310.pyc +0 -0
  6. llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/__pycache__/strip-prefix.cpython-310.pyc +0 -0
  7. llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/__pycache__/to-dvorak.cpython-310.pyc +0 -0
  8. llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/__pycache__/to-qwerty.cpython-310.pyc +0 -0
  9. llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/layouts.py +25 -0
  10. llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/show-newlines.py +33 -0
  11. llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/strip-prefix.py +21 -0
  12. llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/to-dvorak.py +6 -0
  13. llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/to-qwerty.py +6 -0
  14. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_compositeexplicitautograd_dispatch.h +24 -0
  15. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_assert_async_native.h +24 -0
  16. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesced_meta_dispatch.h +23 -0
  17. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min_native.h +35 -0
  18. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_zero_cuda_dispatch.h +23 -0
  19. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_compositeexplicitautograd_dispatch.h +25 -0
  20. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_svd_cpu_dispatch.h +25 -0
  21. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward_native.h +21 -0
  22. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_resize_output_compositeexplicitautograd_dispatch.h +28 -0
  23. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention.h +30 -0
  24. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_bsc_tensor_args.h +30 -0
  25. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_compressed_tensor_args_compositeimplicitautograd_dispatch.h +23 -0
  26. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/acos_meta.h +27 -0
  27. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_meta.h +27 -0
  28. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/angle.h +39 -0
  29. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/any_native.h +34 -0
  30. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/arctan_compositeimplicitautograd_dispatch.h +26 -0
  31. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_backward_meta.h +27 -0
  32. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_cpu_dispatch.h +23 -0
  33. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_meta_dispatch.h +24 -0
  34. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag_native.h +22 -0
  35. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_compositeexplicitautograd_dispatch.h +24 -0
  36. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cumulative_trapezoid.h +35 -0
  37. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/det_ops.h +28 -0
  38. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/diag_embed_native.h +22 -0
  39. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal.h +35 -0
  40. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/equal_cpu_dispatch.h +23 -0
  41. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_meta.h +27 -0
  42. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_native.h +21 -0
  43. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_fp32_activation_compositeimplicitautograd_dispatch.h +23 -0
  44. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/frac_meta.h +27 -0
  45. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gcd.h +44 -0
  46. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
  47. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
  48. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_ops.h +39 -0
  49. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_cuda_dispatch.h +25 -0
  50. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_exp_ops.h +39 -0
llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/functools/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (19.2 kB).
 
llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/__init__.py ADDED
@@ -0,0 +1,624 @@
+ import re
+ import itertools
+ import textwrap
+ import functools
+
+ try:
+     from importlib.resources import files  # type: ignore
+ except ImportError:  # pragma: nocover
+     from importlib_resources import files  # type: ignore
+
+ from jaraco.functools import compose, method_cache
+ from jaraco.context import ExceptionTrap
+
+
+ def substitution(old, new):
+     """
+     Return a function that will perform a substitution on a string
+     """
+     return lambda s: s.replace(old, new)
+
+
+ def multi_substitution(*substitutions):
+     """
+     Take a sequence of pairs specifying substitutions, and create
+     a function that performs those substitutions.
+
+     >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
+     'baz'
+     """
+     substitutions = itertools.starmap(substitution, substitutions)
+     # compose function applies last function first, so reverse the
+     # substitutions to get the expected order.
+     substitutions = reversed(tuple(substitutions))
+     return compose(*substitutions)
+
+
+ class FoldedCase(str):
+     """
+     A case insensitive string class; behaves just like str
+     except compares equal when the only variation is case.
+
+     >>> s = FoldedCase('hello world')
+
+     >>> s == 'Hello World'
+     True
+
+     >>> 'Hello World' == s
+     True
+
+     >>> s != 'Hello World'
+     False
+
+     >>> s.index('O')
+     4
+
+     >>> s.split('O')
+     ['hell', ' w', 'rld']
+
+     >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
+     ['alpha', 'Beta', 'GAMMA']
+
+     Sequence membership is straightforward.
+
+     >>> "Hello World" in [s]
+     True
+     >>> s in ["Hello World"]
+     True
+
+     Allows testing for set inclusion, but candidate and elements
+     must both be folded.
+
+     >>> FoldedCase("Hello World") in {s}
+     True
+     >>> s in {FoldedCase("Hello World")}
+     True
+
+     String inclusion works as long as the FoldedCase object
+     is on the right.
+
+     >>> "hello" in FoldedCase("Hello World")
+     True
+
+     But not if the FoldedCase object is on the left:
+
+     >>> FoldedCase('hello') in 'Hello World'
+     False
+
+     In that case, use ``in_``:
+
+     >>> FoldedCase('hello').in_('Hello World')
+     True
+
+     >>> FoldedCase('hello') > FoldedCase('Hello')
+     False
+
+     >>> FoldedCase('ß') == FoldedCase('ss')
+     True
+     """
+
+     def __lt__(self, other):
+         return self.casefold() < other.casefold()
+
+     def __gt__(self, other):
+         return self.casefold() > other.casefold()
+
+     def __eq__(self, other):
+         return self.casefold() == other.casefold()
+
+     def __ne__(self, other):
+         return self.casefold() != other.casefold()
+
+     def __hash__(self):
+         return hash(self.casefold())
+
+     def __contains__(self, other):
+         return super().casefold().__contains__(other.casefold())
+
+     def in_(self, other):
+         "Does self appear in other?"
+         return self in FoldedCase(other)
+
+     # cache casefold since it's likely to be called frequently.
+     @method_cache
+     def casefold(self):
+         return super().casefold()
+
+     def index(self, sub):
+         return self.casefold().index(sub.casefold())
+
+     def split(self, splitter=' ', maxsplit=0):
+         pattern = re.compile(re.escape(splitter), re.I)
+         return pattern.split(self, maxsplit)
+
+
+ # Python 3.8 compatibility
+ _unicode_trap = ExceptionTrap(UnicodeDecodeError)
+
+
+ @_unicode_trap.passes
+ def is_decodable(value):
+     r"""
+     Return True if the supplied value is decodable (using the default
+     encoding).
+
+     >>> is_decodable(b'\xff')
+     False
+     >>> is_decodable(b'\x32')
+     True
+     """
+     value.decode()
+
+
+ def is_binary(value):
+     r"""
+     Return True if the value appears to be binary (that is, it's a byte
+     string and isn't decodable).
+
+     >>> is_binary(b'\xff')
+     True
+     >>> is_binary('\xff')
+     False
+     """
+     return isinstance(value, bytes) and not is_decodable(value)
+
+
+ def trim(s):
+     r"""
+     Trim something like a docstring to remove the whitespace that
+     is common due to indentation and formatting.
+
+     >>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
+     'foo = bar\n\tbar = baz'
+     """
+     return textwrap.dedent(s).strip()
+
+
+ def wrap(s):
+     """
+     Wrap lines of text, retaining existing newlines as
+     paragraph markers.
+
+     >>> print(wrap(lorem_ipsum))
+     Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
+     eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad
+     minim veniam, quis nostrud exercitation ullamco laboris nisi ut
+     aliquip ex ea commodo consequat. Duis aute irure dolor in
+     reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla
+     pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
+     culpa qui officia deserunt mollit anim id est laborum.
+     <BLANKLINE>
+     Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam
+     varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus
+     magna felis sollicitudin mauris. Integer in mauris eu nibh euismod
+     gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis
+     risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue,
+     eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas
+     fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla
+     a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis,
+     neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing
+     sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque
+     nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus
+     quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis,
+     molestie eu, feugiat in, orci. In hac habitasse platea dictumst.
+     """
+     paragraphs = s.splitlines()
+     wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs)
+     return '\n\n'.join(wrapped)
+
+
+ def unwrap(s):
+     r"""
+     Given a multi-line string, return an unwrapped version.
+
+     >>> wrapped = wrap(lorem_ipsum)
+     >>> wrapped.count('\n')
+     20
+     >>> unwrapped = unwrap(wrapped)
+     >>> unwrapped.count('\n')
+     1
+     >>> print(unwrapped)
+     Lorem ipsum dolor sit amet, consectetur adipiscing ...
+     Curabitur pretium tincidunt lacus. Nulla gravida orci ...
+
+     """
+     paragraphs = re.split(r'\n\n+', s)
+     cleaned = (para.replace('\n', ' ') for para in paragraphs)
+     return '\n'.join(cleaned)
+
+
+ lorem_ipsum: str = (
+     files(__name__).joinpath('Lorem ipsum.txt').read_text(encoding='utf-8')
+ )
+
+
+ class Splitter:
+     """object that will split a string with the given arguments for each call
+
+     >>> s = Splitter(',')
+     >>> s('hello, world, this is your, master calling')
+     ['hello', ' world', ' this is your', ' master calling']
+     """
+
+     def __init__(self, *args):
+         self.args = args
+
+     def __call__(self, s):
+         return s.split(*self.args)
+
+
+ def indent(string, prefix=' ' * 4):
+     """
+     >>> indent('foo')
+     '    foo'
+     """
+     return prefix + string
+
+
+ class WordSet(tuple):
+     """
+     Given an identifier, return the words that identifier represents,
+     whether in camel case, underscore-separated, etc.
+
+     >>> WordSet.parse("camelCase")
+     ('camel', 'Case')
+
+     >>> WordSet.parse("under_sep")
+     ('under', 'sep')
+
+     Acronyms should be retained
+
+     >>> WordSet.parse("firstSNL")
+     ('first', 'SNL')
+
+     >>> WordSet.parse("you_and_I")
+     ('you', 'and', 'I')
+
+     >>> WordSet.parse("A simple test")
+     ('A', 'simple', 'test')
+
+     Multiple caps should not interfere with the first cap of another word.
+
+     >>> WordSet.parse("myABCClass")
+     ('my', 'ABC', 'Class')
+
+     The result is a WordSet, providing access to other forms.
+
+     >>> WordSet.parse("myABCClass").underscore_separated()
+     'my_ABC_Class'
+
+     >>> WordSet.parse('a-command').camel_case()
+     'ACommand'
+
+     >>> WordSet.parse('someIdentifier').lowered().space_separated()
+     'some identifier'
+
+     Slices of the result should return another WordSet.
+
+     >>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
+     'out_of_context'
+
+     >>> WordSet.from_class_name(WordSet()).lowered().space_separated()
+     'word set'
+
+     >>> example = WordSet.parse('figured it out')
+     >>> example.headless_camel_case()
+     'figuredItOut'
+     >>> example.dash_separated()
+     'figured-it-out'
+
+     """
+
+     _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')
+
+     def capitalized(self):
+         return WordSet(word.capitalize() for word in self)
+
+     def lowered(self):
+         return WordSet(word.lower() for word in self)
+
+     def camel_case(self):
+         return ''.join(self.capitalized())
+
+     def headless_camel_case(self):
+         words = iter(self)
+         first = next(words).lower()
+         new_words = itertools.chain((first,), WordSet(words).camel_case())
+         return ''.join(new_words)
+
+     def underscore_separated(self):
+         return '_'.join(self)
+
+     def dash_separated(self):
+         return '-'.join(self)
+
+     def space_separated(self):
+         return ' '.join(self)
+
+     def trim_right(self, item):
+         """
+         Remove the item from the end of the set.
+
+         >>> WordSet.parse('foo bar').trim_right('foo')
+         ('foo', 'bar')
+         >>> WordSet.parse('foo bar').trim_right('bar')
+         ('foo',)
+         >>> WordSet.parse('').trim_right('bar')
+         ()
+         """
+         return self[:-1] if self and self[-1] == item else self
+
+     def trim_left(self, item):
+         """
+         Remove the item from the beginning of the set.
+
+         >>> WordSet.parse('foo bar').trim_left('foo')
+         ('bar',)
+         >>> WordSet.parse('foo bar').trim_left('bar')
+         ('foo', 'bar')
+         >>> WordSet.parse('').trim_left('bar')
+         ()
+         """
+         return self[1:] if self and self[0] == item else self
+
+     def trim(self, item):
+         """
+         >>> WordSet.parse('foo bar').trim('foo')
+         ('bar',)
+         """
+         return self.trim_left(item).trim_right(item)
+
+     def __getitem__(self, item):
+         result = super().__getitem__(item)
+         if isinstance(item, slice):
+             result = WordSet(result)
+         return result
+
+     @classmethod
+     def parse(cls, identifier):
+         matches = cls._pattern.finditer(identifier)
+         return WordSet(match.group(0) for match in matches)
+
+     @classmethod
+     def from_class_name(cls, subject):
+         return cls.parse(subject.__class__.__name__)
+
+
+ # for backward compatibility
+ words = WordSet.parse
+
+
+ def simple_html_strip(s):
+     r"""
+     Remove HTML from the string `s`.
+
+     >>> str(simple_html_strip(''))
+     ''
+
+     >>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
+     A stormy day in paradise
+
+     >>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
+     Somebody  tell the truth.
+
+     >>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
+     What about
+     multiple lines?
+     """
+     html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
+     texts = (match.group(3) or '' for match in html_stripper.finditer(s))
+     return ''.join(texts)
+
+
+ class SeparatedValues(str):
+     """
+     A string separated by a separator. Overrides __iter__ for getting
+     the values.
+
+     >>> list(SeparatedValues('a,b,c'))
+     ['a', 'b', 'c']
+
+     Whitespace is stripped and empty values are discarded.
+
+     >>> list(SeparatedValues(' a, b , c, '))
+     ['a', 'b', 'c']
+     """
+
+     separator = ','
+
+     def __iter__(self):
+         parts = self.split(self.separator)
+         return filter(None, (part.strip() for part in parts))
+
+
+ class Stripper:
+     r"""
+     Given a series of lines, find the common prefix and strip it from them.
+
+     >>> lines = [
+     ...     'abcdefg\n',
+     ...     'abc\n',
+     ...     'abcde\n',
+     ... ]
+     >>> res = Stripper.strip_prefix(lines)
+     >>> res.prefix
+     'abc'
+     >>> list(res.lines)
+     ['defg\n', '\n', 'de\n']
+
+     If no prefix is common, nothing should be stripped.
+
+     >>> lines = [
+     ...     'abcd\n',
+     ...     '1234\n',
+     ... ]
+     >>> res = Stripper.strip_prefix(lines)
+     >>> res.prefix = ''
+     >>> list(res.lines)
+     ['abcd\n', '1234\n']
+     """
+
+     def __init__(self, prefix, lines):
+         self.prefix = prefix
+         self.lines = map(self, lines)
+
+     @classmethod
+     def strip_prefix(cls, lines):
+         prefix_lines, lines = itertools.tee(lines)
+         prefix = functools.reduce(cls.common_prefix, prefix_lines)
+         return cls(prefix, lines)
+
+     def __call__(self, line):
+         if not self.prefix:
+             return line
+         null, prefix, rest = line.partition(self.prefix)
+         return rest
+
+     @staticmethod
+     def common_prefix(s1, s2):
+         """
+         Return the common prefix of two lines.
+         """
+         index = min(len(s1), len(s2))
+         while s1[:index] != s2[:index]:
+             index -= 1
+         return s1[:index]
+
+
+ def remove_prefix(text, prefix):
+     """
+     Remove the prefix from the text if it exists.
+
+     >>> remove_prefix('underwhelming performance', 'underwhelming ')
+     'performance'
+
+     >>> remove_prefix('something special', 'sample')
+     'something special'
+     """
+     null, prefix, rest = text.rpartition(prefix)
+     return rest
+
+
+ def remove_suffix(text, suffix):
+     """
+     Remove the suffix from the text if it exists.
+
+     >>> remove_suffix('name.git', '.git')
+     'name'
+
+     >>> remove_suffix('something special', 'sample')
+     'something special'
+     """
+     rest, suffix, null = text.partition(suffix)
+     return rest
+
+
+ def normalize_newlines(text):
+     r"""
+     Replace alternate newlines with the canonical newline.
+
+     >>> normalize_newlines('Lorem Ipsum\u2029')
+     'Lorem Ipsum\n'
+     >>> normalize_newlines('Lorem Ipsum\r\n')
+     'Lorem Ipsum\n'
+     >>> normalize_newlines('Lorem Ipsum\x85')
+     'Lorem Ipsum\n'
+     """
+     newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029']
+     pattern = '|'.join(newlines)
+     return re.sub(pattern, '\n', text)
+
+
+ def _nonblank(str):
+     return str and not str.startswith('#')
+
+
+ @functools.singledispatch
+ def yield_lines(iterable):
+     r"""
+     Yield valid lines of a string or iterable.
+
+     >>> list(yield_lines(''))
+     []
+     >>> list(yield_lines(['foo', 'bar']))
+     ['foo', 'bar']
+     >>> list(yield_lines('foo\nbar'))
+     ['foo', 'bar']
+     >>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
+     ['foo', 'baz #comment']
+     >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
+     ['foo', 'bar', 'baz', 'bing']
+     """
+     return itertools.chain.from_iterable(map(yield_lines, iterable))
+
+
+ @yield_lines.register(str)
+ def _(text):
+     return filter(_nonblank, map(str.strip, text.splitlines()))
+
+
+ def drop_comment(line):
+     """
+     Drop comments.
+
+     >>> drop_comment('foo # bar')
+     'foo'
+
+     A hash without a space may be in a URL.
+
+     >>> drop_comment('http://example.com/foo#bar')
+     'http://example.com/foo#bar'
+     """
+     return line.partition(' #')[0]
+
+
+ def join_continuation(lines):
+     r"""
+     Join lines continued by a trailing backslash.
+
+     >>> list(join_continuation(['foo \\', 'bar', 'baz']))
+     ['foobar', 'baz']
+     >>> list(join_continuation(['foo \\', 'bar', 'baz']))
+     ['foobar', 'baz']
+     >>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
+     ['foobarbaz']
+
+     Not sure why, but...
+     The character preceding the backslash is also elided.
+
+     >>> list(join_continuation(['goo\\', 'dly']))
+     ['godly']
+
+     A terrible idea, but...
+     If no line is available to continue, suppress the lines.
+
+     >>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
+     ['foo']
+     """
+     lines = iter(lines)
+     for item in lines:
+         while item.endswith('\\'):
+             try:
+                 item = item[:-2].strip() + next(lines)
+             except StopIteration:
+                 return
+         yield item
+
+
+ def read_newlines(filename, limit=1024):
+     r"""
+     >>> tmp_path = getfixture('tmp_path')
+     >>> filename = tmp_path / 'out.txt'
+     >>> _ = filename.write_text('foo\n', newline='', encoding='utf-8')
+     >>> read_newlines(filename)
+     '\n'
+     >>> _ = filename.write_text('foo\r\n', newline='', encoding='utf-8')
+     >>> read_newlines(filename)
+     '\r\n'
+     >>> _ = filename.write_text('foo\r\nbar\nbing\r', newline='', encoding='utf-8')
+     >>> read_newlines(filename)
+     ('\r', '\n', '\r\n')
+     """
+     with open(filename, encoding='utf-8') as fp:
+         fp.read(limit)
+     return fp.newlines
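
A quick usage sketch for the module added above (hypothetical session, assuming the upstream jaraco.text package is importable under its public name; expected results follow the module's own doctests):

import jaraco.text as text

# FoldedCase compares case-insensitively while remaining a str.
s = text.FoldedCase('hello world')
assert s == 'Hello World'

# WordSet splits an identifier into words and re-joins it in other styles.
assert text.WordSet.parse('myABCClass').underscore_separated() == 'my_ABC_Class'

# drop_comment and join_continuation support requirement-style line parsing.
assert text.drop_comment('foo # bar') == 'foo'
assert list(text.join_continuation(['foo \\', 'bar', 'baz'])) == ['foobar', 'baz']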
llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (20.4 kB).
 
llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/__pycache__/layouts.cpython-310.pyc ADDED
Binary file (880 Bytes).
 
llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/__pycache__/show-newlines.cpython-310.pyc ADDED
Binary file (1.1 kB).
 
llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/__pycache__/strip-prefix.cpython-310.pyc ADDED
Binary file (650 Bytes).
 
llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/__pycache__/to-dvorak.cpython-310.pyc ADDED
Binary file (306 Bytes).
 
llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/__pycache__/to-qwerty.cpython-310.pyc ADDED
Binary file (306 Bytes).
 
llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/layouts.py ADDED
@@ -0,0 +1,25 @@
+ qwerty = "-=qwertyuiop[]asdfghjkl;'zxcvbnm,./_+QWERTYUIOP{}ASDFGHJKL:\"ZXCVBNM<>?"
+ dvorak = "[]',.pyfgcrl/=aoeuidhtns-;qjkxbmwvz{}\"<>PYFGCRL?+AOEUIDHTNS_:QJKXBMWVZ"
+
+
+ to_dvorak = str.maketrans(qwerty, dvorak)
+ to_qwerty = str.maketrans(dvorak, qwerty)
+
+
+ def translate(input, translation):
+     """
+     >>> translate('dvorak', to_dvorak)
+     'ekrpat'
+     >>> translate('qwerty', to_qwerty)
+     'x,dokt'
+     """
+     return input.translate(translation)
+
+
+ def _translate_stream(stream, translation):
+     """
+     >>> import io
+     >>> _translate_stream(io.StringIO('foo'), to_dvorak)
+     urr
+     """
+     print(translate(stream.read(), translation))
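
The two translation tables above drive the to-dvorak and to-qwerty scripts added below; a minimal sketch of direct use (expected outputs follow the doctests, and the second call simply inverts the first):

from jaraco.text import layouts

print(layouts.translate('dvorak', layouts.to_dvorak))  # ekrpat
print(layouts.translate('ekrpat', layouts.to_qwerty))  # dvorak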
llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/show-newlines.py ADDED
@@ -0,0 +1,33 @@
+ import autocommand
+ import inflect
+
+ from more_itertools import always_iterable
+
+ import jaraco.text
+
+
+ def report_newlines(filename):
+     r"""
+     Report the newlines in the indicated file.
+
+     >>> tmp_path = getfixture('tmp_path')
+     >>> filename = tmp_path / 'out.txt'
+     >>> _ = filename.write_text('foo\nbar\n', newline='', encoding='utf-8')
+     >>> report_newlines(filename)
+     newline is '\n'
+     >>> filename = tmp_path / 'out.txt'
+     >>> _ = filename.write_text('foo\nbar\r\n', newline='', encoding='utf-8')
+     >>> report_newlines(filename)
+     newlines are ('\n', '\r\n')
+     """
+     newlines = jaraco.text.read_newlines(filename)
+     count = len(tuple(always_iterable(newlines)))
+     engine = inflect.engine()
+     print(
+         engine.plural_noun("newline", count),
+         engine.plural_verb("is", count),
+         repr(newlines),
+     )
+
+
+ autocommand.autocommand(__name__)(report_newlines)
llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/strip-prefix.py ADDED
@@ -0,0 +1,21 @@
+ import sys
+
+ import autocommand
+
+ from jaraco.text import Stripper
+
+
+ def strip_prefix():
+     r"""
+     Strip any common prefix from stdin.
+
+     >>> import io, pytest
+     >>> getfixture('monkeypatch').setattr('sys.stdin', io.StringIO('abcdef\nabc123'))
+     >>> strip_prefix()
+     def
+     123
+     """
+     sys.stdout.writelines(Stripper.strip_prefix(sys.stdin).lines)
+
+
+ autocommand.autocommand(__name__)(strip_prefix)
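
The Stripper class this script wraps can also be used programmatically; a small sketch grounded in the doctests from __init__.py above:

from jaraco.text import Stripper

res = Stripper.strip_prefix(['abcdefg\n', 'abc\n', 'abcde\n'])
print(res.prefix)       # abc
print(list(res.lines))  # ['defg\n', '\n', 'de\n']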
llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/to-dvorak.py ADDED
@@ -0,0 +1,6 @@
+ import sys
+
+ from . import layouts
+
+
+ __name__ == '__main__' and layouts._translate_stream(sys.stdin, layouts.to_dvorak)
llava/lib/python3.10/site-packages/setuptools/_vendor/jaraco/text/to-qwerty.py ADDED
@@ -0,0 +1,6 @@
+ import sys
+
+ from . import layouts
+
+
+ __name__ == '__main__' and layouts._translate_stream(sys.stdin, layouts.to_qwerty)
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API at::Tensor & _adaptive_avg_pool3d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self);
+ TORCH_API at::Tensor & _adaptive_avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
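
These generated dispatch headers declare the C++ out-variant plumbing behind existing ATen operators; at the Python level the same kernel is reached through autograd. A sketch (assuming a working torch install):

import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 4, 4, 4, requires_grad=True)
y = F.adaptive_avg_pool3d(x, 2)
# Backprop dispatches into _adaptive_avg_pool3d_backward, whose
# out-variants are declared in the header above.
y.sum().backward()
print(x.grad.shape)  # torch.Size([1, 1, 4, 4, 4])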
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_assert_async_native.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API void _assert_async_cpu(const at::Tensor & self);
+ TORCH_API void _assert_async_cuda(const at::Tensor & self);
+ TORCH_API void _assert_async_msg_cpu(const at::Tensor & self, c10::string_view assert_msg);
+ TORCH_API void _assert_async_msg_cuda(const at::Tensor & self, c10::string_view assert_msg);
+ } // namespace native
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesced_meta_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace meta {
+
+ TORCH_API at::Tensor & _coalesced_(at::Tensor & self, bool coalesced);
+
+ } // namespace meta
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min_native.h ADDED
@@ -0,0 +1,35 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalar_kernel_slow(at::TensorList self, const at::Scalar & scalar);
+ TORCH_API void _foreach_clamp_min_Scalar_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+ TORCH_API void foreach_tensor_clamp_min_scalar_kernel_slow_(at::TensorList self, const at::Scalar & scalar);
+ TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalar_kernel_cuda(at::TensorList self, const at::Scalar & scalar);
+ TORCH_API void foreach_tensor_clamp_min_scalar_kernel_cuda_(at::TensorList self, const at::Scalar & scalar);
+ TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_list_kernel_slow(at::TensorList self, at::TensorList other);
+ TORCH_API void _foreach_clamp_min_List_out(at::TensorList self, at::TensorList other, at::TensorList out);
+ TORCH_API void foreach_tensor_clamp_min_list_kernel_slow_(at::TensorList self, at::TensorList other);
+ TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_list_kernel_cuda(at::TensorList self, at::TensorList other);
+ TORCH_API void foreach_tensor_clamp_min_list_kernel_cuda_(at::TensorList self, at::TensorList other);
+ TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalarlist_kernel_slow(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+ TORCH_API void _foreach_clamp_min_ScalarList_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+ TORCH_API void foreach_tensor_clamp_min_scalarlist_kernel_slow_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+ TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalarlist_kernel_cuda(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+ TORCH_API void foreach_tensor_clamp_min_scalarlist_kernel_cuda_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+ } // namespace native
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_zero_cuda_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cuda {
+
+ TORCH_API void _foreach_zero_(at::TensorList self);
+
+ } // namespace cuda
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API at::Tensor _grid_sampler_2d_cpu_fallback(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+ TORCH_API at::Tensor & _grid_sampler_2d_cpu_fallback_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+ TORCH_API at::Tensor & _grid_sampler_2d_cpu_fallback_outf(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_svd_cpu_dispatch.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cpu {
+
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd(const at::Tensor & A, bool full_matrices=false, bool compute_uv=true, ::std::optional<c10::string_view> driver=::std::nullopt);
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices=false, bool compute_uv=true, ::std::optional<c10::string_view> driver=::std::nullopt);
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_outf(const at::Tensor & A, bool full_matrices, bool compute_uv, ::std::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh);
+
+ } // namespace cpu
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward_native.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor _nested_sum_backward_cpu(const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false);
+ } // namespace native
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_resize_output_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API at::Tensor _resize_output(const at::Tensor & self, at::IntArrayRef size, at::Device device);
+ TORCH_API at::Tensor _resize_output_symint(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device);
+ TORCH_API const at::Tensor & _resize_output_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::Device device);
+ TORCH_API const at::Tensor & _resize_output_outf(const at::Tensor & self, at::IntArrayRef size, at::Device device, const at::Tensor & out);
+ TORCH_API const at::Tensor & _resize_output_symint_out(const at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, at::Device device);
+ TORCH_API const at::Tensor & _resize_output_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device, const at::Tensor & out);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention.h ADDED
@@ -0,0 +1,30 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/_scaled_dot_product_cudnn_attention_ops.h>
+
+ namespace at {
+
+
+ // aten::_scaled_dot_product_cudnn_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p=0.0, bool is_causal=false, bool return_debug_mask=false, ::std::optional<double> scale=::std::nullopt) {
+     return at::_ops::_scaled_dot_product_cudnn_attention::call(query, key, value, dropout_p, is_causal, return_debug_mask, scale);
+ }
+
+ }
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_bsc_tensor_args.h ADDED
@@ -0,0 +1,30 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/_validate_sparse_bsc_tensor_args_ops.h>
+
+ namespace at {
+
+
+ // aten::_validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
+ inline void _validate_sparse_bsc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
+     return at::_ops::_validate_sparse_bsc_tensor_args::call(ccol_indices, row_indices, values, size);
+ }
+
+ }
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_compressed_tensor_args_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeimplicitautograd {
+
+ TORCH_API void _validate_sparse_compressed_tensor_args(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout);
+
+ } // namespace compositeimplicitautograd
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/acos_meta.h ADDED
@@ -0,0 +1,27 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/TensorIterator.h>
+ #include <ATen/TensorMeta.h>
+ #include <tuple>
+ #include <vector>
+
+ namespace at {
+ namespace meta {
+
+ struct TORCH_API structured_acos : public TensorIteratorBase {
+
+
+ void meta(const at::Tensor & self);
+ };
+
+ } // namespace native
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_meta.h ADDED
@@ -0,0 +1,27 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/TensorIterator.h>
+ #include <ATen/TensorMeta.h>
+ #include <tuple>
+ #include <vector>
+
+ namespace at {
+ namespace meta {
+
+ struct TORCH_API structured_aminmax : public at::impl::MetaBase {
+
+
+ void meta(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim);
+ };
+
+ } // namespace native
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/angle.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/angle_ops.h>
+
+ namespace at {
+
+
+ // aten::angle(Tensor self) -> Tensor
+ inline at::Tensor angle(const at::Tensor & self) {
+     return at::_ops::angle::call(self);
+ }
+
+ // aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & angle_out(at::Tensor & out, const at::Tensor & self) {
+     return at::_ops::angle_out::call(self, out);
+ }
+ // aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & angle_outf(const at::Tensor & self, at::Tensor & out) {
+     return at::_ops::angle_out::call(self, out);
+ }
+
+ }
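
The aten::angle schemas above surface in Python as torch.angle in both functional and out= forms; a sketch (assuming a working torch install):

import torch

z = torch.tensor([1 + 1j])
print(torch.angle(z))         # aten::angle(Tensor self) -> Tensor; pi/4 here
out = torch.empty_like(torch.angle(z))
torch.angle(z, out=out)       # aten::angle.out(Tensor self, *, Tensor(a!) out)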
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/any_native.h ADDED
@@ -0,0 +1,34 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+ #include <ATen/ops/any_meta.h>
+
+ namespace at {
+ namespace native {
+ struct TORCH_API structured_any_out : public at::meta::structured_any_dim {
+ void impl(const at::Tensor & self, int64_t dim, bool keepdim, const at::Tensor & out);
+ };
+ TORCH_API at::Tensor any_dims_default(const at::Tensor & self, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false);
+ TORCH_API at::Tensor & any_dims_out_default(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out);
+ struct TORCH_API structured_any_dims_out : public at::meta::structured_any_dims {
+ void impl(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, const at::Tensor & out);
+ };
+ TORCH_API at::Tensor any(const at::Tensor & self, at::Dimname dim, bool keepdim=false);
+ TORCH_API at::Tensor & any_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out);
+ struct TORCH_API structured_any_all_out : public at::meta::structured_any {
+ void impl(const at::Tensor & self, const at::Tensor & out);
+ };
+ TORCH_API at::Tensor any_sparse(const at::Tensor & self);
+ } // namespace native
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/arctan_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeimplicitautograd {
+
+ TORCH_API at::Tensor arctan(const at::Tensor & self);
+ TORCH_API at::Tensor & arctan_out(at::Tensor & out, const at::Tensor & self);
+ TORCH_API at::Tensor & arctan_outf(const at::Tensor & self, at::Tensor & out);
+ TORCH_API at::Tensor & arctan_(at::Tensor & self);
+
+ } // namespace compositeimplicitautograd
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_backward_meta.h ADDED
@@ -0,0 +1,27 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/TensorIterator.h>
+ #include <ATen/TensorMeta.h>
+ #include <tuple>
+ #include <vector>
+
+ namespace at {
+ namespace meta {
+
+ struct TORCH_API structured_avg_pool2d_backward : public at::impl::MetaBase {
+
+
+ void meta(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override);
+ };
+
+ } // namespace native
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_cpu_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cpu {
+
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, bool update, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reserve);
+
+ } // namespace cpu
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_meta_dispatch.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace meta {
+
+ TORCH_API at::Tensor & bernoulli_(at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & bernoulli_(at::Tensor & self, double p=0.5, ::std::optional<at::Generator> generator=::std::nullopt);
+
+ } // namespace meta
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag_native.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor block_diag(at::TensorList tensors);
+ TORCH_API at::Tensor & block_diag_out(at::TensorList tensors, at::Tensor & out);
+ } // namespace native
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API at::Tensor & cudnn_affine_grid_generator_out(at::Tensor & out, const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W);
+ TORCH_API at::Tensor & cudnn_affine_grid_generator_outf(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
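A hedged sketch of the out-variants above. This path needs a CUDA build with cuDNN; the batch/channel/height/width values and the (N, H, W, 2) grid shape are illustrative assumptions:

    #include <ATen/ATen.h>

    void demo() {
      at::Tensor theta = at::randn({1, 2, 3}, at::kCUDA);  // one 2x3 affine matrix per batch element
      at::Tensor grid  = at::empty({1, 8, 8, 2}, at::kCUDA);
      at::cudnn_affine_grid_generator_out(grid, theta, /*N=*/1, /*C=*/3, /*H=*/8, /*W=*/8);
    }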
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cumulative_trapezoid.h ADDED
@@ -0,0 +1,35 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/cumulative_trapezoid_ops.h>
+
+ namespace at {
+
+
+ // aten::cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
+ inline at::Tensor cumulative_trapezoid(const at::Tensor & y, const at::Tensor & x, int64_t dim=-1) {
+     return at::_ops::cumulative_trapezoid_x::call(y, x, dim);
+ }
+
+ // aten::cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
+ inline at::Tensor cumulative_trapezoid(const at::Tensor & y, const at::Scalar & dx=1, int64_t dim=-1) {
+     return at::_ops::cumulative_trapezoid_dx::call(y, dx, dim);
+ }
+
+ }
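Both inline wrappers above are directly callable; a small sketch (values illustrative):

    #include <ATen/ATen.h>

    void demo() {
      at::Tensor y = at::arange(4, at::kFloat);                 // {0, 1, 2, 3}
      at::Tensor x = at::arange(4, at::kFloat) * 0.5;           // explicit sample points
      at::Tensor c1 = at::cumulative_trapezoid(y, x);           // .x overload
      at::Tensor c2 = at::cumulative_trapezoid(y, /*dx=*/0.5);  // .dx overload
    }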
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/det_ops.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API det {
+   using schema = at::Tensor (const at::Tensor &);
+   using ptr_schema = schema*;
+   // See Note [static constexpr char* members for windows NVCC]
+   STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::det")
+   STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+   STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "det(Tensor self) -> Tensor")
+   static at::Tensor call(const at::Tensor & self);
+   static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+ };
+
+ }} // namespace at::_ops
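The `at::_ops::det` record above is the low-level entry behind the public `at::det`; the two calls in this sketch are equivalent:

    #include <ATen/ATen.h>

    void demo() {
      at::Tensor m  = at::eye(3);
      at::Tensor d1 = at::det(m);               // public wrapper
      at::Tensor d2 = at::_ops::det::call(m);   // direct call through the operator struct
    }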
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/diag_embed_native.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor & diag_embed_out(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
+ TORCH_API at::Tensor diag_embed(const at::Tensor & self, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1);
+ } // namespace native
+ } // namespace at
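A minimal sketch for `diag_embed` as declared above; with the defaults `dim1=-2, dim2=-1`, a length-n vector becomes an n-by-n matrix:

    #include <ATen/ATen.h>

    void demo() {
      at::Tensor v = at::arange(3, at::kFloat);        // shape [3]
      at::Tensor m = at::diag_embed(v);                // [3, 3], v on the main diagonal
      at::Tensor s = at::diag_embed(v, /*offset=*/1);  // [4, 4], v on the first super-diagonal
    }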
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal.h ADDED
@@ -0,0 +1,35 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/diagonal_ops.h>
+
+ namespace at {
+
+
+ // aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
+ inline at::Tensor diagonal(const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) {
+     return at::_ops::diagonal::call(self, offset, dim1, dim2);
+ }
+
+ // aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
+ inline at::Tensor diagonal(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset=0) {
+     return at::_ops::diagonal_Dimname::call(self, outdim, dim1, dim2, offset);
+ }
+
+ }
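A usage sketch for the positional overload above (the `Dimname` overload needs named tensors and is omitted here). Note that `diagonal` returns a view, as the `Tensor(a)` annotation indicates:

    #include <ATen/ATen.h>

    void demo() {
      at::Tensor m  = at::arange(9, at::kFloat).reshape({3, 3});
      at::Tensor d0 = at::diagonal(m);                // main diagonal: {0, 4, 8}
      at::Tensor d1 = at::diagonal(m, /*offset=*/1);  // first super-diagonal: {1, 5}
      d0.zero_();                                     // mutates m, since d0 is a view
    }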
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/equal_cpu_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cpu {
+
+ TORCH_API bool equal(const at::Tensor & self, const at::Tensor & other);
+
+ } // namespace cpu
+ } // namespace at
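`equal` (backed on CPU by the declaration above) performs a whole-tensor comparison and returns a plain `bool`, unlike the elementwise `at::eq`:

    #include <ATen/ATen.h>

    void demo() {
      at::Tensor a = at::ones({2, 2});
      bool same   = at::equal(a, a.clone());          // true: same shape, dtype, values
      bool differ = at::equal(a, at::zeros({2, 2}));  // false
    }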
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_meta.h ADDED
@@ -0,0 +1,27 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/TensorIterator.h>
+ #include <ATen/TensorMeta.h>
+ #include <tuple>
+ #include <vector>
+
+ namespace at {
+ namespace meta {
+
+ struct TORCH_API structured_exp2 : public TensorIteratorBase {
+
+
+     void meta(const at::Tensor & self);
+ };
+
+ } // namespace meta
+ } // namespace at
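`structured_exp2::meta` only performs shape/dtype inference for the structured kernel; user code reaches it through the public `at::exp2`:

    #include <ATen/ATen.h>

    void demo() {
      at::Tensor t = at::arange(4, at::kFloat);  // {0, 1, 2, 3}
      at::Tensor r = at::exp2(t);                // {1, 2, 4, 8}
    }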
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_native.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor fake_quantize_per_channel_affine_cachemask_backward(const at::Tensor & grad, const at::Tensor & mask);
+ } // namespace native
+ } // namespace at
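This backward is a masked pass-through of the incoming gradient. A hedged sketch with an all-true mask (in practice the mask comes from the cachemask forward op):

    #include <ATen/ATen.h>

    void demo() {
      at::Tensor grad = at::randn({2, 3});
      at::Tensor mask = at::ones({2, 3}, at::kBool);
      at::Tensor grad_in =
          at::fake_quantize_per_channel_affine_cachemask_backward(grad, mask);
    }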
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_fp32_activation_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeimplicitautograd {
+
+ TORCH_API at::Tensor fbgemm_linear_int8_weight_fp32_activation(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias);
+
+ } // namespace compositeimplicitautograd
+ } // namespace at
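A heavily hedged sketch of this FBGEMM int8 linear path; it assumes an FBGEMM-enabled x86 build, and the companion helpers `at::fbgemm_linear_quantize_weight` and `at::fbgemm_pack_quantized_matrix` used below are the usual way to produce the quantized and packed arguments:

    #include <ATen/ATen.h>

    void demo() {
      at::Tensor input  = at::randn({4, 8});    // fp32 activations
      at::Tensor weight = at::randn({16, 8});   // fp32 weight, [out_features, in_features]
      auto [q_weight, col_offsets, scale, zero_point] =
          at::fbgemm_linear_quantize_weight(weight);
      at::Tensor packed = at::fbgemm_pack_quantized_matrix(q_weight);
      at::Tensor bias   = at::zeros({16});
      at::Tensor out = at::fbgemm_linear_int8_weight_fp32_activation(
          input, q_weight, packed, col_offsets, scale, zero_point, bias);
    }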
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/frac_meta.h ADDED
@@ -0,0 +1,27 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/TensorIterator.h>
+ #include <ATen/TensorMeta.h>
+ #include <tuple>
+ #include <vector>
+
+ namespace at {
+ namespace meta {
+
+ struct TORCH_API structured_frac : public TensorIteratorBase {
+
+
+     void meta(const at::Tensor & self);
+ };
+
+ } // namespace meta
+ } // namespace at
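As with the other structured meta headers, this one only declares shape inference; the public entry point is `at::frac`, which returns the fractional part elementwise:

    #include <ATen/ATen.h>

    void demo() {
      at::Tensor t = at::arange(4, at::kFloat) * 0.75;  // {0, 0.75, 1.5, 2.25}
      at::Tensor f = at::frac(t);                       // {0, 0.75, 0.5, 0.25}
    }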
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gcd.h ADDED
@@ -0,0 +1,44 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/gcd_ops.h>
+
+ namespace at {
+
+
+ // aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & gcd_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+     return at::_ops::gcd_out::call(self, other, out);
+ }
+ // aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & gcd_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+     return at::_ops::gcd_out::call(self, other, out);
+ }
+
+ // aten::gcd(Tensor self, Tensor other) -> Tensor
+ inline at::Tensor gcd(const at::Tensor & self, const at::Tensor & other) {
+     return at::_ops::gcd::call(self, other);
+ }
+
+ // aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+ inline at::Tensor & gcd_(at::Tensor & self, const at::Tensor & other) {
+     return at::_ops::gcd_::call(self, other);
+ }
+
+ }
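All four wrappers above in one short sketch; `gcd` is defined for integer dtypes (values illustrative):

    #include <ATen/ATen.h>

    void demo() {
      at::Tensor a = at::tensor({4, 6, 9});
      at::Tensor b = at::tensor({2, 9, 6});
      at::Tensor g = at::gcd(a, b);          // {2, 3, 3}
      at::Tensor out = at::empty_like(a);
      at::gcd_out(out, a, b);                // out-variant
      a.gcd_(b);                             // in-place variant
    }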
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautogradnonfunctional {
+
+ TORCH_API at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none");
+
+ } // namespace compositeexplicitautogradnonfunctional
+ } // namespace at
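A sketch for the declaration above; the same signature is exposed publicly as `at::gelu_backward`, with `approximate` defaulting to "none" (the exact erf formulation):

    #include <ATen/ATen.h>

    void demo() {
      at::Tensor x        = at::randn({2, 3});
      at::Tensor grad_out = at::ones_like(x);
      at::Tensor gi      = at::gelu_backward(grad_out, x);          // erf formulation
      at::Tensor gi_tanh = at::gelu_backward(grad_out, x, "tanh");  // tanh approximation
    }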
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautogradnonfunctional {
+
+ TORCH_API at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self);
+
+ } // namespace compositeexplicitautogradnonfunctional
+ } // namespace at
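The matching sketch for `hardsigmoid_backward`; the gradient is `grad_output / 6` where the input lies in (-3, 3) and zero elsewhere:

    #include <ATen/ATen.h>

    void demo() {
      at::Tensor x        = at::randn({2, 3});
      at::Tensor grad_out = at::ones_like(x);
      at::Tensor grad_in  = at::hardsigmoid_backward(grad_out, x);
    }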
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_ops.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API isneginf {
+   using schema = at::Tensor (const at::Tensor &);
+   using ptr_schema = schema*;
+   // See Note [static constexpr char* members for windows NVCC]
+   STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isneginf")
+   STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+   STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isneginf(Tensor self) -> Tensor")
+   static at::Tensor call(const at::Tensor & self);
+   static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+ };
+
+ struct TORCH_API isneginf_out {
+   using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+   using ptr_schema = schema*;
+   // See Note [static constexpr char* members for windows NVCC]
+   STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isneginf")
+   STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+   STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+   static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+   static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+ };
+
+ }} // namespace at::_ops
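A sketch exercising both operator records above through their public wrappers:

    #include <ATen/ATen.h>
    #include <limits>

    void demo() {
      const float inf = std::numeric_limits<float>::infinity();
      at::Tensor t   = at::tensor({-inf, 0.0f, inf});
      at::Tensor m   = at::isneginf(t);               // {true, false, false}
      at::Tensor out = at::empty({3}, at::kBool);
      at::isneginf_out(out, t);                       // out-variant
    }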
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_cuda_dispatch.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cuda {
+
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian=false, bool check_errors=false);
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_out(at::Tensor & LD, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool hermitian=false, bool check_errors=false);
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_outf(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info);
+
+ } // namespace cuda
+ } // namespace at
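These are the CUDA bindings; the public `at::linalg_ldl_factor_ex` dispatches on the input's device, so the CPU sketch below would reach this kernel if `a` were moved to CUDA:

    #include <ATen/ATen.h>

    void demo() {
      at::Tensor a = at::randn({3, 3});
      a = a + a.transpose(-2, -1);   // symmetrize, since LDL expects a symmetric input
      auto [LD, pivots, info] = at::linalg_ldl_factor_ex(
          a, /*hermitian=*/false, /*check_errors=*/true);
    }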
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_exp_ops.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API linalg_matrix_exp {
+   using schema = at::Tensor (const at::Tensor &);
+   using ptr_schema = schema*;
+   // See Note [static constexpr char* members for windows NVCC]
+   STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_matrix_exp")
+   STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+   STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_matrix_exp(Tensor self) -> Tensor")
+   static at::Tensor call(const at::Tensor & self);
+   static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+ };
+
+ struct TORCH_API linalg_matrix_exp_out {
+   using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+   using ptr_schema = schema*;
+   // See Note [static constexpr char* members for windows NVCC]
+   STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_matrix_exp")
+   STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+   STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+   static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+   static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+ };
+
+ }} // namespace at::_ops
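Finally, a sketch for the matrix-exponential records above via the public wrappers; `exp` of the zero matrix is the identity, which makes a convenient sanity check:

    #include <ATen/ATen.h>

    void demo() {
      at::Tensor z = at::zeros({2, 2});
      at::Tensor e = at::linalg_matrix_exp(z);   // equals at::eye(2)
      at::Tensor out = at::empty({2, 2});
      at::linalg_matrix_exp_out(out, z);         // out-variant
    }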