ZTWHHH committed on
Commit
af68ae0
·
verified ·
1 Parent(s): 0e50390

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. parrot/lib/python3.10/site-packages/scipy/_lib/_bunch.py +225 -0
  2. parrot/lib/python3.10/site-packages/scipy/_lib/_ccallback.py +251 -0
  3. parrot/lib/python3.10/site-packages/scipy/_lib/_gcutils.py +105 -0
  4. parrot/lib/python3.10/site-packages/scipy/_lib/_test_ccallback.cpython-310-x86_64-linux-gnu.so +0 -0
  5. parrot/lib/python3.10/site-packages/scipy/_lib/_test_deprecation_call.cpython-310-x86_64-linux-gnu.so +0 -0
  6. parrot/lib/python3.10/site-packages/scipy/_lib/_testutils.py +337 -0
  7. parrot/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__init__.py +22 -0
  8. parrot/lib/python3.10/site-packages/scipy/_lib/decorator.py +399 -0
  9. parrot/lib/python3.10/site-packages/scipy/_lib/doccer.py +275 -0
  10. parrot/lib/python3.10/site-packages/scipy/_lib/uarray.py +31 -0
  11. parrot/lib/python3.10/site-packages/scipy/constants/__init__.py +347 -0
  12. parrot/lib/python3.10/site-packages/scipy/constants/__pycache__/_constants.cpython-310.pyc +0 -0
  13. parrot/lib/python3.10/site-packages/scipy/constants/__pycache__/codata.cpython-310.pyc +0 -0
  14. parrot/lib/python3.10/site-packages/scipy/constants/__pycache__/constants.cpython-310.pyc +0 -0
  15. parrot/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/test_constants.cpython-310.pyc +0 -0
  16. parrot/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc +0 -0
  17. parrot/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc +0 -0
  18. parrot/lib/python3.10/site-packages/scipy/fftpack/tests/test_helper.py +54 -0
  19. parrot/lib/python3.10/site-packages/scipy/fftpack/tests/test_real_transforms.py +815 -0
  20. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_backward_cpu_dispatch.h +23 -0
  21. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_backward.h +30 -0
  22. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_choose_qparams_per_tensor_ops.h +28 -0
  23. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_clear_plan_cache_ops.h +28 -0
  24. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_dimI_ops.h +28 -0
  25. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_backward.h +47 -0
  26. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos_ops.h +50 -0
  27. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_pack_padded_sequence_ops.h +39 -0
  28. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_remove_batch_dim.h +30 -0
  29. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_from_tensor_ops.h +28 -0
  30. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_compositeimplicitautograd_dispatch.h +23 -0
  31. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_addmm.h +39 -0
  32. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe_native.h +21 -0
  33. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma.h +39 -0
  34. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_native.h +23 -0
  35. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_to_copy.h +43 -0
  36. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_compositeexplicitautogradnonfunctional_dispatch.h +24 -0
  37. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/all_compositeimplicitautograd_dispatch.h +25 -0
  38. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_ops.h +39 -0
  39. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/conv3d_compositeimplicitautograd_dispatch.h +26 -0
  40. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_copy.h +39 -0
  41. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_compositeexplicitautograd_dispatch.h +24 -0
  42. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_compositeexplicitautograd_dispatch.h +24 -0
  43. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/dropout_native.h +22 -0
  44. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_meta_dispatch.h +26 -0
  45. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft_ops.h +39 -0
  46. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta.h +27 -0
  47. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_ops.h +39 -0
  48. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_tanh_cell_ops.h +28 -0
  49. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_compositeexplicitautogradnonfunctional_dispatch.h +24 -0
  50. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_ops.h +39 -0
parrot/lib/python3.10/site-packages/scipy/_lib/_bunch.py ADDED
@@ -0,0 +1,225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys as _sys
2
+ from keyword import iskeyword as _iskeyword
3
+
4
+
5
+ def _validate_names(typename, field_names, extra_field_names):
6
+ """
7
+ Ensure that all the given names are valid Python identifiers that
8
+ do not start with '_'. Also check that there are no duplicates
9
+ among field_names + extra_field_names.
10
+ """
11
+ for name in [typename] + field_names + extra_field_names:
12
+ if not isinstance(name, str):
13
+ raise TypeError('typename and all field names must be strings')
14
+ if not name.isidentifier():
15
+ raise ValueError('typename and all field names must be valid '
16
+ f'identifiers: {name!r}')
17
+ if _iskeyword(name):
18
+ raise ValueError('typename and all field names cannot be a '
19
+ f'keyword: {name!r}')
20
+
21
+ seen = set()
22
+ for name in field_names + extra_field_names:
23
+ if name.startswith('_'):
24
+ raise ValueError('Field names cannot start with an underscore: '
25
+ f'{name!r}')
26
+ if name in seen:
27
+ raise ValueError(f'Duplicate field name: {name!r}')
28
+ seen.add(name)
29
+
30
+
31
# Note: This code is adapted from CPython:Lib/collections/__init__.py
def _make_tuple_bunch(typename, field_names, extra_field_names=None,
                      module=None):
    """
    Create a namedtuple-like class with additional attributes.

    This function creates a subclass of tuple that acts like a namedtuple
    and that has additional attributes.

    The additional attributes are listed in `extra_field_names`. The
    values assigned to these attributes are not part of the tuple.

    The reason this function exists is to allow functions in SciPy
    that currently return a tuple or a namedtuple to return objects
    that have additional attributes, while maintaining backwards
    compatibility.

    This should only be used to enhance *existing* functions in SciPy.
    New functions are free to create objects as return values without
    having to maintain backwards compatibility with an old tuple or
    namedtuple return value.

    Parameters
    ----------
    typename : str
        The name of the type.
    field_names : list of str
        List of names of the values to be stored in the tuple. These names
        will also be attributes of instances, so the values in the tuple
        can be accessed by indexing or as attributes. At least one name
        is required. See the Notes for additional restrictions.
    extra_field_names : list of str, optional
        List of names of values that will be stored as attributes of the
        object. See the notes for additional restrictions.
    module : str, optional
        Module name to assign to the created class (for pickling); if not
        given, the caller's module name is used when it can be determined.

    Returns
    -------
    cls : type
        The new class.

    Notes
    -----
    There are restrictions on the names that may be used in `field_names`
    and `extra_field_names`:

    * The names must be unique--no duplicates allowed.
    * The names must be valid Python identifiers, and must not begin with
      an underscore.
    * The names must not be Python keywords (e.g. 'def', 'and', etc., are
      not allowed).

    Examples
    --------
    >>> from scipy._lib._bunch import _make_tuple_bunch

    Create a class that acts like a namedtuple with length 2 (with field
    names `x` and `y`) that will also have the attributes `w` and `beta`:

    >>> Result = _make_tuple_bunch('Result', ['x', 'y'], ['w', 'beta'])

    `Result` is the new class. We call it with keyword arguments to create
    a new instance with given values.

    >>> result1 = Result(x=1, y=2, w=99, beta=0.5)
    >>> result1
    Result(x=1, y=2, w=99, beta=0.5)

    `result1` acts like a tuple of length 2:

    >>> len(result1)
    2
    >>> result1[:]
    (1, 2)

    The values assigned when the instance was created are available as
    attributes:

    >>> result1.y
    2
    >>> result1.beta
    0.5
    """
    if len(field_names) == 0:
        raise ValueError('field_names must contain at least one name')

    if extra_field_names is None:
        extra_field_names = []
    _validate_names(typename, field_names, extra_field_names)

    # Intern all the names (as namedtuple does) and freeze them as tuples.
    typename = _sys.intern(str(typename))
    field_names = tuple(map(_sys.intern, field_names))
    extra_field_names = tuple(map(_sys.intern, extra_field_names))

    all_names = field_names + extra_field_names
    arg_list = ', '.join(field_names)
    full_list = ', '.join(all_names)
    # repr format, e.g. '(x=%(x)r, y=%(y)r)', filled from _asdict() later.
    repr_fmt = ''.join(('(',
                        ', '.join(f'{name}=%({name})r' for name in all_names),
                        ')'))
    tuple_new = tuple.__new__
    # Local aliases used by the closures defined below (_asdict, etc.).
    _dict, _tuple, _zip = dict, tuple, zip

    # Create all the named tuple methods to be added to the class namespace

    # __new__/__init__/__setattr__ are generated as source and exec'd so the
    # field names appear as real positional parameters in the signatures.
    s = f"""\
def __new__(_cls, {arg_list}, **extra_fields):
    return _tuple_new(_cls, ({arg_list},))

def __init__(self, {arg_list}, **extra_fields):
    for key in self._extra_fields:
        if key not in extra_fields:
            raise TypeError("missing keyword argument '%s'" % (key,))
    for key, val in extra_fields.items():
        if key not in self._extra_fields:
            raise TypeError("unexpected keyword argument '%s'" % (key,))
        self.__dict__[key] = val

def __setattr__(self, key, val):
    if key in {repr(field_names)}:
        raise AttributeError("can't set attribute %r of class %r"
                             % (key, self.__class__.__name__))
    else:
        self.__dict__[key] = val
"""
    # arg_list is no longer needed once the source text has been built.
    del arg_list
    # Restrict the exec namespace to exactly what the generated code needs.
    namespace = {'_tuple_new': tuple_new,
                 '__builtins__': dict(TypeError=TypeError,
                                      AttributeError=AttributeError),
                 '__name__': f'namedtuple_{typename}'}
    exec(s, namespace)
    __new__ = namespace['__new__']
    __new__.__doc__ = f'Create new instance of {typename}({full_list})'
    __init__ = namespace['__init__']
    __init__.__doc__ = f'Instantiate instance of {typename}({full_list})'
    __setattr__ = namespace['__setattr__']

    def __repr__(self):
        'Return a nicely formatted representation string'
        return self.__class__.__name__ + repr_fmt % self._asdict()

    def _asdict(self):
        'Return a new dict which maps field names to their values.'
        # Tuple fields first, then the extra attributes stored in __dict__.
        out = _dict(_zip(self._fields, self))
        out.update(self.__dict__)
        return out

    def __getnewargs_ex__(self):
        'Return self as a plain tuple. Used by copy and pickle.'
        return _tuple(self), self.__dict__

    # Modify function metadata to help with introspection and debugging
    for method in (__new__, __repr__, _asdict, __getnewargs_ex__):
        method.__qualname__ = f'{typename}.{method.__name__}'

    # Build-up the class namespace dictionary
    # and use type() to build the result class
    class_namespace = {
        '__doc__': f'{typename}({full_list})',
        '_fields': field_names,
        '__new__': __new__,
        '__init__': __init__,
        '__repr__': __repr__,
        '__setattr__': __setattr__,
        '_asdict': _asdict,
        '_extra_fields': extra_field_names,
        '__getnewargs_ex__': __getnewargs_ex__,
    }
    # Tuple fields are read through indexing; the default argument binds the
    # current index into each property's getter.
    for index, name in enumerate(field_names):

        def _get(self, index=index):
            return self[index]
        class_namespace[name] = property(_get)
    # Extra fields live in the instance __dict__, not in the tuple.
    for name in extra_field_names:

        def _get(self, name=name):
            return self.__dict__[name]
        class_namespace[name] = property(_get)

    result = type(typename, (tuple,), class_namespace)

    # For pickling to work, the __module__ variable needs to be set to the
    # frame where the named tuple is created. Bypass this step in environments
    # where sys._getframe is not defined (Jython for example) or sys._getframe
    # is not defined for arguments greater than 0 (IronPython), or where the
    # user has specified a particular module.
    if module is None:
        try:
            module = _sys._getframe(1).f_globals.get('__name__', '__main__')
        except (AttributeError, ValueError):
            pass
    if module is not None:
        result.__module__ = module
        __new__.__module__ = module

    return result
parrot/lib/python3.10/site-packages/scipy/_lib/_ccallback.py ADDED
@@ -0,0 +1,251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from . import _ccallback_c
2
+
3
+ import ctypes
4
+
5
+ PyCFuncPtr = ctypes.CFUNCTYPE(ctypes.c_void_p).__bases__[0]
6
+
7
# Lazily-initialized cffi handle: None means "not tried yet", False means
# "cffi is unavailable", otherwise it is a cffi.FFI instance.
ffi = None

class CData:
    # Placeholder so isinstance(obj, CData) checks are always valid; it is
    # replaced by ffi.CData once cffi has been imported successfully.
    pass

def _import_cffi():
    # Import cffi on first use and cache the outcome in the module globals.
    global ffi, CData

    if ffi is not None:
        # Already attempted (ffi is an FFI instance or False) -- do nothing.
        return

    try:
        import cffi
        ffi = cffi.FFI()
        CData = ffi.CData
    except ImportError:
        # cffi is not installed; remember that so we do not retry each call.
        ffi = False
24
+
25
+
26
class LowLevelCallable(tuple):
    """
    Low-level callback function.

    Some functions in SciPy take as arguments callback functions, which
    can either be python callables or low-level compiled functions. Using
    compiled callback functions can improve performance somewhat by
    avoiding wrapping data in Python objects.

    Such low-level functions in SciPy are wrapped in `LowLevelCallable`
    objects, which can be constructed from function pointers obtained from
    ctypes, cffi, Cython, or contained in Python `PyCapsule` objects.

    .. seealso::

       Functions accepting low-level callables:

       `scipy.integrate.quad`, `scipy.ndimage.generic_filter`,
       `scipy.ndimage.generic_filter1d`, `scipy.ndimage.geometric_transform`

       Usage examples:

       :ref:`ndimage-ccallbacks`, :ref:`quad-callbacks`

    Parameters
    ----------
    function : {PyCapsule, ctypes function pointer, cffi function pointer}
        Low-level callback function.
    user_data : {PyCapsule, ctypes void pointer, cffi void pointer}
        User data to pass on to the callback function.
    signature : str, optional
        Signature of the function. If omitted, determined from *function*,
        if possible.

    Attributes
    ----------
    function
        Callback function given.
    user_data
        User data given.
    signature
        Signature of the function.

    Methods
    -------
    from_cython
        Class method for constructing callables from Cython C-exported
        functions.

    Notes
    -----
    The argument ``function`` can be one of:

    - PyCapsule, whose name contains the C function signature
    - ctypes function pointer
    - cffi function pointer

    The signature of the low-level callback must match one of those expected
    by the routine it is passed to.

    If constructing low-level functions from a PyCapsule, the name of the
    capsule must be the corresponding signature, in the format::

        return_type (arg1_type, arg2_type, ...)

    For example::

        "void (double)"
        "double (double, int *, void *)"

    The context of a PyCapsule passed in as ``function`` is used as ``user_data``,
    if an explicit value for ``user_data`` was not given.

    """

    # Make the class immutable
    __slots__ = ()

    # Instances are tuples of (raw_capsule, function, user_data).

    def __new__(cls, function, user_data=None, signature=None):
        # We need to hold a reference to the function & user data,
        # to prevent them going out of scope
        item = cls._parse_callback(function, user_data, signature)
        return tuple.__new__(cls, (item, function, user_data))

    def __repr__(self):
        return f"LowLevelCallable({self.function!r}, {self.user_data!r})"

    @property
    def function(self):
        # Slot 1 of the underlying tuple: the function object as given.
        return tuple.__getitem__(self, 1)

    @property
    def user_data(self):
        # Slot 2 of the underlying tuple: the user data as given.
        return tuple.__getitem__(self, 2)

    @property
    def signature(self):
        # Slot 0 holds the raw capsule; its capsule name is the signature.
        return _ccallback_c.get_capsule_signature(tuple.__getitem__(self, 0))

    def __getitem__(self, idx):
        # Indexing is disabled: the tuple layout is an implementation detail.
        raise ValueError()

    @classmethod
    def from_cython(cls, module, name, user_data=None, signature=None):
        """
        Create a low-level callback function from an exported Cython function.

        Parameters
        ----------
        module : module
            Cython module where the exported function resides
        name : str
            Name of the exported function
        user_data : {PyCapsule, ctypes void pointer, cffi void pointer}, optional
            User data to pass on to the callback function.
        signature : str, optional
            Signature of the function. If omitted, determined from *function*.

        Raises
        ------
        ValueError
            If `module` has no ``__pyx_capi__`` attribute, or `name` is not
            present in it.
        """
        try:
            function = module.__pyx_capi__[name]
        except AttributeError as e:
            message = "Given module is not a Cython module with __pyx_capi__ attribute"
            raise ValueError(message) from e
        except KeyError as e:
            message = f"No function {name!r} found in __pyx_capi__ of the module"
            raise ValueError(message) from e
        return cls(function, user_data, signature)

    @classmethod
    def _parse_callback(cls, obj, user_data=None, signature=None):
        # Normalize any supported callback representation (existing
        # LowLevelCallable, ctypes pointer, cffi pointer, or capsule) into a
        # raw PyCapsule holding (function pointer, signature, context).
        _import_cffi()

        if isinstance(obj, LowLevelCallable):
            func = tuple.__getitem__(obj, 0)
        elif isinstance(obj, PyCFuncPtr):
            func, signature = _get_ctypes_func(obj, signature)
        elif isinstance(obj, CData):
            func, signature = _get_cffi_func(obj, signature)
        elif _ccallback_c.check_capsule(obj):
            func = obj
        else:
            raise ValueError("Given input is not a callable or a "
                             "low-level callable (pycapsule/ctypes/cffi)")

        # Resolve user_data to a raw context pointer (0 when not given).
        if isinstance(user_data, ctypes.c_void_p):
            context = _get_ctypes_data(user_data)
        elif isinstance(user_data, CData):
            context = _get_cffi_data(user_data)
        elif user_data is None:
            context = 0
        elif _ccallback_c.check_capsule(user_data):
            context = user_data
        else:
            raise ValueError("Given user data is not a valid "
                             "low-level void* pointer (pycapsule/ctypes/cffi)")

        return _ccallback_c.get_raw_capsule(func, signature, context)
184
+
185
+
186
+ #
187
+ # ctypes helpers
188
+ #
189
+
190
def _get_ctypes_func(func, signature=None):
    """Return ``(address, signature)`` for a ctypes function pointer.

    If `signature` is not given, it is reconstructed from the pointer's
    ``restype`` and ``argtypes`` in the ``"ret (arg1, arg2, ...)"`` format.
    """
    # Get function pointer
    func_ptr = ctypes.cast(func, ctypes.c_void_p).value

    # Construct function signature
    if signature is None:
        signature = _typename_from_ctypes(func.restype) + " ("
        for j, arg in enumerate(func.argtypes):
            if j == 0:
                signature += _typename_from_ctypes(arg)
            else:
                signature += ", " + _typename_from_ctypes(arg)
        signature += ")"

    return func_ptr, signature
205
+
206
+
207
+ def _typename_from_ctypes(item):
208
+ if item is None:
209
+ return "void"
210
+ elif item is ctypes.c_void_p:
211
+ return "void *"
212
+
213
+ name = item.__name__
214
+
215
+ pointer_level = 0
216
+ while name.startswith("LP_"):
217
+ pointer_level += 1
218
+ name = name[3:]
219
+
220
+ if name.startswith('c_'):
221
+ name = name[2:]
222
+
223
+ if pointer_level > 0:
224
+ name += " " + "*"*pointer_level
225
+
226
+ return name
227
+
228
+
229
+ def _get_ctypes_data(data):
230
+ # Get voidp pointer
231
+ return ctypes.cast(data, ctypes.c_void_p).value
232
+
233
+
234
+ #
235
+ # CFFI helpers
236
+ #
237
+
238
def _get_cffi_func(func, signature=None):
    """Return ``(address, signature)`` for a cffi function pointer.

    Requires a successful `_import_cffi()` so the module-global ``ffi``
    is a cffi.FFI instance.
    """
    # Get function pointer
    func_ptr = ffi.cast('uintptr_t', func)

    # Get signature
    if signature is None:
        # getctype yields e.g. "double(*)(double)"; drop the "(*)" so the
        # result matches the "ret (args)" capsule-name format.
        signature = ffi.getctype(ffi.typeof(func)).replace('(*)', ' ')

    return func_ptr, signature
247
+
248
+
249
def _get_cffi_data(data):
    """Return the raw address of a cffi pointer as an integer (uintptr_t)."""
    # Get pointer
    return ffi.cast('uintptr_t', data)
parrot/lib/python3.10/site-packages/scipy/_lib/_gcutils.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Module for testing automatic garbage collection of objects
3
+
4
+ .. autosummary::
5
+ :toctree: generated/
6
+
7
+ set_gc_state - enable or disable garbage collection
8
+ gc_state - context manager for given state of garbage collector
9
+ assert_deallocated - context manager to check for circular references on object
10
+
11
+ """
12
+ import weakref
13
+ import gc
14
+
15
+ from contextlib import contextmanager
16
+ from platform import python_implementation
17
+
18
+ __all__ = ['set_gc_state', 'gc_state', 'assert_deallocated']
19
+
20
+
21
+ IS_PYPY = python_implementation() == 'PyPy'
22
+
23
+
24
# NOTE: this intentionally shadows the builtin ReferenceError inside this
# module; it derives from AssertionError so leftover-reference failures are
# reported as assertion-style errors by assert_deallocated.
class ReferenceError(AssertionError):
    pass
26
+
27
+
28
def set_gc_state(state):
    """ Set status of garbage collector """
    # Only touch the collector when it is not already in the desired state.
    if gc.isenabled() != state:
        (gc.enable if state else gc.disable)()
36
+
37
+
38
@contextmanager
def gc_state(state):
    """ Context manager to set state of garbage collector to `state`

    Parameters
    ----------
    state : bool
        True for gc enabled, False for disabled

    Examples
    --------
    >>> with gc_state(False):
    ...     assert not gc.isenabled()
    >>> with gc_state(True):
    ...     assert gc.isenabled()
    """
    orig_state = gc.isenabled()
    set_gc_state(state)
    try:
        yield
    finally:
        # Restore the original collector state even when the managed block
        # raises; without try/finally an exception would permanently leave
        # the GC in the temporary state.
        set_gc_state(orig_state)
58
+
59
+
60
@contextmanager
def assert_deallocated(func, *args, **kwargs):
    """Context manager to check that object is deallocated

    This is useful for checking that an object can be freed directly by
    reference counting, without requiring gc to break reference cycles.
    GC is disabled inside the context manager.

    This check is not available on PyPy.

    Parameters
    ----------
    func : callable
        Callable to create object to check
    \\*args : sequence
        positional arguments to `func` in order to create object to check
    \\*\\*kwargs : dict
        keyword arguments to `func` in order to create object to check

    Raises
    ------
    RuntimeError
        When run on PyPy, where refcount-based deallocation cannot be tested.
    ReferenceError
        (module-local subclass of AssertionError) if the object is still
        alive after the managed block deleted its references.

    Examples
    --------
    >>> class C: pass
    >>> with assert_deallocated(C) as c:
    ...     # do something
    ...     del c

    >>> class C:
    ...     def __init__(self):
    ...         self._circular = self # Make circular reference
    >>> with assert_deallocated(C) as c: #doctest: +IGNORE_EXCEPTION_DETAIL
    ...     # do something
    ...     del c
    Traceback (most recent call last):
        ...
    ReferenceError: Remaining reference(s) to object
    """
    if IS_PYPY:
        raise RuntimeError("assert_deallocated is unavailable on PyPy")

    # With the cyclic collector disabled, only pure reference counting can
    # free the object; a surviving weakref target means references remain.
    with gc_state(False):
        obj = func(*args, **kwargs)
        ref = weakref.ref(obj)
        yield obj
        del obj
        if ref() is not None:
            raise ReferenceError("Remaining reference(s) to object")
parrot/lib/python3.10/site-packages/scipy/_lib/_test_ccallback.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (23.2 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/_lib/_test_deprecation_call.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (49.5 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/_lib/_testutils.py ADDED
@@ -0,0 +1,337 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Generic test utilities.
3
+
4
+ """
5
+
6
+ import inspect
7
+ import os
8
+ import re
9
+ import shutil
10
+ import subprocess
11
+ import sys
12
+ import sysconfig
13
+ from importlib.util import module_from_spec, spec_from_file_location
14
+
15
+ import numpy as np
16
+ import scipy
17
+
18
+ try:
19
+ # Need type: ignore[import-untyped] for mypy >= 1.6
20
+ import cython # type: ignore[import-untyped]
21
+ from Cython.Compiler.Version import ( # type: ignore[import-untyped]
22
+ version as cython_version,
23
+ )
24
+ except ImportError:
25
+ cython = None
26
+ else:
27
+ from scipy._lib import _pep440
28
+ required_version = '3.0.8'
29
+ if _pep440.parse(cython_version) < _pep440.Version(required_version):
30
+ # too old or wrong cython, skip Cython API tests
31
+ cython = None
32
+
33
+
34
+ __all__ = ['PytestTester', 'check_free_memory', '_TestPythranFunc', 'IS_MUSL']
35
+
36
+
37
+ IS_MUSL = False
38
+ # alternate way is
39
+ # from packaging.tags import sys_tags
40
+ # _tags = list(sys_tags())
41
+ # if 'musllinux' in _tags[0].platform:
42
+ _v = sysconfig.get_config_var('HOST_GNU_TYPE') or ''
43
+ if 'musl' in _v:
44
+ IS_MUSL = True
45
+
46
+
47
+ IS_EDITABLE = 'editable' in scipy.__path__[0]
48
+
49
+
50
+ class FPUModeChangeWarning(RuntimeWarning):
51
+ """Warning about FPU mode change"""
52
+ pass
53
+
54
+
55
class PytestTester:
    """
    Run tests for this namespace

    ``scipy.test()`` runs tests for all of SciPy, with the default settings.
    When used from a submodule (e.g., ``scipy.cluster.test()``, only the tests
    for that namespace are run.

    Parameters
    ----------
    label : {'fast', 'full'}, optional
        Whether to run only the fast tests, or also those marked as slow.
        Default is 'fast'.
    verbose : int, optional
        Test output verbosity. Default is 1.
    extra_argv : list, optional
        Arguments to pass through to Pytest.
    doctests : bool, optional
        Whether to run doctests or not. Default is False.
    coverage : bool, optional
        Whether to run tests with code coverage measurements enabled.
        Default is False.
    tests : list of str, optional
        List of module names to run tests for. By default, uses the module
        from which the ``test`` function is called.
    parallel : int, optional
        Run tests in parallel with pytest-xdist, if number given is larger than
        1. Default is 1.

    """
    def __init__(self, module_name):
        # Dotted name of the module whose tests this instance runs.
        self.module_name = module_name

    def __call__(self, label="fast", verbose=1, extra_argv=None, doctests=False,
                 coverage=False, tests=None, parallel=None):
        # Build a pytest command line from the options and run it in-process.
        # Returns True when pytest exits with code 0.
        import pytest

        module = sys.modules[self.module_name]
        module_path = os.path.abspath(module.__path__[0])

        pytest_args = ['--showlocals', '--tb=short']

        if doctests:
            # Enable doctest collection, excluding files/dirs known to break
            # under --doctest-modules.
            pytest_args += [
                "--doctest-modules",
                "--ignore=scipy/interpolate/_interpnd_info.py",
                "--ignore=scipy/_lib/array_api_compat",
                "--ignore=scipy/_lib/highs",
                "--ignore=scipy/_lib/unuran",
                "--ignore=scipy/_lib/_gcutils.py",
                "--ignore=scipy/_lib/doccer.py",
                "--ignore=scipy/_lib/_uarray",
            ]

        if extra_argv:
            pytest_args += list(extra_argv)

        if verbose and int(verbose) > 1:
            # verbose=2 -> "-v", verbose=3 -> "-vv", etc.
            pytest_args += ["-" + "v"*(int(verbose)-1)]

        if coverage:
            pytest_args += ["--cov=" + module_path]

        if label == "fast":
            pytest_args += ["-m", "not slow"]
        elif label != "full":
            # Any other label is treated as a pytest marker expression.
            pytest_args += ["-m", label]

        if tests is None:
            tests = [self.module_name]

        if parallel is not None and parallel > 1:
            if _pytest_has_xdist():
                pytest_args += ['-n', str(parallel)]
            else:
                import warnings
                warnings.warn('Could not run tests in parallel because '
                              'pytest-xdist plugin is not available.',
                              stacklevel=2)

        pytest_args += ['--pyargs'] + list(tests)

        # pytest.main may raise SystemExit in some configurations; treat its
        # code the same as a normal return value.
        try:
            code = pytest.main(pytest_args)
        except SystemExit as exc:
            code = exc.code

        return (code == 0)
143
+
144
+
145
class _TestPythranFunc:
    '''
    These are situations that can be tested in our pythran tests:
    - A function with multiple array arguments and then
      other positional and keyword arguments.
    - A function with array-like keywords (e.g. `def somefunc(x0, x1=None)`.
    Note: list/tuple input is not yet tested!

    `self.arguments`: A dictionary which key is the index of the argument,
                      value is tuple(array value, all supported dtypes)
    `self.partialfunc`: A function used to freeze some non-array argument
                        that of no interests in the original function

    NOTE(review): the test methods call ``self.pythranfunc``, which is never
    assigned in this class -- subclasses are presumably expected to set it;
    confirm against the concrete test classes.
    '''
    # Dtype pools subclasses can use when filling self.arguments.
    ALL_INTEGER = [np.int8, np.int16, np.int32, np.int64, np.intc, np.intp]
    ALL_FLOAT = [np.float32, np.float64]
    ALL_COMPLEX = [np.complex64, np.complex128]

    def setup_method(self):
        # Reset per-test state; subclasses populate these.
        self.arguments = {}
        self.partialfunc = None
        self.expected = None

    def get_optional_args(self, func):
        # get optional arguments with its default value,
        # used for testing keywords
        signature = inspect.signature(func)
        optional_args = {}
        for k, v in signature.parameters.items():
            if v.default is not inspect.Parameter.empty:
                optional_args[k] = v.default
        return optional_args

    def get_max_dtype_list_length(self):
        # get the max supported dtypes list length in all arguments
        max_len = 0
        for arg_idx in self.arguments:
            cur_len = len(self.arguments[arg_idx][1])
            if cur_len > max_len:
                max_len = cur_len
        return max_len

    def get_dtype(self, dtype_list, dtype_idx):
        # get the dtype from dtype_list via index
        # if the index is out of range, then return the last dtype
        if dtype_idx > len(dtype_list)-1:
            return dtype_list[-1]
        else:
            return dtype_list[dtype_idx]

    def test_all_dtypes(self):
        # Call the function once per dtype column, casting every argument.
        for type_idx in range(self.get_max_dtype_list_length()):
            args_array = []
            for arg_idx in self.arguments:
                new_dtype = self.get_dtype(self.arguments[arg_idx][1],
                                           type_idx)
                args_array.append(self.arguments[arg_idx][0].astype(new_dtype))
            self.pythranfunc(*args_array)

    def test_views(self):
        # Double reversal produces a non-base-array view with the same data.
        args_array = []
        for arg_idx in self.arguments:
            args_array.append(self.arguments[arg_idx][0][::-1][::-1])
        self.pythranfunc(*args_array)

    def test_strided(self):
        # repeat + step-2 slicing yields arrays with non-contiguous strides.
        args_array = []
        for arg_idx in self.arguments:
            args_array.append(np.repeat(self.arguments[arg_idx][0],
                                        2, axis=0)[::2])
        self.pythranfunc(*args_array)
215
+
216
+
217
+ def _pytest_has_xdist():
218
+ """
219
+ Check if the pytest-xdist plugin is installed, providing parallel tests
220
+ """
221
+ # Check xdist exists without importing, otherwise pytests emits warnings
222
+ from importlib.util import find_spec
223
+ return find_spec('xdist') is not None
224
+
225
+
226
def check_free_memory(free_mb):
    """
    Check *free_mb* of memory is available, otherwise do pytest.skip

    Parameters
    ----------
    free_mb : float
        Required amount of free memory, in (SI) megabytes.

    Notes
    -----
    When the ``SCIPY_AVAILABLE_MEM`` environment variable is set it
    overrides OS probing; its value is parsed with ``_parse_size``
    (a bare number is interpreted as MB).
    """
    import pytest

    try:
        # Explicit override via the environment takes precedence.
        mem_free = _parse_size(os.environ['SCIPY_AVAILABLE_MEM'])
        msg = '{} MB memory required, but environment SCIPY_AVAILABLE_MEM={}'.format(
            free_mb, os.environ['SCIPY_AVAILABLE_MEM'])
    except KeyError:
        # No override: probe the OS (psutil, or /proc/meminfo on Linux).
        mem_free = _get_mem_available()
        if mem_free is None:
            pytest.skip("Could not determine available memory; set SCIPY_AVAILABLE_MEM "
                        "variable to free memory in MB to run the test.")
        msg = f'{free_mb} MB memory required, but {mem_free/1e6} MB available'

    # mem_free is in bytes; free_mb is in SI megabytes.
    if mem_free < free_mb * 1e6:
        pytest.skip(msg)
245
+
246
+
247
+ def _parse_size(size_str):
248
+ suffixes = {'': 1e6,
249
+ 'b': 1.0,
250
+ 'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12,
251
+ 'kb': 1e3, 'Mb': 1e6, 'Gb': 1e9, 'Tb': 1e12,
252
+ 'kib': 1024.0, 'Mib': 1024.0**2, 'Gib': 1024.0**3, 'Tib': 1024.0**4}
253
+ m = re.match(r'^\s*(\d+)\s*({})\s*$'.format('|'.join(suffixes.keys())),
254
+ size_str,
255
+ re.I)
256
+ if not m or m.group(2) not in suffixes:
257
+ raise ValueError("Invalid size string")
258
+
259
+ return float(m.group(1)) * suffixes[m.group(2)]
260
+
261
+
262
def _get_mem_available():
    """
    Get information about memory available, not counting swap.

    Returns
    -------
    available : float or None
        Available memory in bytes, or None when it cannot be determined
        (no psutil and not on Linux).
    """
    try:
        # Preferred: psutil gives a portable answer.
        import psutil
        return psutil.virtual_memory().available
    except (ImportError, AttributeError):
        pass

    if sys.platform.startswith('linux'):
        # Fallback: parse /proc/meminfo; values there are in kB, hence *1e3.
        info = {}
        with open('/proc/meminfo') as f:
            for line in f:
                p = line.split()
                info[p[0].strip(':').lower()] = float(p[1]) * 1e3

        if 'memavailable' in info:
            # Linux >= 3.14
            return info['memavailable']
        else:
            # Older kernels: approximate with free memory plus page cache.
            return info['memfree'] + info['cached']

    return None
286
+
287
def _test_cython_extension(tmp_path, srcdir):
    """
    Helper function to test building and importing Cython modules that
    make use of the Cython APIs for BLAS, LAPACK, optimize, and special.

    Parameters
    ----------
    tmp_path : pathlib.Path
        Scratch directory (typically pytest's ``tmp_path`` fixture) into
        which the example project is copied and built.
    srcdir : str
        Path to a package directory containing a
        ``tests/_cython_examples`` meson project.

    Returns
    -------
    tuple
        The imported ``extending`` and ``extending_cpp`` extension modules.
    """
    import pytest
    try:
        # meson must be available on PATH to build the examples.
        subprocess.check_call(["meson", "--version"])
    except FileNotFoundError:
        pytest.skip("No usable 'meson' found")

    # build the examples in a temporary directory
    mod_name = os.path.split(srcdir)[1]
    shutil.copytree(srcdir, tmp_path / mod_name)
    build_dir = tmp_path / mod_name / 'tests' / '_cython_examples'
    target_dir = build_dir / 'build'
    os.makedirs(target_dir, exist_ok=True)

    # Ensure we use the correct Python interpreter even when `meson` is
    # installed in a different Python environment (see numpy#24956)
    native_file = str(build_dir / 'interpreter-native-file.ini')
    with open(native_file, 'w') as f:
        f.write("[binaries]\n")
        f.write(f"python = '{sys.executable}'")

    if sys.platform == "win32":
        # --vsenv activates the MSVC toolchain on Windows.
        subprocess.check_call(["meson", "setup",
                               "--buildtype=release",
                               "--native-file", native_file,
                               "--vsenv", str(build_dir)],
                              cwd=target_dir,
                              )
    else:
        subprocess.check_call(["meson", "setup",
                               "--native-file", native_file, str(build_dir)],
                              cwd=target_dir
                              )
    subprocess.check_call(["meson", "compile", "-vv"], cwd=target_dir)

    # import without adding the directory to sys.path
    suffix = sysconfig.get_config_var('EXT_SUFFIX')

    def load(modname):
        # Load the freshly built shared object directly from its location.
        so = (target_dir / modname).with_suffix(suffix)
        spec = spec_from_file_location(modname, so)
        mod = module_from_spec(spec)
        spec.loader.exec_module(mod)
        return mod

    # test that the modules can be imported
    return load("extending"), load("extending_cpp")
parrot/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__init__.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ NumPy Array API compatibility library
3
+
4
+ This is a small wrapper around NumPy and CuPy that is compatible with the
5
+ Array API standard https://data-apis.org/array-api/latest/. See also NEP 47
6
+ https://numpy.org/neps/nep-0047-array-api-standard.html.
7
+
8
+ Unlike array_api_strict, this is not a strict minimal implementation of the
9
+ Array API, but rather just an extension of the main NumPy namespace with
10
+ changes needed to be compliant with the Array API. See
11
+ https://numpy.org/doc/stable/reference/array_api.html for a full list of
12
+ changes. In particular, unlike array_api_strict, this package does not use a
13
+ separate Array object, but rather just uses numpy.ndarray directly.
14
+
15
+ Library authors using the Array API may wish to test against array_api_strict
16
+ to ensure they are not using functionality outside of the standard, but prefer
17
+ this implementation for the default when working with NumPy arrays.
18
+
19
+ """
20
+ __version__ = '1.5.1'
21
+
22
+ from .common import * # noqa: F401, F403
parrot/lib/python3.10/site-packages/scipy/_lib/decorator.py ADDED
@@ -0,0 +1,399 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ######################### LICENSE ############################ #
2
+
3
+ # Copyright (c) 2005-2015, Michele Simionato
4
+ # All rights reserved.
5
+
6
+ # Redistribution and use in source and binary forms, with or without
7
+ # modification, are permitted provided that the following conditions are
8
+ # met:
9
+
10
+ # Redistributions of source code must retain the above copyright
11
+ # notice, this list of conditions and the following disclaimer.
12
+ # Redistributions in bytecode form must reproduce the above copyright
13
+ # notice, this list of conditions and the following disclaimer in
14
+ # the documentation and/or other materials provided with the
15
+ # distribution.
16
+
17
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18
+ # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19
+ # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20
+ # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21
+ # HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
22
+ # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23
+ # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24
+ # OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25
+ # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26
+ # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27
+ # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28
+ # DAMAGE.
29
+
30
+ """
31
+ Decorator module, see https://pypi.python.org/pypi/decorator
32
+ for the documentation.
33
+ """
34
+ import re
35
+ import sys
36
+ import inspect
37
+ import operator
38
+ import itertools
39
+ import collections
40
+
41
+ from inspect import getfullargspec
42
+
43
+ __version__ = '4.0.5'
44
+
45
+
46
def get_init(cls):
    """Return the ``__init__`` method of class *cls*."""
    init = cls.__init__
    return init
48
+
49
+
50
+ # getargspec has been deprecated in Python 3.5
51
ArgSpec = collections.namedtuple(
    'ArgSpec', 'args varargs varkw defaults')


def getargspec(f):
    """A replacement for the removed ``inspect.getargspec``.

    Returns the args/varargs/varkw/defaults fields of
    ``inspect.getfullargspec(f)`` packed into an ``ArgSpec`` named tuple.
    """
    full = getfullargspec(f)
    return ArgSpec(args=full.args, varargs=full.varargs,
                   varkw=full.varkw, defaults=full.defaults)
59
+
60
+
61
+ DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
62
+
63
+
64
+ # basic functionality
65
class FunctionMaker:
    """
    An object with the ability to create functions with a given signature.
    It has attributes name, doc, module, signature, defaults, dict, and
    methods update and make.

    Instances are built either from an existing function (whose metadata
    is copied) or from explicit name/signature strings; ``make``/``create``
    then compile a new function body against that signature.
    """

    # Atomic get-and-increment provided by the GIL; gives every generated
    # function a unique pseudo-filename (see make()).
    _compile_count = itertools.count()

    def __init__(self, func=None, name=None, signature=None,
                 defaults=None, doc=None, module=None, funcdict=None):
        self.shortsignature = signature
        if func:
            # func can be a class or a callable, but not an instance method
            self.name = func.__name__
            if self.name == '<lambda>':  # small hack for lambda functions
                self.name = '_lambda_'
            self.doc = func.__doc__
            self.module = func.__module__
            if inspect.isfunction(func):
                argspec = getfullargspec(func)
                self.annotations = getattr(func, '__annotations__', {})
                for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
                          'kwonlydefaults'):
                    setattr(self, a, getattr(argspec, a))
                for i, arg in enumerate(self.args):
                    setattr(self, 'arg%d' % i, arg)
                # Build the full signature (used in the generated `def`)
                # and the short signature (used at the call site).
                allargs = list(self.args)
                allshortargs = list(self.args)
                if self.varargs:
                    allargs.append('*' + self.varargs)
                    allshortargs.append('*' + self.varargs)
                elif self.kwonlyargs:
                    allargs.append('*')  # single star syntax
                for a in self.kwonlyargs:
                    allargs.append('%s=None' % a)
                    allshortargs.append(f'{a}={a}')
                if self.varkw:
                    allargs.append('**' + self.varkw)
                    allshortargs.append('**' + self.varkw)
                self.signature = ', '.join(allargs)
                self.shortsignature = ', '.join(allshortargs)
                self.dict = func.__dict__.copy()
        # func=None happens when decorating a caller
        if name:
            self.name = name
        if signature is not None:
            self.signature = signature
        if defaults:
            self.defaults = defaults
        if doc:
            self.doc = doc
        if module:
            self.module = module
        if funcdict:
            self.dict = funcdict
        # check existence required attributes
        assert hasattr(self, 'name')
        if not hasattr(self, 'signature'):
            raise TypeError('You are decorating a non-function: %s' % func)

    def update(self, func, **kw):
        "Update the signature of func with the data in self"
        func.__name__ = self.name
        func.__doc__ = getattr(self, 'doc', None)
        func.__dict__ = getattr(self, 'dict', {})
        func.__defaults__ = getattr(self, 'defaults', ())
        func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
        func.__annotations__ = getattr(self, 'annotations', None)
        try:
            # Attribute the generated function to the module of the caller
            # three frames up (the user of the decorator machinery).
            frame = sys._getframe(3)
        except AttributeError:  # for IronPython and similar implementations
            callermodule = '?'
        else:
            callermodule = frame.f_globals.get('__name__', '?')
        func.__module__ = getattr(self, 'module', callermodule)
        func.__dict__.update(kw)

    def make(self, src_templ, evaldict=None, addsource=False, **attrs):
        "Make a new function from a given template and update the signature"
        src = src_templ % vars(self)  # expand name and signature
        evaldict = evaldict or {}
        mo = DEF.match(src)
        if mo is None:
            raise SyntaxError('not a valid function template\n%s' % src)
        name = mo.group(1)  # extract the function name
        # Reject templates that would shadow the reserved helper names
        # injected into the evaluation dict by decorate()/decorator().
        names = set([name] + [arg.strip(' *') for arg in
                              self.shortsignature.split(',')])
        for n in names:
            if n in ('_func_', '_call_'):
                raise NameError(f'{n} is overridden in\n{src}')
        if not src.endswith('\n'):  # add a newline just for safety
            src += '\n'  # this is needed in old versions of Python

        # Ensure each generated function has a unique filename for profilers
        # (such as cProfile) that depend on the tuple of (<filename>,
        # <definition line>, <function name>) being unique.
        filename = '<decorator-gen-%d>' % (next(self._compile_count),)
        try:
            code = compile(src, filename, 'single')
            exec(code, evaldict)
        except:  # noqa: E722
            print('Error in generated code:', file=sys.stderr)
            print(src, file=sys.stderr)
            raise
        func = evaldict[name]
        if addsource:
            attrs['__source__'] = src
        self.update(func, **attrs)
        return func

    @classmethod
    def create(cls, obj, body, evaldict, defaults=None,
               doc=None, module=None, addsource=True, **attrs):
        """
        Create a function from the strings name, signature, and body.
        evaldict is the evaluation dictionary. If addsource is true, an
        attribute __source__ is added to the result. The attributes attrs
        are added, if any.
        """
        if isinstance(obj, str):  # "name(signature)"
            name, rest = obj.strip().split('(', 1)
            signature = rest[:-1]  # strip a right parens
            func = None
        else:  # a function
            name = None
            signature = None
            func = obj
        self = cls(func, name, signature, defaults, doc, module)
        # Re-indent the body so it nests under the generated `def`.
        ibody = '\n'.join('    ' + line for line in body.splitlines())
        return self.make('def %(name)s(%(signature)s):\n' + ibody,
                         evaldict, addsource, **attrs)
198
+
199
+
200
def decorate(func, caller):
    """Decorate ``func`` with ``caller``, preserving func's signature.

    The returned wrapper calls ``caller(func, *args, **kwargs)`` while
    exposing the same signature and metadata as ``func``.
    """
    namespace = dict(func.__globals__)
    namespace['_call_'] = caller
    namespace['_func_'] = func
    wrapper = FunctionMaker.create(
        func, "return _call_(_func_, %(shortsignature)s)",
        namespace, __wrapped__=func)
    if hasattr(func, '__qualname__'):
        wrapper.__qualname__ = func.__qualname__
    return wrapper
213
+
214
+
215
def decorator(caller, _func=None):
    """decorator(caller) converts a caller function into a decorator

    ``caller`` may be a plain function, a lambda, a class, or any object
    with a ``__call__`` method; its first argument receives the decorated
    function and the remaining ones receive the call arguments.
    """
    if _func is not None:  # return a decorated function
        # this is obsolete behavior; you should use decorate instead
        return decorate(_func, caller)
    # else return a decorator function
    if inspect.isclass(caller):
        name = caller.__name__.lower()
        callerfunc = get_init(caller)
        doc = (f'decorator({caller.__name__}) converts functions/generators into '
               f'factories of {caller.__name__} objects')
    elif inspect.isfunction(caller):
        if caller.__name__ == '<lambda>':
            # lambdas have no valid identifier for the generated def
            name = '_lambda_'
        else:
            name = caller.__name__
        callerfunc = caller
        doc = caller.__doc__
    else:  # assume caller is an object with a __call__ method
        name = caller.__class__.__name__.lower()
        callerfunc = caller.__call__.__func__
        doc = caller.__call__.__doc__
    # The generated decorator simply applies `decorate` with the caller
    # bound into its evaluation namespace.
    evaldict = callerfunc.__globals__.copy()
    evaldict['_call_'] = caller
    evaldict['_decorate_'] = decorate
    return FunctionMaker.create(
        '%s(func)' % name, 'return _decorate_(func, _call_)',
        evaldict, doc=doc, module=caller.__module__,
        __wrapped__=caller)
244
+
245
+
246
+ # ####################### contextmanager ####################### #
247
+
248
try:  # Python >= 3.2
    from contextlib import _GeneratorContextManager
except ImportError:  # Python >= 2.5
    from contextlib import GeneratorContextManager as _GeneratorContextManager


class ContextManager(_GeneratorContextManager):
    # A generator context manager whose instances can also be used as
    # decorators: the decorated function runs inside `with self:`.
    def __call__(self, func):
        """Context manager decorator"""
        return FunctionMaker.create(
            func, "with _self_: return _func_(%(shortsignature)s)",
            dict(_self_=self, _func_=func), __wrapped__=func)


# _GeneratorContextManager.__init__ changed signature across Python
# versions; adapt ContextManager.__init__ to whichever one is present.
init = getfullargspec(_GeneratorContextManager.__init__)
n_args = len(init.args)
if n_args == 2 and not init.varargs:  # (self, genobj) Python 2.7
    def __init__(self, g, *a, **k):
        return _GeneratorContextManager.__init__(self, g(*a, **k))
    ContextManager.__init__ = __init__
elif n_args == 2 and init.varargs:  # (self, gen, *a, **k) Python 3.4
    pass
elif n_args == 4:  # (self, gen, args, kwds) Python 3.5
    def __init__(self, g, *a, **k):
        return _GeneratorContextManager.__init__(self, g, a, k)
    ContextManager.__init__ = __init__

contextmanager = decorator(ContextManager)
276
+
277
+
278
+ # ############################ dispatch_on ############################ #
279
+
280
def append(a, vancestors):
    """Insert class ``a`` into the virtual-ancestor list ``vancestors``.

    ``a`` is skipped when a subclass of it is already present; it replaces
    (in place) every entry that is one of its own ancestors; otherwise it
    is appended at the end.
    """
    needs_append = True
    for idx, ancestor in enumerate(vancestors):
        if issubclass(ancestor, a):
            # a, or something more specific, is already covered: stop.
            needs_append = False
            break
        if issubclass(a, ancestor):
            # a is more specific than this entry: overwrite it in place
            # (deliberately no break — all broader entries get replaced).
            vancestors[idx] = a
            needs_append = False
    if needs_append:
        vancestors.append(a)
295
+
296
+
297
+ # inspired from simplegeneric by P.J. Eby and functools.singledispatch
298
def dispatch_on(*dispatch_args):
    """
    Factory of decorators turning a function into a generic function
    dispatching on the given arguments.

    ``dispatch_args`` are the *names* of the parameters used for dispatch;
    they must all appear in the signature of the decorated function.
    """
    assert dispatch_args, 'No dispatch args passed'
    dispatch_str = '(%s,)' % ', '.join(dispatch_args)

    def check(arguments, wrong=operator.ne, msg=''):
        """Make sure one passes the expected number of arguments"""
        if wrong(len(arguments), len(dispatch_args)):
            raise TypeError('Expected %d arguments, got %d%s' %
                            (len(dispatch_args), len(arguments), msg))

    def gen_func_dec(func):
        """Decorator turning a function into a generic function"""

        # first check the dispatch arguments
        argset = set(getfullargspec(func).args)
        if not set(dispatch_args) <= argset:
            raise NameError('Unknown dispatch arguments %s' % dispatch_str)

        # registry: tuple-of-types -> registered implementation
        typemap = {}

        def vancestors(*types):
            """
            Get a list of sets of virtual ancestors for the given types
            """
            check(types)
            ras = [[] for _ in range(len(dispatch_args))]
            for types_ in typemap:
                for t, type_, ra in zip(types, types_, ras):
                    # A registered type counts as a "virtual ancestor" when
                    # it is a superclass not already in the real MRO.
                    if issubclass(t, type_) and type_ not in t.__mro__:
                        append(type_, ra)
            return [set(ra) for ra in ras]

        def ancestors(*types):
            """
            Get a list of virtual MROs, one for each type
            """
            check(types)
            lists = []
            for t, vas in zip(types, vancestors(*types)):
                n_vas = len(vas)
                if n_vas > 1:
                    raise RuntimeError(
                        f'Ambiguous dispatch for {t}: {vas}')
                elif n_vas == 1:
                    va, = vas
                    # Merge the single virtual ancestor into the MRO via a
                    # throwaway subclass; drop the subclass itself ([1:]).
                    mro = type('t', (t, va), {}).__mro__[1:]
                else:
                    mro = t.__mro__
                lists.append(mro[:-1])  # discard t and object
            return lists

        def register(*types):
            """
            Decorator to register an implementation for the given types
            """
            check(types)

            def dec(f):
                # the implementation must accept at least as many
                # positional arguments as there are dispatch arguments
                check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
                typemap[types] = f
                return f
            return dec

        def dispatch_info(*types):
            """
            An utility to introspect the dispatch algorithm
            """
            check(types)
            lst = [tuple(a.__name__ for a in anc)
                   for anc in itertools.product(*ancestors(*types))]
            return lst

        def _dispatch(dispatch_args, *args, **kw):
            types = tuple(type(arg) for arg in dispatch_args)
            try:  # fast path: exact type match in the registry
                f = typemap[types]
            except KeyError:
                pass
            else:
                return f(*args, **kw)
            # Slow path: walk the cartesian product of the (virtual) MROs
            # looking for the most specific registered implementation.
            combinations = itertools.product(*ancestors(*types))
            next(combinations)  # the first one has been already tried
            for types_ in combinations:
                f = typemap.get(types_)
                if f is not None:
                    return f(*args, **kw)

            # else call the default implementation
            return func(*args, **kw)

        return FunctionMaker.create(
            func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
            dict(_f_=_dispatch), register=register, default=func,
            typemap=typemap, vancestors=vancestors, ancestors=ancestors,
            dispatch_info=dispatch_info, __wrapped__=func)

    gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
    return gen_func_dec
parrot/lib/python3.10/site-packages/scipy/_lib/doccer.py ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ''' Utilities to allow inserting docstring fragments for common
2
+ parameters into function and method docstrings'''
3
+
4
+ import sys
5
+
6
+ __all__ = [
7
+ 'docformat', 'inherit_docstring_from', 'indentcount_lines',
8
+ 'filldoc', 'unindent_dict', 'unindent_string', 'extend_notes_in_docstring',
9
+ 'replace_notes_in_docstring', 'doc_replace'
10
+ ]
11
+
12
+
13
def docformat(docstring, docdict=None):
    ''' Fill a function docstring from variables in dictionary

    Adapt the indent of the inserted docs

    Parameters
    ----------
    docstring : string
        docstring from function, possibly with dict formatting strings
    docdict : dict, optional
        dictionary with keys that match the dict formatting strings and
        values that are docstring fragments to be inserted.  Each inserted
        fragment is re-indented (all lines but its first) to the minimum
        indentation of ``docstring``.

    Returns
    -------
    outstring : string
        string with requested ``docdict`` strings inserted
    '''
    if not docstring:
        return docstring
    if not docdict:
        # handles both docdict=None and an empty dict
        return docstring
    doc_lines = docstring.expandtabs().splitlines()
    # Minimum indent of the main docstring, ignoring the first line.
    if len(doc_lines) < 2:
        icount = 0
    else:
        icount = indentcount_lines(doc_lines[1:])
    pad = ' ' * icount
    # Re-indent every fragment to that depth (first line excepted).
    indented = {}
    for name, fragment in docdict.items():
        frag_lines = fragment.expandtabs().splitlines()
        if frag_lines:
            indented[name] = '\n'.join(
                frag_lines[:1] + [pad + ln for ln in frag_lines[1:]])
        else:
            # empty fragment: substituted verbatim
            indented[name] = fragment
    return docstring % indented
70
+
71
+
72
def inherit_docstring_from(cls):
    """
    Decorator factory: substitute the docstring of the same-named method
    of `cls` for every occurrence of '%(super)s' in the decorated
    function's docstring.

    If the decorated method has no docstring at all, it simply receives
    the docstring of the corresponding method of `cls`.

    Parameters
    ----------
    cls : Python class or instance
        A class with a method of the same name as the decorated method.

    Returns
    -------
    f : function
        The decorator function that modifies the __doc__ attribute
        of its argument.
    """
    def _doc(func):
        parent_doc = getattr(cls, func.__name__).__doc__
        own_doc = func.__doc__
        if own_doc is None:
            # No docstring of its own: inherit the parent's wholesale.
            func.__doc__ = parent_doc
        else:
            func.__doc__ = own_doc % dict(super=parent_doc)
        return func
    return _doc
127
+
128
+
129
def extend_notes_in_docstring(cls, notes):
    """
    Decorator: replace the decorated function's docstring with the
    docstring of the same-named method of `cls`, extending its 'Notes'
    section with the given `notes` text.

    The extra notes are spliced in just before the 'References' section,
    falling back to 'Examples', then to the end of the docstring.
    """
    def _doc(func):
        parent_doc = getattr(cls, func.__name__).__doc__
        # Under `python -OO` docstrings are stripped; nothing to extend.
        if parent_doc is None:
            return func
        insert_at = parent_doc.find('        References\n')
        if insert_at == -1:
            insert_at = parent_doc.find('        Examples\n')
            if insert_at == -1:
                insert_at = len(parent_doc)
        func.__doc__ = ''.join([parent_doc[:insert_at], notes,
                                parent_doc[insert_at:]])
        return func
    return _doc
151
+
152
+
153
def replace_notes_in_docstring(cls, notes):
    """
    This decorator replaces the decorated function's docstring
    with the docstring from corresponding method in `cls`.
    It replaces the 'Notes' section of that docstring with
    the given `notes`.
    """
    def _doc(func):
        cls_docstring = getattr(cls, func.__name__).__doc__
        notes_header = '        Notes\n        -----\n'
        # If python is called with -OO option,
        # there is no docstring
        if cls_docstring is None:
            return func
        # NOTE(review): if `notes_header` is absent, find() returns -1 and
        # the slice below silently produces a garbled docstring — callers
        # are presumed to pass docstrings containing a Notes section;
        # confirm before relying on other inputs.
        start_of_notes = cls_docstring.find(notes_header)
        # The old notes end where the References (or Examples) section
        # starts, or at the end of the docstring.
        end_of_notes = cls_docstring.find('        References\n')
        if end_of_notes == -1:
            end_of_notes = cls_docstring.find('        Examples\n')
            if end_of_notes == -1:
                end_of_notes = len(cls_docstring)
        func.__doc__ = (cls_docstring[:start_of_notes + len(notes_header)] +
                        notes +
                        cls_docstring[end_of_notes:])
        return func
    return _doc
178
+
179
+
180
def indentcount_lines(lines):
    ''' Minimum indentation over all non-blank lines in `lines`

    Whitespace-only lines are ignored; an empty input, or input made up
    entirely of blank lines, yields 0.

    >>> lines = [' one', '  two', '  three']
    >>> indentcount_lines(lines)
    1
    >>> lines = []
    >>> indentcount_lines(lines)
    0
    >>> lines = [' one']
    >>> indentcount_lines(lines)
    1
    >>> indentcount_lines(['    '])
    0
    '''
    smallest = sys.maxsize
    for line in lines:
        content = line.lstrip()
        if content:
            smallest = min(smallest, len(line) - len(content))
    return 0 if smallest == sys.maxsize else smallest
203
+
204
+
205
def filldoc(docdict, unindent_params=True):
    ''' Return a docstring decorator that fills from `docdict`

    Parameters
    ----------
    docdict : dictionary
        dictionary containing name, docstring fragment pairs
    unindent_params : {False, True}, boolean, optional
        If True, strip common indentation from all fragments in
        docdict before substitution

    Returns
    -------
    decfunc : function
        decorator that applies ``docformat`` with `docdict` to the
        wrapped function's docstring
    '''
    filled = unindent_dict(docdict) if unindent_params else docdict

    def decorate(f):
        f.__doc__ = docformat(f.__doc__, filled)
        return f

    return decorate
229
+
230
+
231
def unindent_dict(docdict):
    ''' Return a copy of `docdict` with every value unindented '''
    return {name: unindent_string(dstr) for name, dstr in docdict.items()}
237
+
238
+
239
def unindent_string(docstring):
    ''' Strip the common leading indentation from every line of
    `docstring`, including the first.

    >>> unindent_string('  two')
    'two'
    >>> unindent_string('  two\\n   three')
    'two\\n three'
    '''
    lines = docstring.expandtabs().splitlines()
    depth = indentcount_lines(lines)
    if not depth:
        # nothing common to strip: return the input untouched
        return docstring
    return '\n'.join(line[depth:] for line in lines)
252
+
253
+
254
def doc_replace(obj, oldval, newval):
    """Decorator that copies the docstring of `obj` with `oldval`
    replaced by `newval`.

    Equivalent to ``func.__doc__ = obj.__doc__.replace(oldval, newval)``

    Parameters
    ----------
    obj : object
        The object to take the docstring from.
    oldval : string
        The string to replace from the original docstring.
    newval : string
        The string to replace ``oldval`` with.
    """
    # __doc__ may be None for optimized Python (-OO)
    source_doc = obj.__doc__ or ''
    replaced = source_doc.replace(oldval, newval)

    def inner(func):
        func.__doc__ = replaced
        return func

    return inner
parrot/lib/python3.10/site-packages/scipy/_lib/uarray.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """`uarray` provides functions for generating multimethods that dispatch to
2
+ multiple different backends
3
+
4
+ This should be imported, rather than `_uarray` so that an installed version could
5
+ be used instead, if available. This means that users can call
6
+ `uarray.set_backend` directly instead of going through SciPy.
7
+
8
+ """
9
+
10
+
11
+ # Prefer an installed version of uarray, if available
12
+ try:
13
+ import uarray as _uarray
14
+ except ImportError:
15
+ _has_uarray = False
16
+ else:
17
+ from scipy._lib._pep440 import Version as _Version
18
+
19
+ _has_uarray = _Version(_uarray.__version__) >= _Version("0.8")
20
+ del _uarray
21
+ del _Version
22
+
23
+
24
+ if _has_uarray:
25
+ from uarray import * # noqa: F403
26
+ from uarray import _Function
27
+ else:
28
+ from ._uarray import * # noqa: F403
29
+ from ._uarray import _Function # noqa: F401
30
+
31
+ del _has_uarray
parrot/lib/python3.10/site-packages/scipy/constants/__init__.py ADDED
@@ -0,0 +1,347 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ r"""
2
+ ==================================
3
+ Constants (:mod:`scipy.constants`)
4
+ ==================================
5
+
6
+ .. currentmodule:: scipy.constants
7
+
8
+ Physical and mathematical constants and units.
9
+
10
+
11
+ Mathematical constants
12
+ ======================
13
+
14
+ ================ =================================================================
15
+ ``pi`` Pi
16
+ ``golden`` Golden ratio
17
+ ``golden_ratio`` Golden ratio
18
+ ================ =================================================================
19
+
20
+
21
+ Physical constants
22
+ ==================
23
+
24
+ =========================== =================================================================
25
+ ``c`` speed of light in vacuum
26
+ ``speed_of_light`` speed of light in vacuum
27
+ ``mu_0`` the magnetic constant :math:`\mu_0`
28
+ ``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0`
29
+ ``h`` the Planck constant :math:`h`
30
+ ``Planck`` the Planck constant :math:`h`
31
+ ``hbar`` :math:`\hbar = h/(2\pi)`
32
+ ``G`` Newtonian constant of gravitation
33
+ ``gravitational_constant`` Newtonian constant of gravitation
34
+ ``g`` standard acceleration of gravity
35
+ ``e`` elementary charge
36
+ ``elementary_charge`` elementary charge
37
+ ``R`` molar gas constant
38
+ ``gas_constant`` molar gas constant
39
+ ``alpha`` fine-structure constant
40
+ ``fine_structure`` fine-structure constant
41
+ ``N_A`` Avogadro constant
42
+ ``Avogadro`` Avogadro constant
43
+ ``k`` Boltzmann constant
44
+ ``Boltzmann`` Boltzmann constant
45
+ ``sigma`` Stefan-Boltzmann constant :math:`\sigma`
46
+ ``Stefan_Boltzmann`` Stefan-Boltzmann constant :math:`\sigma`
47
+ ``Wien`` Wien displacement law constant
48
+ ``Rydberg`` Rydberg constant
49
+ ``m_e`` electron mass
50
+ ``electron_mass`` electron mass
51
+ ``m_p`` proton mass
52
+ ``proton_mass`` proton mass
53
+ ``m_n`` neutron mass
54
+ ``neutron_mass`` neutron mass
55
+ =========================== =================================================================
56
+
57
+
58
+ Constants database
59
+ ------------------
60
+
61
+ In addition to the above variables, :mod:`scipy.constants` also contains the
62
+ 2018 CODATA recommended values [CODATA2018]_ database containing more physical
63
+ constants.
64
+
65
+ .. autosummary::
66
+ :toctree: generated/
67
+
68
+ value -- Value in physical_constants indexed by key
69
+ unit -- Unit in physical_constants indexed by key
70
+ precision -- Relative precision in physical_constants indexed by key
71
+ find -- Return list of physical_constant keys with a given string
72
+ ConstantWarning -- Constant sought not in newest CODATA data set
73
+
74
+ .. data:: physical_constants
75
+
76
+ Dictionary of physical constants, of the format
77
+ ``physical_constants[name] = (value, unit, uncertainty)``.
78
+
79
+ Available constants:
80
+
81
+ ====================================================================== ====
82
+ %(constant_names)s
83
+ ====================================================================== ====
84
+
85
+
86
+ Units
87
+ =====
88
+
89
+ SI prefixes
90
+ -----------
91
+
92
+ ============ =================================================================
93
+ ``quetta`` :math:`10^{30}`
94
+ ``ronna`` :math:`10^{27}`
95
+ ``yotta`` :math:`10^{24}`
96
+ ``zetta`` :math:`10^{21}`
97
+ ``exa`` :math:`10^{18}`
98
+ ``peta`` :math:`10^{15}`
99
+ ``tera`` :math:`10^{12}`
100
+ ``giga`` :math:`10^{9}`
101
+ ``mega`` :math:`10^{6}`
102
+ ``kilo`` :math:`10^{3}`
103
+ ``hecto`` :math:`10^{2}`
104
+ ``deka`` :math:`10^{1}`
105
+ ``deci`` :math:`10^{-1}`
106
+ ``centi`` :math:`10^{-2}`
107
+ ``milli`` :math:`10^{-3}`
108
+ ``micro`` :math:`10^{-6}`
109
+ ``nano`` :math:`10^{-9}`
110
+ ``pico`` :math:`10^{-12}`
111
+ ``femto`` :math:`10^{-15}`
112
+ ``atto`` :math:`10^{-18}`
113
+ ``zepto`` :math:`10^{-21}`
114
+ ``yocto`` :math:`10^{-24}`
115
+ ``ronto`` :math:`10^{-27}`
116
+ ``quecto`` :math:`10^{-30}`
117
+ ============ =================================================================
118
+
119
+ Binary prefixes
120
+ ---------------
121
+
122
+ ============ =================================================================
123
+ ``kibi`` :math:`2^{10}`
124
+ ``mebi`` :math:`2^{20}`
125
+ ``gibi`` :math:`2^{30}`
126
+ ``tebi`` :math:`2^{40}`
127
+ ``pebi`` :math:`2^{50}`
128
+ ``exbi`` :math:`2^{60}`
129
+ ``zebi`` :math:`2^{70}`
130
+ ``yobi`` :math:`2^{80}`
131
+ ============ =================================================================
132
+
133
+ Mass
134
+ ----
135
+
136
+ ================= ============================================================
137
+ ``gram`` :math:`10^{-3}` kg
138
+ ``metric_ton`` :math:`10^{3}` kg
139
+ ``grain`` one grain in kg
140
+ ``lb``            one pound (avoirdupois) in kg
141
+ ``pound``         one pound (avoirdupois) in kg
142
+ ``blob`` one inch version of a slug in kg (added in 1.0.0)
143
+ ``slinch`` one inch version of a slug in kg (added in 1.0.0)
144
+ ``slug`` one slug in kg (added in 1.0.0)
145
+ ``oz`` one ounce in kg
146
+ ``ounce`` one ounce in kg
147
+ ``stone`` one stone in kg
148
+ ``grain`` one grain in kg
149
+ ``long_ton`` one long ton in kg
150
+ ``short_ton`` one short ton in kg
151
+ ``troy_ounce`` one Troy ounce in kg
152
+ ``troy_pound`` one Troy pound in kg
153
+ ``carat`` one carat in kg
154
+ ``m_u`` atomic mass constant (in kg)
155
+ ``u`` atomic mass constant (in kg)
156
+ ``atomic_mass`` atomic mass constant (in kg)
157
+ ================= ============================================================
158
+
159
+ Angle
160
+ -----
161
+
162
+ ================= ============================================================
163
+ ``degree`` degree in radians
164
+ ``arcmin`` arc minute in radians
165
+ ``arcminute`` arc minute in radians
166
+ ``arcsec`` arc second in radians
167
+ ``arcsecond`` arc second in radians
168
+ ================= ============================================================
169
+
170
+
171
+ Time
172
+ ----
173
+
174
+ ================= ============================================================
175
+ ``minute`` one minute in seconds
176
+ ``hour`` one hour in seconds
177
+ ``day`` one day in seconds
178
+ ``week`` one week in seconds
179
+ ``year`` one year (365 days) in seconds
180
+ ``Julian_year`` one Julian year (365.25 days) in seconds
181
+ ================= ============================================================
182
+
183
+
184
+ Length
185
+ ------
186
+
187
+ ===================== ============================================================
188
+ ``inch`` one inch in meters
189
+ ``foot`` one foot in meters
190
+ ``yard`` one yard in meters
191
+ ``mile`` one mile in meters
192
+ ``mil`` one mil in meters
193
+ ``pt`` one point in meters
194
+ ``point`` one point in meters
195
+ ``survey_foot`` one survey foot in meters
196
+ ``survey_mile`` one survey mile in meters
197
+ ``nautical_mile`` one nautical mile in meters
198
+ ``fermi`` one Fermi in meters
199
+ ``angstrom`` one Angstrom in meters
200
+ ``micron`` one micron in meters
201
+ ``au`` one astronomical unit in meters
202
+ ``astronomical_unit`` one astronomical unit in meters
203
+ ``light_year`` one light year in meters
204
+ ``parsec`` one parsec in meters
205
+ ===================== ============================================================
206
+
207
+ Pressure
208
+ --------
209
+
210
+ ================= ============================================================
211
+ ``atm`` standard atmosphere in pascals
212
+ ``atmosphere`` standard atmosphere in pascals
213
+ ``bar`` one bar in pascals
214
+ ``torr`` one torr (mmHg) in pascals
215
+ ``mmHg`` one torr (mmHg) in pascals
216
+ ``psi`` one psi in pascals
217
+ ================= ============================================================
218
+
219
+ Area
220
+ ----
221
+
222
+ ================= ============================================================
223
+ ``hectare`` one hectare in square meters
224
+ ``acre`` one acre in square meters
225
+ ================= ============================================================
226
+
227
+
228
+ Volume
229
+ ------
230
+
231
+ =================== ========================================================
232
+ ``liter`` one liter in cubic meters
233
+ ``litre`` one liter in cubic meters
234
+ ``gallon`` one gallon (US) in cubic meters
235
+ ``gallon_US`` one gallon (US) in cubic meters
236
+ ``gallon_imp`` one gallon (UK) in cubic meters
237
+ ``fluid_ounce`` one fluid ounce (US) in cubic meters
238
+ ``fluid_ounce_US`` one fluid ounce (US) in cubic meters
239
+ ``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters
240
+ ``bbl`` one barrel in cubic meters
241
+ ``barrel`` one barrel in cubic meters
242
+ =================== ========================================================
243
+
244
+ Speed
245
+ -----
246
+
247
+ ================== ==========================================================
248
+ ``kmh`` kilometers per hour in meters per second
249
+ ``mph`` miles per hour in meters per second
250
+ ``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second
251
+ ``speed_of_sound`` one Mach (approx., at 15 C, 1 atm) in meters per second
252
+ ``knot`` one knot in meters per second
253
+ ================== ==========================================================
254
+
255
+
256
+ Temperature
257
+ -----------
258
+
259
+ ===================== =======================================================
260
+ ``zero_Celsius`` zero of Celsius scale in Kelvin
261
+ ``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins
262
+ ===================== =======================================================
263
+
264
+ .. autosummary::
265
+ :toctree: generated/
266
+
267
+ convert_temperature
268
+
269
+ Energy
270
+ ------
271
+
272
+ ==================== =======================================================
273
+ ``eV`` one electron volt in Joules
274
+ ``electron_volt`` one electron volt in Joules
275
+ ``calorie`` one calorie (thermochemical) in Joules
276
+ ``calorie_th`` one calorie (thermochemical) in Joules
277
+ ``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules
278
+ ``erg`` one erg in Joules
279
+ ``Btu`` one British thermal unit (International Steam Table) in Joules
280
+ ``Btu_IT`` one British thermal unit (International Steam Table) in Joules
281
+ ``Btu_th`` one British thermal unit (thermochemical) in Joules
282
+ ``ton_TNT`` one ton of TNT in Joules
283
+ ==================== =======================================================
284
+
285
+ Power
286
+ -----
287
+
288
+ ==================== =======================================================
289
+ ``hp`` one horsepower in watts
290
+ ``horsepower`` one horsepower in watts
291
+ ==================== =======================================================
292
+
293
+ Force
294
+ -----
295
+
296
+ ==================== =======================================================
297
+ ``dyn`` one dyne in newtons
298
+ ``dyne`` one dyne in newtons
299
+ ``lbf`` one pound force in newtons
300
+ ``pound_force`` one pound force in newtons
301
+ ``kgf`` one kilogram force in newtons
302
+ ``kilogram_force`` one kilogram force in newtons
303
+ ==================== =======================================================
304
+
305
+ Optics
306
+ ------
307
+
308
+ .. autosummary::
309
+ :toctree: generated/
310
+
311
+ lambda2nu
312
+ nu2lambda
313
+
314
+ References
315
+ ==========
316
+
317
+ .. [CODATA2018] CODATA Recommended Values of the Fundamental
318
+ Physical Constants 2018.
319
+
320
+ https://physics.nist.gov/cuu/Constants/
321
+
322
+ """ # noqa: E501
323
# Modules contributed by BasSw (wegwerp@gmail.com)
from ._codata import *
from ._constants import *
from ._codata import _obsolete_constants, physical_constants

# Deprecated namespaces, to be removed in v2.0.0
from . import codata, constants

# Build the "Available constants" table substituted into the module
# docstring above: one row per non-obsolete CODATA entry, sorted
# case-insensitively (hence the lowercased first tuple element).
_constant_names_list = [(_k.lower(), _k, _v)
                        for _k, _v in physical_constants.items()
                        if _k not in _obsolete_constants]
# Each row: ``name`` padded out to 66 characters, then value and unit.
_constant_names = "\n".join(["``{}``{} {} {}".format(_x[1], " "*(66-len(_x[1])),
                                                     _x[2][0], _x[2][1])
                             for _x in sorted(_constant_names_list)])
# __doc__ is None under ``python -OO``; skip the substitution in that case.
if __doc__:
    __doc__ = __doc__ % dict(constant_names=_constant_names)

del _constant_names
del _constant_names_list

__all__ = [s for s in dir() if not s.startswith('_')]

from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
parrot/lib/python3.10/site-packages/scipy/constants/__pycache__/_constants.cpython-310.pyc ADDED
Binary file (8.81 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/constants/__pycache__/codata.cpython-310.pyc ADDED
Binary file (655 Bytes). View file
 
parrot/lib/python3.10/site-packages/scipy/constants/__pycache__/constants.cpython-310.pyc ADDED
Binary file (1.86 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/test_constants.cpython-310.pyc ADDED
Binary file (3.66 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc ADDED
Binary file (1.6 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc ADDED
Binary file (13 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/fftpack/tests/test_helper.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Created by Pearu Peterson, September 2002
2
+
3
+ __usage__ = """
4
+ Build fftpack:
5
+ python setup_fftpack.py build
6
+ Run tests if scipy is installed:
7
+ python -c 'import scipy;scipy.fftpack.test(<level>)'
8
+ Run tests if fftpack is not installed:
9
+ python tests/test_helper.py [<level>]
10
+ """
11
+
12
+ from numpy.testing import assert_array_almost_equal
13
+ from scipy.fftpack import fftshift, ifftshift, fftfreq, rfftfreq
14
+
15
+ from numpy import pi, random
16
+
17
class TestFFTShift:
    """Checks for ``fftshift``/``ifftshift`` on odd and even lengths."""

    def test_definition(self):
        # (unshifted, shifted) pairs for an odd and an even length.
        cases = [
            ([0, 1, 2, 3, 4, -4, -3, -2, -1],
             [-4, -3, -2, -1, 0, 1, 2, 3, 4]),
            ([0, 1, 2, 3, 4, -5, -4, -3, -2, -1],
             [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]),
        ]
        for unshifted, shifted in cases:
            assert_array_almost_equal(fftshift(unshifted), shifted)
            assert_array_almost_equal(ifftshift(shifted), unshifted)

    def test_inverse(self):
        # ifftshift must undo fftshift for a variety of sizes.
        for size in (1, 4, 9, 100, 211):
            data = random.random((size,))
            assert_array_almost_equal(ifftshift(fftshift(data)), data)
33
+
34
+
35
class TestFFTFreq:
    """Checks ``fftfreq`` against hand-written frequency orderings."""

    def test_definition(self):
        expected = {
            9: [0, 1, 2, 3, 4, -4, -3, -2, -1],
            10: [0, 1, 2, 3, 4, -5, -4, -3, -2, -1],
        }
        for n, freqs in expected.items():
            # Unit sample spacing and a spacing of pi must give the same
            # integer frequencies after rescaling.
            assert_array_almost_equal(n * fftfreq(n), freqs)
            assert_array_almost_equal(n * pi * fftfreq(n, pi), freqs)
44
+
45
+
46
class TestRFFTFreq:
    """Checks ``rfftfreq`` against hand-written frequency orderings."""

    def test_definition(self):
        expected = {
            9: [0, 1, 1, 2, 2, 3, 3, 4, 4],
            10: [0, 1, 1, 2, 2, 3, 3, 4, 4, 5],
        }
        for n, freqs in expected.items():
            # Unit sample spacing and a spacing of pi must give the same
            # integer frequencies after rescaling.
            assert_array_almost_equal(n * rfftfreq(n), freqs)
            assert_array_almost_equal(n * pi * rfftfreq(n, pi), freqs)
parrot/lib/python3.10/site-packages/scipy/fftpack/tests/test_real_transforms.py ADDED
@@ -0,0 +1,815 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from os.path import join, dirname

import numpy as np
from numpy.testing import assert_array_almost_equal, assert_equal
import pytest
from pytest import raises as assert_raises

from scipy.fftpack._realtransforms import (
    dct, idct, dst, idst, dctn, idctn, dstn, idstn)

# Matlab reference data
# x%d / y%d presumably are paired input/expected-output arrays generated
# in MATLAB -- TODO confirm against the contents of test.npz.
MDATA = np.load(join(dirname(__file__), 'test.npz'))
X = [MDATA['x%d' % i] for i in range(8)]
Y = [MDATA['y%d' % i] for i in range(8)]

# FFTW reference data: the data are organized as follows:
# * SIZES is an array containing all available sizes
# * for every type (1, 2, 3, 4) and every size, the array dct_type_size
# contains the output of the DCT applied to the input np.linspace(0, size-1,
# size)
FFTWDATA_DOUBLE = np.load(join(dirname(__file__), 'fftw_double_ref.npz'))
FFTWDATA_SINGLE = np.load(join(dirname(__file__), 'fftw_single_ref.npz'))
FFTWDATA_SIZES = FFTWDATA_DOUBLE['sizes']
+
25
+
26
def fftw_dct_ref(type, size, dt):
    """Return (input, FFTW reference output, effective dtype) for a DCT.

    ``type`` and ``size`` select the precomputed ``dct_<type>_<size>``
    entry; input dtypes are promoted to at least float32, since only
    single- and double-precision reference tables exist.
    """
    x = np.linspace(0, size-1, size).astype(dt)
    out_dt = np.result_type(np.float32, dt)
    if out_dt == np.float64:
        table = FFTWDATA_DOUBLE
    elif out_dt == np.float32:
        table = FFTWDATA_SINGLE
    else:
        raise ValueError()
    y = table['dct_%d_%d' % (type, size)].astype(out_dt)
    return x, y, out_dt
37
+
38
+
39
def fftw_dst_ref(type, size, dt):
    """Return (input, FFTW reference output, effective dtype) for a DST.

    ``type`` and ``size`` select the precomputed ``dst_<type>_<size>``
    entry; input dtypes are promoted to at least float32, since only
    single- and double-precision reference tables exist.
    """
    x = np.linspace(0, size-1, size).astype(dt)
    out_dt = np.result_type(np.float32, dt)
    if out_dt == np.float64:
        table = FFTWDATA_DOUBLE
    elif out_dt == np.float32:
        table = FFTWDATA_SINGLE
    else:
        raise ValueError()
    y = table['dst_%d_%d' % (type, size)].astype(out_dt)
    return x, y, out_dt
50
+
51
+
52
def dct_2d_ref(x, **kwargs):
    """Reference 2-D DCT: apply ``dct`` along every row, then every column."""
    out = np.array(x, copy=True)
    n_rows, n_cols = out.shape
    for r in range(n_rows):
        out[r, :] = dct(out[r, :], **kwargs)
    for c in range(n_cols):
        out[:, c] = dct(out[:, c], **kwargs)
    return out
60
+
61
+
62
def idct_2d_ref(x, **kwargs):
    """Reference 2-D IDCT: apply ``idct`` along every row, then every column."""
    out = np.array(x, copy=True)
    n_rows, n_cols = out.shape
    for r in range(n_rows):
        out[r, :] = idct(out[r, :], **kwargs)
    for c in range(n_cols):
        out[:, c] = idct(out[:, c], **kwargs)
    return out
70
+
71
+
72
def dst_2d_ref(x, **kwargs):
    """Reference 2-D DST: apply ``dst`` along every row, then every column."""
    out = np.array(x, copy=True)
    n_rows, n_cols = out.shape
    for r in range(n_rows):
        out[r, :] = dst(out[r, :], **kwargs)
    for c in range(n_cols):
        out[:, c] = dst(out[:, c], **kwargs)
    return out
80
+
81
+
82
def idst_2d_ref(x, **kwargs):
    """Reference 2-D IDST: apply ``idst`` along every row, then every column."""
    out = np.array(x, copy=True)
    n_rows, n_cols = out.shape
    for r in range(n_rows):
        out[r, :] = idst(out[r, :], **kwargs)
    for c in range(n_cols):
        out[:, c] = idst(out[:, c], **kwargs)
    return out
90
+
91
+
92
def naive_dct1(x, norm=None):
    """Textbook O(N^2) DCT-I, used as a reference implementation.

    The two endpoint samples carry a different weight from the interior
    samples; in 'ortho' mode the first and last output rows get an extra
    1/sqrt(2) factor so the transform matrix is orthonormal.
    """
    data = np.array(x, copy=True)
    npts = len(data)
    denom = npts - 1
    if norm == 'ortho':
        edge_scale = np.sqrt(1.0/denom)
        mid_scale = np.sqrt(2.0/denom)
    else:
        edge_scale, mid_scale = 1, 2
    out = np.zeros(npts)
    for k in range(npts):
        # Interior terms first, then the two endpoint contributions,
        # matching the straightforward summation order.
        for n in range(1, npts-1):
            out[k] += mid_scale*data[n]*np.cos(np.pi*n*k/denom)
        out[k] += edge_scale * data[0]
        out[k] += edge_scale * data[npts-1] * (1 if k % 2 == 0 else -1)
    if norm == 'ortho':
        out[0] *= 1/np.sqrt(2)
        out[npts-1] *= 1/np.sqrt(2)
    return out
111
+
112
+
113
def naive_dst1(x, norm=None):
    """Textbook O(N^2) DST-I, used as a reference implementation."""
    data = np.array(x, copy=True)
    npts = len(data)
    denom = npts + 1
    out = np.zeros(npts)
    for k in range(npts):
        out[k] = sum(2*data[n]*np.sin(np.pi*(n+1.0)*(k+1.0)/denom)
                     for n in range(npts))
    if norm == 'ortho':
        # Single uniform scale factor makes the transform orthonormal.
        out *= np.sqrt(0.5/denom)
    return out
125
+
126
+
127
def naive_dct4(x, norm=None):
    """Textbook O(N^2) DCT-IV, used as a reference implementation."""
    data = np.array(x, copy=True)
    npts = len(data)
    out = np.zeros(npts)
    for k in range(npts):
        out[k] = sum(data[n]*np.cos(np.pi*(n+0.5)*(k+0.5)/npts)
                     for n in range(npts))
    # 'ortho' applies the orthonormal scale; the default matches
    # fftpack's unnormalized convention (factor 2).
    out *= np.sqrt(2.0/npts) if norm == 'ortho' else 2
    return out
140
+
141
+
142
def naive_dst4(x, norm=None):
    """Textbook O(N^2) DST-IV, used as a reference implementation."""
    data = np.array(x, copy=True)
    npts = len(data)
    out = np.zeros(npts)
    for k in range(npts):
        out[k] = sum(data[n]*np.sin(np.pi*(n+0.5)*(k+0.5)/npts)
                     for n in range(npts))
    # 'ortho' applies the orthonormal scale; the default matches
    # fftpack's unnormalized convention (factor 2).
    out *= np.sqrt(2.0/npts) if norm == 'ortho' else 2
    return out
155
+
156
+
157
class TestComplex:
    """Each transform applied to purely imaginary input must equal ``1j``
    times the transform of the corresponding real input."""

    def test_dct_complex64(self):
        got = dct(1j*np.arange(5, dtype=np.complex64))
        want = 1j*dct(np.arange(5))
        assert_array_almost_equal(want, got)

    def test_dct_complex(self):
        got = dct(np.arange(5)*1j)
        want = 1j*dct(np.arange(5))
        assert_array_almost_equal(want, got)

    def test_idct_complex(self):
        got = idct(np.arange(5)*1j)
        want = 1j*idct(np.arange(5))
        assert_array_almost_equal(want, got)

    def test_dst_complex64(self):
        got = dst(np.arange(5, dtype=np.complex64)*1j)
        want = 1j*dst(np.arange(5))
        assert_array_almost_equal(want, got)

    def test_dst_complex(self):
        got = dst(np.arange(5)*1j)
        want = 1j*dst(np.arange(5))
        assert_array_almost_equal(want, got)

    def test_idst_complex(self):
        got = idst(np.arange(5)*1j)
        want = 1j*idst(np.arange(5))
        assert_array_almost_equal(want, got)
187
+
188
+
189
class _TestDCTBase:
    # Base class: subclasses set rdt (input dtype), dec (decimal tolerance)
    # and type (DCT type) in setup_method.
    def setup_method(self):
        self.rdt = None
        self.dec = 14
        self.type = None

    def test_definition(self):
        # Compare against precomputed FFTW reference outputs.
        for i in FFTWDATA_SIZES:
            x, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
            y = dct(x, type=self.type)
            assert_equal(y.dtype, dt)
            # XXX: we divide by np.max(y) because the tests fail otherwise. We
            # should really use something like assert_array_approx_equal. The
            # difference is due to fftw using a better algorithm w.r.t error
            # propagation compared to the ones from fftpack.
            assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
                                      err_msg="Size %d failed" % i)

    def test_axis(self):
        # A 2-D transform along the default (last) axis and along axis=0
        # must agree with transforming each 1-D slice separately.
        nt = 2
        for i in [7, 8, 9, 16, 32, 64]:
            x = np.random.randn(nt, i)
            y = dct(x, type=self.type)
            for j in range(nt):
                assert_array_almost_equal(y[j], dct(x[j], type=self.type),
                                          decimal=self.dec)

            x = x.T
            y = dct(x, axis=0, type=self.type)
            for j in range(nt):
                assert_array_almost_equal(y[:,j], dct(x[:,j], type=self.type),
                                          decimal=self.dec)
221
+
222
+
223
class _TestDCTIBase(_TestDCTBase):
    def test_definition_ortho(self):
        # Test orthonormal mode against the O(N^2) textbook DCT-I
        # implementation ``naive_dct1``.
        dt = np.result_type(np.float32, self.rdt)
        for xr in X:
            x = np.array(xr, dtype=self.rdt)
            y = dct(x, norm='ortho', type=1)
            y2 = naive_dct1(x, norm='ortho')
            assert_equal(y.dtype, dt)
            assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
233
+
234
class _TestDCTIIBase(_TestDCTBase):
    def test_definition_matlab(self):
        # Test correspondence with MATLAB (orthonormal mode): X holds the
        # inputs and Y the expected outputs loaded from test.npz.
        dt = np.result_type(np.float32, self.rdt)
        for xr, yr in zip(X, Y):
            x = np.array(xr, dtype=dt)
            y = dct(x, norm="ortho", type=2)
            assert_equal(y.dtype, dt)
            assert_array_almost_equal(y, yr, decimal=self.dec)
243
+
244
+
245
class _TestDCTIIIBase(_TestDCTBase):
    def test_definition_ortho(self):
        # In orthonormal mode, DCT-III inverts DCT-II with no extra
        # scaling, so the round trip must reproduce the input.
        dt = np.result_type(np.float32, self.rdt)
        for xr in X:
            x = np.array(xr, dtype=self.rdt)
            y = dct(x, norm='ortho', type=2)
            xi = dct(y, norm="ortho", type=3)
            assert_equal(xi.dtype, dt)
            assert_array_almost_equal(xi, x, decimal=self.dec)
255
+
256
class _TestDCTIVBase(_TestDCTBase):
    def test_definition_ortho(self):
        # Test orthonormal mode against the O(N^2) textbook DCT-IV
        # implementation ``naive_dct4``.
        dt = np.result_type(np.float32, self.rdt)
        for xr in X:
            x = np.array(xr, dtype=self.rdt)
            y = dct(x, norm='ortho', type=4)
            y2 = naive_dct4(x, norm='ortho')
            assert_equal(y.dtype, dt)
            assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
266
+
267
+
268
class TestDCTIDouble(_TestDCTIBase):
    # DCT-I, float64 input, agreement to 10 decimals expected.
    def setup_method(self):
        self.rdt = np.float64
        self.dec = 10
        self.type = 1
273
+
274
+
275
class TestDCTIFloat(_TestDCTIBase):
    # DCT-I, float32 input, agreement to 4 decimals expected.
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 4
        self.type = 1
280
+
281
+
282
class TestDCTIInt(_TestDCTIBase):
    # DCT-I, integer input, agreement to 5 decimals expected.
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 1
287
+
288
+
289
class TestDCTIIDouble(_TestDCTIIBase):
    # DCT-II, float64 input, agreement to 10 decimals expected.
    def setup_method(self):
        self.rdt = np.float64
        self.dec = 10
        self.type = 2
294
+
295
+
296
class TestDCTIIFloat(_TestDCTIIBase):
    # DCT-II, float32 input, agreement to 5 decimals expected.
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        self.type = 2
301
+
302
+
303
class TestDCTIIInt(_TestDCTIIBase):
    # DCT-II, integer input, agreement to 5 decimals expected.
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 2
308
+
309
+
310
class TestDCTIIIDouble(_TestDCTIIIBase):
    # DCT-III, float64 input, agreement to 14 decimals expected.
    def setup_method(self):
        self.rdt = np.float64
        self.dec = 14
        self.type = 3
315
+
316
+
317
class TestDCTIIIFloat(_TestDCTIIIBase):
    # DCT-III, float32 input, agreement to 5 decimals expected.
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        self.type = 3
322
+
323
+
324
class TestDCTIIIInt(_TestDCTIIIBase):
    # DCT-III, integer input, agreement to 5 decimals expected.
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 3
329
+
330
+
331
class TestDCTIVDouble(_TestDCTIVBase):
    # DCT-IV, float64 input, agreement to 12 decimals expected.
    def setup_method(self):
        self.rdt = np.float64
        self.dec = 12
        # Was erroneously 3, which made the inherited test_definition /
        # test_axis re-test DCT-III; the matching inverse classes
        # (TestIDCTIV*) consistently use type 4.
        self.type = 4
336
+
337
+
338
class TestDCTIVFloat(_TestDCTIVBase):
    # DCT-IV, float32 input, agreement to 5 decimals expected.
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        # Was erroneously 3 (re-testing DCT-III); fixed to match the
        # class name and the TestIDCTIV* inverse classes.
        self.type = 4
343
+
344
+
345
class TestDCTIVInt(_TestDCTIVBase):
    # DCT-IV, integer input, agreement to 5 decimals expected.
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        # Was erroneously 3 (re-testing DCT-III); fixed to match the
        # class name and the TestIDCTIV* inverse classes.
        self.type = 4
350
+
351
+
352
class _TestIDCTBase:
    # Base class: subclasses set rdt (input dtype), dec (decimal tolerance)
    # and type (DCT type) in setup_method.
    def setup_method(self):
        self.rdt = None
        self.dec = 14
        self.type = None

    def test_definition(self):
        # Apply idct to the FFTW reference DCT output; after undoing the
        # unnormalized scale factor (2*(N-1) for type 1, 2*N otherwise)
        # this must reproduce the original input.
        for i in FFTWDATA_SIZES:
            xr, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
            x = idct(yr, type=self.type)
            if self.type == 1:
                x /= 2 * (i-1)
            else:
                x /= 2 * i
            assert_equal(x.dtype, dt)
            # XXX: we divide by np.max(y) because the tests fail otherwise. We
            # should really use something like assert_array_approx_equal. The
            # difference is due to fftw using a better algorithm w.r.t error
            # propagation compared to the ones from fftpack.
            assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
                                      err_msg="Size %d failed" % i)
373
+
374
+
375
class TestIDCTIDouble(_TestIDCTBase):
    # IDCT-I, float64 input, agreement to 10 decimals expected.
    def setup_method(self):
        self.rdt = np.float64
        self.dec = 10
        self.type = 1
380
+
381
+
382
class TestIDCTIFloat(_TestIDCTBase):
    # IDCT-I, float32 input, agreement to 4 decimals expected.
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 4
        self.type = 1
387
+
388
+
389
class TestIDCTIInt(_TestIDCTBase):
    # IDCT-I, integer input, agreement to 4 decimals expected.
    def setup_method(self):
        self.rdt = int
        self.dec = 4
        self.type = 1
394
+
395
+
396
class TestIDCTIIDouble(_TestIDCTBase):
    # IDCT-II, float64 input, agreement to 10 decimals expected.
    def setup_method(self):
        self.rdt = np.float64
        self.dec = 10
        self.type = 2
401
+
402
+
403
class TestIDCTIIFloat(_TestIDCTBase):
    # IDCT-II, float32 input, agreement to 5 decimals expected.
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        self.type = 2
408
+
409
+
410
class TestIDCTIIInt(_TestIDCTBase):
    # IDCT-II, integer input, agreement to 5 decimals expected.
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 2
415
+
416
+
417
class TestIDCTIIIDouble(_TestIDCTBase):
    # IDCT-III, float64 input, agreement to 14 decimals expected.
    def setup_method(self):
        self.rdt = np.float64
        self.dec = 14
        self.type = 3
422
+
423
+
424
class TestIDCTIIIFloat(_TestIDCTBase):
    # IDCT-III, float32 input, agreement to 5 decimals expected.
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        self.type = 3
429
+
430
+
431
class TestIDCTIIIInt(_TestIDCTBase):
    # IDCT-III, integer input, agreement to 5 decimals expected.
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 3
436
+
437
class TestIDCTIVDouble(_TestIDCTBase):
    # IDCT-IV, float64 input, agreement to 12 decimals expected.
    def setup_method(self):
        self.rdt = np.float64
        self.dec = 12
        self.type = 4
442
+
443
+
444
class TestIDCTIVFloat(_TestIDCTBase):
    # IDCT-IV, float32 input, agreement to 5 decimals expected.
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        self.type = 4
449
+
450
+
451
class TestIDCTIVInt(_TestIDCTBase):
    # IDCT-IV, integer input, agreement to 5 decimals expected.
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 4
456
+
457
class _TestDSTBase:
    def setup_method(self):
        self.rdt = None  # dtype
        self.dec = None  # number of decimals to match
        self.type = None  # dst type

    def test_definition(self):
        # Compare against precomputed FFTW reference outputs.
        for i in FFTWDATA_SIZES:
            xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt)
            y = dst(xr, type=self.type)
            assert_equal(y.dtype, dt)
            # XXX: we divide by np.max(y) because the tests fail otherwise. We
            # should really use something like assert_array_approx_equal. The
            # difference is due to fftw using a better algorithm w.r.t error
            # propagation compared to the ones from fftpack.
            assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
                                      err_msg="Size %d failed" % i)
474
+
475
+
476
class _TestDSTIBase(_TestDSTBase):
    def test_definition_ortho(self):
        # Test orthonormal mode against the O(N^2) textbook DST-I
        # implementation ``naive_dst1``.
        dt = np.result_type(np.float32, self.rdt)
        for xr in X:
            x = np.array(xr, dtype=self.rdt)
            y = dst(x, norm='ortho', type=1)
            y2 = naive_dst1(x, norm='ortho')
            assert_equal(y.dtype, dt)
            assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
486
+
487
class _TestDSTIVBase(_TestDSTBase):
    def test_definition_ortho(self):
        # Test orthonormal mode against the O(N^2) textbook DST-IV
        # implementation ``naive_dst4``.
        dt = np.result_type(np.float32, self.rdt)
        for xr in X:
            x = np.array(xr, dtype=self.rdt)
            y = dst(x, norm='ortho', type=4)
            y2 = naive_dst4(x, norm='ortho')
            assert_equal(y.dtype, dt)
            assert_array_almost_equal(y, y2, decimal=self.dec)
497
+
498
# Forward-DST parameterisations: one concrete class per (variant, dtype)
# pair.  ``dec`` is the number of decimals of agreement required.
class TestDSTIDouble(_TestDSTIBase):
    def setup_method(self):
        self.type = 1
        self.rdt = np.float64
        self.dec = 12


class TestDSTIFloat(_TestDSTIBase):
    def setup_method(self):
        self.type = 1
        self.rdt = np.float32
        self.dec = 4


class TestDSTIInt(_TestDSTIBase):
    def setup_method(self):
        self.type = 1
        self.rdt = int
        self.dec = 5


class TestDSTIIDouble(_TestDSTBase):
    def setup_method(self):
        self.type = 2
        self.rdt = np.float64
        self.dec = 14


class TestDSTIIFloat(_TestDSTBase):
    def setup_method(self):
        self.type = 2
        self.rdt = np.float32
        self.dec = 6


class TestDSTIIInt(_TestDSTBase):
    def setup_method(self):
        self.type = 2
        self.rdt = int
        self.dec = 6


class TestDSTIIIDouble(_TestDSTBase):
    def setup_method(self):
        self.type = 3
        self.rdt = np.float64
        self.dec = 14


class TestDSTIIIFloat(_TestDSTBase):
    def setup_method(self):
        self.type = 3
        self.rdt = np.float32
        self.dec = 7


class TestDSTIIIInt(_TestDSTBase):
    def setup_method(self):
        self.type = 3
        self.rdt = int
        self.dec = 7


class TestDSTIVDouble(_TestDSTIVBase):
    def setup_method(self):
        self.type = 4
        self.rdt = np.float64
        self.dec = 12


class TestDSTIVFloat(_TestDSTIVBase):
    def setup_method(self):
        self.type = 4
        self.rdt = np.float32
        self.dec = 4


class TestDSTIVInt(_TestDSTIVBase):
    def setup_method(self):
        self.type = 4
        self.rdt = int
        self.dec = 5
class _TestIDSTBase:
    """Shared machinery for inverse-DST round-trip tests.

    ``idst`` in fftpack is unnormalised, so the reconstruction must be
    rescaled before it is compared against the original input.
    """

    def setup_method(self):
        self.rdt = None   # input dtype
        self.dec = None   # number of decimals that must match
        self.type = None  # DST variant (1-4)

    def test_definition(self):
        for size in FFTWDATA_SIZES:
            xr, yr, dt = fftw_dst_ref(self.type, size, self.rdt)
            x = idst(yr, type=self.type)
            # Undo the unnormalised scaling: DST-I uses 2*(n+1), every
            # other variant uses 2*n.
            scale = 2 * (size + 1) if self.type == 1 else 2 * size
            x /= scale
            assert_equal(x.dtype, dt)
            # Peak-normalised comparison: fftpack's error propagation is
            # worse than FFTW's, so an absolute check would fail spuriously.
            peak = np.max(x)
            assert_array_almost_equal(x / peak, xr / peak,
                                      decimal=self.dec,
                                      err_msg="Size %d failed" % size)
# Inverse-DST parameterisations: one concrete class per (variant, dtype)
# pair.  ``dec`` is the number of decimals of agreement required.
class TestIDSTIDouble(_TestIDSTBase):
    def setup_method(self):
        self.type = 1
        self.rdt = np.float64
        self.dec = 12


class TestIDSTIFloat(_TestIDSTBase):
    def setup_method(self):
        self.type = 1
        self.rdt = np.float32
        self.dec = 4


class TestIDSTIInt(_TestIDSTBase):
    def setup_method(self):
        self.type = 1
        self.rdt = int
        self.dec = 4


class TestIDSTIIDouble(_TestIDSTBase):
    def setup_method(self):
        self.type = 2
        self.rdt = np.float64
        self.dec = 14


class TestIDSTIIFloat(_TestIDSTBase):
    def setup_method(self):
        self.type = 2
        self.rdt = np.float32
        self.dec = 6


class TestIDSTIIInt(_TestIDSTBase):
    def setup_method(self):
        self.type = 2
        self.rdt = int
        self.dec = 6


class TestIDSTIIIDouble(_TestIDSTBase):
    def setup_method(self):
        self.type = 3
        self.rdt = np.float64
        self.dec = 14


class TestIDSTIIIFloat(_TestIDSTBase):
    def setup_method(self):
        self.type = 3
        self.rdt = np.float32
        self.dec = 6


class TestIDSTIIIInt(_TestIDSTBase):
    def setup_method(self):
        self.type = 3
        self.rdt = int
        self.dec = 6


class TestIDSTIVDouble(_TestIDSTBase):
    def setup_method(self):
        self.type = 4
        self.rdt = np.float64
        self.dec = 12


class TestIDSTIVFloat(_TestIDSTBase):
    def setup_method(self):
        self.type = 4
        self.rdt = np.float32
        self.dec = 6
class TestIDSTIVInt(_TestIDSTBase):
    # NOTE(review): renamed from the misspelled ``TestIDSTIVnt`` so the
    # name matches every sibling ``Test...Int`` class.  pytest collection
    # is unaffected (the ``Test*`` pattern still matches) and nothing in
    # the visible file references the class by name.
    def setup_method(self):
        # Type-IV inverse DST with integer input; tolerance matches the
        # float32 variant of the same transform.
        self.rdt = int
        self.dec = 6
        self.type = 4
class TestOverwrite:
    """Check that the real transforms honour the ``overwrite_x`` flag."""

    real_dtypes = [np.float32, np.float64]

    def _check(self, x, routine, transform_type, fftsize, axis, norm,
               overwrite_x, **kw):
        # Run the transform on a copy and, unless overwriting was
        # explicitly requested, verify the input buffer is left untouched.
        x2 = x.copy()
        routine(x2, transform_type, fftsize, axis, norm,
                overwrite_x=overwrite_x)

        sig = "{}({}{!r}, {!r}, axis={!r}, overwrite_x={!r})".format(
            routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
        if not overwrite_x:
            assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)

    def _check_1d(self, routine, dtype, shape, axis):
        # Deterministic random data of the requested dtype/shape.
        np.random.seed(1234)
        if np.issubdtype(dtype, np.complexfloating):
            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
        else:
            data = np.random.randn(*shape)
        data = data.astype(dtype)

        # Exercise every transform variant with and without overwrite,
        # in both normalisation modes.
        for transform_type in [1, 2, 3, 4]:
            for overwrite_x in [True, False]:
                for norm in [None, 'ortho']:
                    self._check(data, routine, transform_type, None, axis,
                                norm, overwrite_x)

    def test_dct(self):
        for dtype in self.real_dtypes:
            for shape, axis in [((16,), -1), ((16, 2), 0), ((2, 16), 1)]:
                self._check_1d(dct, dtype, shape, axis)

    def test_idct(self):
        for dtype in self.real_dtypes:
            for shape, axis in [((16,), -1), ((16, 2), 0), ((2, 16), 1)]:
                self._check_1d(idct, dtype, shape, axis)

    def test_dst(self):
        for dtype in self.real_dtypes:
            for shape, axis in [((16,), -1), ((16, 2), 0), ((2, 16), 1)]:
                self._check_1d(dst, dtype, shape, axis)

    def test_idst(self):
        for dtype in self.real_dtypes:
            for shape, axis in [((16,), -1), ((16, 2), 0), ((2, 16), 1)]:
                self._check_1d(idst, dtype, shape, axis)
class Test_DCTN_IDCTN:
    """Round-trip and 2-D reference checks for the n-dimensional transforms."""

    dec = 14
    dct_type = [1, 2, 3, 4]
    norms = [None, 'ortho']
    rstate = np.random.RandomState(1234)
    shape = (32, 16)
    data = rstate.randn(*shape)

    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
                                                   (dstn, idstn)])
    @pytest.mark.parametrize('axes', [None,
                                      1, (1,), [1],
                                      0, (0,), [0],
                                      (0, 1), [0, 1],
                                      (-2, -1), [-2, -1]])
    @pytest.mark.parametrize('dct_type', dct_type)
    @pytest.mark.parametrize('norm', ['ortho'])
    def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm):
        # Forward then inverse along the same axes must recover the input.
        transformed = fforward(self.data, type=dct_type, axes=axes, norm=norm)
        recovered = finverse(transformed, type=dct_type, axes=axes, norm=norm)
        assert_array_almost_equal(self.data, recovered, decimal=12)

    @pytest.mark.parametrize('fforward,fforward_ref', [(dctn, dct_2d_ref),
                                                       (dstn, dst_2d_ref)])
    @pytest.mark.parametrize('dct_type', dct_type)
    @pytest.mark.parametrize('norm', norms)
    def test_dctn_vs_2d_reference(self, fforward, fforward_ref,
                                  dct_type, norm):
        # The n-d transform over all axes must match the explicit 2-d
        # reference implementation.
        actual = fforward(self.data, type=dct_type, axes=None, norm=norm)
        expected = fforward_ref(self.data, type=dct_type, norm=norm)
        assert_array_almost_equal(actual, expected, decimal=11)

    @pytest.mark.parametrize('finverse,finverse_ref', [(idctn, idct_2d_ref),
                                                       (idstn, idst_2d_ref)])
    @pytest.mark.parametrize('dct_type', dct_type)
    @pytest.mark.parametrize('norm', [None, 'ortho'])
    def test_idctn_vs_2d_reference(self, finverse, finverse_ref,
                                   dct_type, norm):
        fdata = dctn(self.data, type=dct_type, norm=norm)
        actual = finverse(fdata, type=dct_type, norm=norm)
        expected = finverse_ref(fdata, type=dct_type, norm=norm)
        assert_array_almost_equal(actual, expected, decimal=11)

    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
                                                   (dstn, idstn)])
    def test_axes_and_shape(self, fforward, finverse):
        # Mismatched ``axes``/``shape`` lengths must raise ValueError.
        message = ("when given, axes and shape arguments"
                   " have to be of the same length")
        with assert_raises(ValueError, match=message):
            fforward(self.data, shape=self.data.shape[0], axes=(0, 1))

        with assert_raises(ValueError, match=message):
            fforward(self.data, shape=self.data.shape[0], axes=None)

        with assert_raises(ValueError, match=message):
            fforward(self.data, shape=self.data.shape, axes=0)

    @pytest.mark.parametrize('fforward', [dctn, dstn])
    def test_shape(self, fforward):
        # Requesting a larger ``shape`` yields an output of that shape.
        out = fforward(self.data, shape=(128, 128), axes=None)
        assert_equal(out.shape, (128, 128))

    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
                                                   (dstn, idstn)])
    @pytest.mark.parametrize('axes', [1, (1,), [1],
                                      0, (0,), [0]])
    def test_shape_is_none_with_axes(self, fforward, finverse, axes):
        # shape=None with explicit axes must still round-trip exactly.
        transformed = fforward(self.data, shape=None, axes=axes, norm='ortho')
        recovered = finverse(transformed, shape=None, axes=axes, norm='ortho')
        assert_array_almost_equal(self.data, recovered, decimal=self.dec)
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_backward_cpu_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor _adaptive_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self);
21
+
22
+ } // namespace cpu
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_backward.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_batch_norm_impl_index_backward_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)
26
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
27
+ return at::_ops::_batch_norm_impl_index_backward::call(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
28
+ }
29
+
30
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_choose_qparams_per_tensor_ops.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _choose_qparams_per_tensor {
18
+ using schema = ::std::tuple<double,int64_t> (const at::Tensor &, bool);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_choose_qparams_per_tensor")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)")
24
+ static ::std::tuple<double,int64_t> call(const at::Tensor & self, bool reduce_range);
25
+ static ::std::tuple<double,int64_t> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool reduce_range);
26
+ };
27
+
28
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_clear_plan_cache_ops.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _cufft_clear_plan_cache {
18
+ using schema = void (at::DeviceIndex);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cufft_clear_plan_cache")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cufft_clear_plan_cache(DeviceIndex device_index) -> ()")
24
+ static void call(at::DeviceIndex device_index);
25
+ static void redispatch(c10::DispatchKeySet dispatchKeySet, at::DeviceIndex device_index);
26
+ };
27
+
28
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_dimI_ops.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _dimI {
18
+ using schema = int64_t (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_dimI")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_dimI(Tensor self) -> int")
24
+ static int64_t call(const at::Tensor & self);
25
+ static int64_t redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_backward.h ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_embedding_bag_backward_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
26
+ inline at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
27
+ return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
28
+ }
29
+ namespace symint {
30
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
31
+ at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
32
+ return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
33
+ }
34
+ }
35
+
36
+ // aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
37
+ inline at::Tensor _embedding_bag_backward_symint(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
38
+ return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
39
+ }
40
+ namespace symint {
41
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
42
+ at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
43
+ return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
44
+ }
45
+ }
46
+
47
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos_ops.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _foreach_cos {
18
+ using schema = ::std::vector<at::Tensor> (at::TensorList);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_cos")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_cos(Tensor[] self) -> Tensor[]")
24
+ static ::std::vector<at::Tensor> call(at::TensorList self);
25
+ static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
26
+ };
27
+
28
+ struct TORCH_API _foreach_cos_ {
29
+ using schema = void (at::TensorList);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_cos_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_cos_(Tensor(a!)[] self) -> ()")
35
+ static void call(at::TensorList self);
36
+ static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
37
+ };
38
+
39
+ struct TORCH_API _foreach_cos_out {
40
+ using schema = void (at::TensorList, at::TensorList);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_cos")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
46
+ static void call(at::TensorList self, at::TensorList out);
47
+ static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
48
+ };
49
+
50
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_pack_padded_sequence_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _pack_padded_sequence {
18
+ using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, bool);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_pack_padded_sequence")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)")
24
+ static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & input, const at::Tensor & lengths, bool batch_first);
25
+ static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & lengths, bool batch_first);
26
+ };
27
+
28
+ struct TORCH_API _pack_padded_sequence_out {
29
+ using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, bool, at::Tensor &, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_pack_padded_sequence")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")
35
+ static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1);
36
+ static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_remove_batch_dim.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_remove_batch_dim_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor
26
+ inline at::Tensor _remove_batch_dim(const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) {
27
+ return at::_ops::_remove_batch_dim::call(self, level, batch_size, out_dim);
28
+ }
29
+
30
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_from_tensor_ops.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _reshape_from_tensor {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_reshape_from_tensor")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, const at::Tensor & shape);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & shape);
26
+ };
27
+
28
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool is_causal=false, const c10::optional<at::Tensor> & dropout_mask={}, c10::optional<double> scale=c10::nullopt);
21
+
22
+ } // namespace compositeimplicitautograd
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_addmm.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_sparse_addmm_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
26
+ inline at::Tensor _sparse_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
27
+ return at::_ops::_sparse_addmm::call(self, mat1, mat2, beta, alpha);
28
+ }
29
+
30
+ // aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & _sparse_addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
32
+ return at::_ops::_sparse_addmm_out::call(self, mat1, mat2, beta, alpha, out);
33
+ }
34
+ // aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & _sparse_addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
36
+ return at::_ops::_sparse_addmm_out::call(self, mat1, mat2, beta, alpha, out);
37
+ }
38
+
39
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
20
+ } // namespace native
21
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_standard_gamma_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor
26
+ inline at::Tensor _standard_gamma(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
27
+ return at::_ops::_standard_gamma::call(self, generator);
28
+ }
29
+
30
+ // aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & _standard_gamma_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
32
+ return at::_ops::_standard_gamma_out::call(self, generator, out);
33
+ }
34
+ // aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & _standard_gamma_outf(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
36
+ return at::_ops::_standard_gamma_out::call(self, generator, out);
37
+ }
38
+
39
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_native.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor & _standard_gamma_out(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out);
20
+ TORCH_API at::Tensor _s_gamma_cpu(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
21
+ TORCH_API at::Tensor _s_gamma_cuda(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
22
+ } // namespace native
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_to_copy.h ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_to_copy_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
26
+ inline at::Tensor _to_copy(const at::Tensor & self, at::TensorOptions options={}, bool non_blocking=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
27
+ return at::_ops::_to_copy::call(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), non_blocking, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
28
+ }
29
+ // aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
30
+ inline at::Tensor _to_copy(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, c10::optional<at::MemoryFormat> memory_format) {
31
+ return at::_ops::_to_copy::call(self, dtype, layout, device, pin_memory, non_blocking, memory_format);
32
+ }
33
+
34
+ // aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & _to_copy_out(at::Tensor & out, const at::Tensor & self, bool non_blocking=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
36
+ return at::_ops::_to_copy_out::call(self, non_blocking, memory_format, out);
37
+ }
38
+ // aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
39
+ inline at::Tensor & _to_copy_outf(const at::Tensor & self, bool non_blocking, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
40
+ return at::_ops::_to_copy_out::call(self, non_blocking, memory_format, out);
41
+ }
42
+
43
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautogradnonfunctional {
19
+
20
+ TORCH_API at::Tensor _upsample_nearest_exact2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
21
+ TORCH_API at::Tensor _upsample_nearest_exact2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
22
+
23
+ } // namespace compositeexplicitautogradnonfunctional
24
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/all_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor all(const at::Tensor & self, at::Dimname dim, bool keepdim=false);
21
+ TORCH_API at::Tensor & all_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim=false);
22
+ TORCH_API at::Tensor & all_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out);
23
+
24
+ } // namespace compositeimplicitautograd
25
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API binary_cross_entropy_with_logits {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, int64_t);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::binary_cross_entropy_with_logits")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction);
26
+ };
27
+
28
+ struct TORCH_API binary_cross_entropy_with_logits_out {
29
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, int64_t, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::binary_cross_entropy_with_logits")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/conv3d_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1);
21
+ TORCH_API at::Tensor conv3d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1);
22
+ TORCH_API at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1);
23
+ TORCH_API at::Tensor conv3d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1);
24
+
25
+ } // namespace compositeimplicitautograd
26
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_copy.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/crow_indices_copy_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::crow_indices_copy(Tensor self) -> Tensor
26
+ inline at::Tensor crow_indices_copy(const at::Tensor & self) {
27
+ return at::_ops::crow_indices_copy::call(self);
28
+ }
29
+
30
+ // aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & crow_indices_copy_out(at::Tensor & out, const at::Tensor & self) {
32
+ return at::_ops::crow_indices_copy_out::call(self, out);
33
+ }
34
+ // aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & crow_indices_copy_outf(const at::Tensor & self, at::Tensor & out) {
36
+ return at::_ops::crow_indices_copy_out::call(self, out);
37
+ }
38
+
39
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & cudnn_affine_grid_generator_backward_out(at::Tensor & out, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W);
21
+ TORCH_API at::Tensor & cudnn_affine_grid_generator_backward_outf(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & cudnn_grid_sampler_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & grid);
21
+ TORCH_API at::Tensor & cudnn_grid_sampler_outf(const at::Tensor & self, const at::Tensor & grid, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/dropout_native.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor dropout(const at::Tensor & input, double p, bool train);
20
+ TORCH_API at::Tensor & dropout_(at::Tensor & self, double p, bool train);
21
+ } // namespace native
22
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_meta_dispatch.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API at::Tensor erfc(const at::Tensor & self);
21
+ TORCH_API at::Tensor & erfc_out(at::Tensor & out, const at::Tensor & self);
22
+ TORCH_API at::Tensor & erfc_outf(const at::Tensor & self, at::Tensor & out);
23
+ TORCH_API at::Tensor & erfc_(at::Tensor & self);
24
+
25
+ } // namespace meta
26
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API fft_rfft {
18
+ using schema = at::Tensor (const at::Tensor &, c10::optional<c10::SymInt>, int64_t, c10::optional<c10::string_view>);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_rfft")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm);
26
+ };
27
+
28
+ struct TORCH_API fft_rfft_out {
29
+ using schema = at::Tensor & (const at::Tensor &, c10::optional<c10::SymInt>, int64_t, c10::optional<c10::string_view>, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_rfft")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/TensorIterator.h>
13
+ #include <ATen/TensorMeta.h>
14
+ #include <tuple>
15
+ #include <vector>
16
+
17
+ namespace at {
18
+ namespace meta {
19
+
20
+ struct TORCH_API structured_linalg_lu_solve : public at::impl::MetaBase {
21
+
22
+
23
+ void meta(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint);
24
+ };
25
+
26
+ } // namespace native
27
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API log_sigmoid_backward_grad_input {
18
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_sigmoid_backward")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)")
24
+ static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input);
25
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input);
26
+ };
27
+
28
+ struct TORCH_API log_sigmoid_backward {
29
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_sigmoid_backward")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor")
35
+ static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer);
36
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_tanh_cell_ops.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API rnn_tanh_cell {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rnn_tanh_cell")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh);
26
+ };
27
+
28
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautogradnonfunctional {
19
+
20
+ TORCH_API at::Tensor sinc(const at::Tensor & self);
21
+ TORCH_API at::Tensor & sinc_(at::Tensor & self);
22
+
23
+ } // namespace compositeexplicitautogradnonfunctional
24
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API softplus_out {
18
+ using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::softplus")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)")
24
+ static at::Tensor & call(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out);
25
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out);
26
+ };
27
+
28
+ struct TORCH_API softplus {
29
+ using schema = at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::softplus")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor")
35
+ static at::Tensor call(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold);
36
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold);
37
+ };
38
+
39
+ }} // namespace at::_ops