ZTWHHH committed on
Commit
28e7a1b
·
verified ·
1 Parent(s): d47dacf

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/uarray.cpython-310.pyc +0 -0
  2. parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_bunch.cpython-310.pyc +0 -0
  3. parrot/lib/python3.10/site-packages/scipy/_lib/tests/test_ccallback.py +204 -0
  4. parrot/lib/python3.10/site-packages/scipy/_lib/tests/test_public_api.py +496 -0
  5. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_update_scale_compositeexplicitautograd_dispatch.h +25 -0
  6. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesce_ops.h +39 -0
  7. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_reshape_ops.h +39 -0
  8. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_sum_ops.h +39 -0
  9. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view_copy_ops.h +39 -0
  10. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_to_dense_compositeexplicitautograd_dispatch.h +24 -0
  11. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/abs_ops.h +50 -0
  12. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/argmin_ops.h +39 -0
  13. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_backward.h +39 -0
  14. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/frobenius_norm_ops.h +39 -0
  15. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hspmm.h +39 -0
  16. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_compositeexplicitautogradnonfunctional_dispatch.h +24 -0
  17. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/item.h +26 -0
  18. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_compositeexplicitautograd_dispatch.h +23 -0
  19. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
  20. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/orgqr_compositeimplicitautograd_dispatch.h +25 -0
  21. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/ormqr.h +39 -0
  22. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/pad_sequence_native.h +21 -0
  23. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/permute_copy_compositeexplicitautograd_dispatch.h +24 -0
  24. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/rshift_ops.h +83 -0
  25. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d.h +91 -0
  26. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y0_ops.h +39 -0
  27. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_u.h +67 -0
  28. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_cuda_dispatch.h +25 -0
  29. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/to_compositeimplicitautograd_dispatch.h +27 -0
  30. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_meta_dispatch.h +28 -0
  31. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/vander_compositeimplicitautograd_dispatch.h +23 -0
  32. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/view_as.h +26 -0
  33. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/view_cuda_dispatch.h +24 -0
  34. vllm/lib/python3.10/site-packages/cupyx/scipy/fft/__pycache__/__init__.cpython-310.pyc +0 -0
  35. vllm/lib/python3.10/site-packages/cupyx/scipy/fft/__pycache__/_fft.cpython-310.pyc +0 -0
  36. vllm/lib/python3.10/site-packages/cupyx/scipy/fft/__pycache__/_fftlog.cpython-310.pyc +0 -0
  37. vllm/lib/python3.10/site-packages/cupyx/scipy/fft/__pycache__/_helper.cpython-310.pyc +0 -0
  38. vllm/lib/python3.10/site-packages/cupyx/scipy/fft/__pycache__/_realtransforms.cpython-310.pyc +0 -0
  39. vllm/lib/python3.10/site-packages/cupyx/scipy/fft/_fft.py +683 -0
  40. vllm/lib/python3.10/site-packages/cupyx/scipy/fft/_helper.py +51 -0
  41. vllm/lib/python3.10/site-packages/cupyx/scipy/fft/_realtransforms.py +922 -0
  42. vllm/lib/python3.10/site-packages/cupyx/scipy/linalg/__init__.py +21 -0
  43. vllm/lib/python3.10/site-packages/cupyx/scipy/linalg/__pycache__/__init__.cpython-310.pyc +0 -0
  44. vllm/lib/python3.10/site-packages/cupyx/scipy/linalg/__pycache__/_array_utils.cpython-310.pyc +0 -0
  45. vllm/lib/python3.10/site-packages/cupyx/scipy/linalg/__pycache__/_decomp_lu.cpython-310.pyc +0 -0
  46. vllm/lib/python3.10/site-packages/cupyx/scipy/linalg/__pycache__/_matfuncs.cpython-310.pyc +0 -0
  47. vllm/lib/python3.10/site-packages/cupyx/scipy/linalg/__pycache__/_solve_triangular.cpython-310.pyc +0 -0
  48. vllm/lib/python3.10/site-packages/cupyx/scipy/linalg/__pycache__/_special_matrices.cpython-310.pyc +0 -0
  49. vllm/lib/python3.10/site-packages/cupyx/scipy/linalg/__pycache__/_uarray.cpython-310.pyc +0 -0
  50. vllm/lib/python3.10/site-packages/cupyx/scipy/linalg/_array_utils.py +55 -0
parrot/lib/python3.10/site-packages/scipy/_lib/__pycache__/uarray.cpython-310.pyc ADDED
Binary file (779 Bytes). View file
 
parrot/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_bunch.cpython-310.pyc ADDED
Binary file (6.98 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/_lib/tests/test_ccallback.py ADDED
@@ -0,0 +1,204 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numpy.testing import assert_equal, assert_
2
+ from pytest import raises as assert_raises
3
+
4
+ import time
5
+ import pytest
6
+ import ctypes
7
+ import threading
8
+ from scipy._lib import _ccallback_c as _test_ccallback_cython
9
+ from scipy._lib import _test_ccallback
10
+ from scipy._lib._ccallback import LowLevelCallable
11
+
12
# Optional dependency: the cffi-based callback variants are only exercised
# when cffi can be imported in the current environment.
try:
    import cffi
except ImportError:
    HAVE_CFFI = False
else:
    HAVE_CFFI = True
17
+
18
+
19
# Input value that every callback turns into a raised exception, so error
# propagation through the C-level callers can be tested.
ERROR_VALUE = 2.0


def callback_python(a, user_data=None):
    """Pure-Python reference callback: return *a* plus 1 (or *user_data*).

    Raises ValueError when *a* equals ERROR_VALUE.
    """
    if a == ERROR_VALUE:
        raise ValueError("bad value")

    increment = 1 if user_data is None else user_data
    return a + increment
30
+
31
def _get_cffi_func(base, signature):
    """Re-expose the ctypes callable *base* as a cffi function of *signature*."""
    if not HAVE_CFFI:
        pytest.skip("cffi not installed")

    # Raw C address of the underlying function...
    address = ctypes.cast(base, ctypes.c_void_p).value

    # ...reinterpreted by cffi under the requested signature.
    ffi = cffi.FFI()
    return ffi.cast(signature, address)
43
+
44
+
45
+ def _get_ctypes_data():
46
+ value = ctypes.c_double(2.0)
47
+ return ctypes.cast(ctypes.pointer(value), ctypes.c_voidp)
48
+
49
+
50
def _get_cffi_data():
    """Return callback user_data as a cffi-owned ``double *`` holding 2.0."""
    if not HAVE_CFFI:
        pytest.skip("cffi not installed")
    return cffi.FFI().new('double *', 2.0)
55
+
56
+
57
# C-level entry points that invoke a callback; each caller represents a
# different acquisition path in the scipy._lib test helpers.
CALLERS = {
    'simple': _test_ccallback.test_call_simple,
    'nodata': _test_ccallback.test_call_nodata,
    'nonlocal': _test_ccallback.test_call_nonlocal,
    'cython': _test_ccallback_cython.test_call_cython,
}

# These functions have signatures known to the callers
# (suffix "_b" marks the variants taking an extra double argument).
FUNCS = {
    'python': lambda: callback_python,
    'capsule': lambda: _test_ccallback.test_get_plus1_capsule(),
    'cython': lambda: LowLevelCallable.from_cython(_test_ccallback_cython,
                                                   "plus1_cython"),
    'ctypes': lambda: _test_ccallback_cython.plus1_ctypes,
    'cffi': lambda: _get_cffi_func(_test_ccallback_cython.plus1_ctypes,
                                   'double (*)(double, int *, void *)'),
    'capsule_b': lambda: _test_ccallback.test_get_plus1b_capsule(),
    'cython_b': lambda: LowLevelCallable.from_cython(_test_ccallback_cython,
                                                     "plus1b_cython"),
    'ctypes_b': lambda: _test_ccallback_cython.plus1b_ctypes,
    'cffi_b': lambda: _get_cffi_func(_test_ccallback_cython.plus1b_ctypes,
                                     'double (*)(double, double, int *, void *)'),
}

# These functions have signatures the callers don't know
# (the "_bc" variants take one more double than any caller accepts).
BAD_FUNCS = {
    'capsule_bc': lambda: _test_ccallback.test_get_plus1bc_capsule(),
    'cython_bc': lambda: LowLevelCallable.from_cython(_test_ccallback_cython,
                                                      "plus1bc_cython"),
    'ctypes_bc': lambda: _test_ccallback_cython.plus1bc_ctypes,
    'cffi_bc': lambda: _get_cffi_func(
        _test_ccallback_cython.plus1bc_ctypes,
        'double (*)(double, double, double, int *, void *)'
    ),
}

# Factories producing the different user_data representations accepted by
# LowLevelCallable (ctypes void*, cffi cdata pointer, PyCapsule).
USER_DATAS = {
    'ctypes': _get_ctypes_data,
    'cffi': _get_cffi_data,
    'capsule': _test_ccallback.test_get_data_capsule,
}
98
+
99
+
100
def test_callbacks():
    # Exercise every (caller, callback, user_data) combination whose
    # signature the caller knows how to invoke.
    def check(caller, func, user_data):
        caller = CALLERS[caller]
        func = FUNCS[func]()
        user_data = USER_DATAS[user_data]()

        if func is callback_python:
            # Pure-Python callback: user_data is bound through a closure
            # rather than through the LowLevelCallable machinery.
            def func2(x):
                return func(x, 2.0)
        else:
            func2 = LowLevelCallable(func, user_data)
            func = LowLevelCallable(func)

        # Test basic call
        assert_equal(caller(func, 1.0), 2.0)

        # Test 'bad' value resulting to an error
        assert_raises(ValueError, caller, func, ERROR_VALUE)

        # Test passing in user_data
        assert_equal(caller(func2, 1.0), 3.0)

    for caller in sorted(CALLERS.keys()):
        for func in sorted(FUNCS.keys()):
            for user_data in sorted(USER_DATAS.keys()):
                check(caller, func, user_data)
126
+
127
+
128
def test_bad_callbacks():
    # Callbacks whose signatures the callers do not recognize must be
    # rejected with a ValueError that reports the signatures involved.
    def check(caller, func, user_data):
        caller = CALLERS[caller]
        user_data = USER_DATAS[user_data]()
        func = BAD_FUNCS[func]()

        if func is callback_python:
            def func2(x):
                return func(x, 2.0)
        else:
            func2 = LowLevelCallable(func, user_data)
            func = LowLevelCallable(func)

        # Test that basic call fails
        assert_raises(ValueError, caller, LowLevelCallable(func), 1.0)

        # Test that passing in user_data also fails
        assert_raises(ValueError, caller, func2, 1.0)

        # Test error message: it should mention both the offending
        # signature and one signature the caller would have accepted.
        llfunc = LowLevelCallable(func)
        try:
            caller(llfunc, 1.0)
        except ValueError as err:
            msg = str(err)
            assert_(llfunc.signature in msg, msg)
            assert_('double (double, double, int *, void *)' in msg, msg)

    for caller in sorted(CALLERS.keys()):
        for func in sorted(BAD_FUNCS.keys()):
            for user_data in sorted(USER_DATAS.keys()):
                check(caller, func, user_data)
160
+
161
+
162
def test_signature_override():
    """An explicit ``signature=`` argument must override the capsule's own."""
    caller = _test_ccallback.test_call_simple
    capsule = _test_ccallback.test_get_plus1_capsule()

    # A bogus override is stored verbatim and rejected by the caller.
    bad = LowLevelCallable(capsule, signature="bad signature")
    assert_equal(bad.signature, "bad signature")
    assert_raises(ValueError, caller, bad, 3)

    # A matching override is stored verbatim and accepted.
    good = LowLevelCallable(capsule, signature="double (double, int *, void *)")
    assert_equal(good.signature, "double (double, int *, void *)")
    assert_equal(caller(good, 3), 4)
173
+
174
+
175
def test_threadsafety():
    # Recursive callback: each level re-enters the C caller, so nested
    # LowLevelCallable invocations run concurrently across threads.
    def callback(a, caller):
        if a <= 0:
            return 1
        else:
            res = caller(lambda x: callback(x, caller), a - 1)
            return 2*res

    def check(caller):
        caller = CALLERS[caller]

        results = []

        # recursion depth; each level doubles the result, so the expected
        # value per thread is 2**count
        count = 10

        def run():
            # small sleep so the threads start close together
            time.sleep(0.01)
            r = caller(lambda x: callback(x, caller), count)
            results.append(r)

        threads = [threading.Thread(target=run) for j in range(20)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

        assert_equal(results, [2.0**count]*len(threads))

    for caller in CALLERS.keys():
        check(caller)
parrot/lib/python3.10/site-packages/scipy/_lib/tests/test_public_api.py ADDED
@@ -0,0 +1,496 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This test script is adopted from:
3
+ https://github.com/numpy/numpy/blob/main/numpy/tests/test_public_api.py
4
+ """
5
+
6
+ import pkgutil
7
+ import types
8
+ import importlib
9
+ import warnings
10
+ from importlib import import_module
11
+
12
+ import pytest
13
+
14
+ import scipy
15
+
16
+ from scipy.conftest import xp_available_backends
17
+
18
+
19
def test_dir_testing():
    """Assert that output of dir has only one "testing/tester"
    attribute without duplicate"""
    names = dir(scipy)
    assert len(set(names)) == len(names)
23
+
24
+
25
+ # Historically SciPy has not used leading underscores for private submodules
26
+ # much. This has resulted in lots of things that look like public modules
27
+ # (i.e. things that can be imported as `import scipy.somesubmodule.somefile`),
28
+ # but were never intended to be public. The PUBLIC_MODULES list contains
29
+ # modules that are either public because they were meant to be, or because they
30
+ # contain public functions/objects that aren't present in any other namespace
31
+ # for whatever reason and therefore should be treated as public.
32
+ PUBLIC_MODULES = ["scipy." + s for s in [
33
+ "cluster",
34
+ "cluster.vq",
35
+ "cluster.hierarchy",
36
+ "constants",
37
+ "datasets",
38
+ "fft",
39
+ "fftpack",
40
+ "integrate",
41
+ "interpolate",
42
+ "io",
43
+ "io.arff",
44
+ "io.matlab",
45
+ "io.wavfile",
46
+ "linalg",
47
+ "linalg.blas",
48
+ "linalg.cython_blas",
49
+ "linalg.lapack",
50
+ "linalg.cython_lapack",
51
+ "linalg.interpolative",
52
+ "misc",
53
+ "ndimage",
54
+ "odr",
55
+ "optimize",
56
+ "signal",
57
+ "signal.windows",
58
+ "sparse",
59
+ "sparse.linalg",
60
+ "sparse.csgraph",
61
+ "spatial",
62
+ "spatial.distance",
63
+ "spatial.transform",
64
+ "special",
65
+ "stats",
66
+ "stats.contingency",
67
+ "stats.distributions",
68
+ "stats.mstats",
69
+ "stats.qmc",
70
+ "stats.sampling"
71
+ ]]
72
+
73
+ # The PRIVATE_BUT_PRESENT_MODULES list contains modules that lacked underscores
74
+ # in their name and hence looked public, but weren't meant to be. All these
75
+ # namespace were deprecated in the 1.8.0 release - see "clear split between
76
+ # public and private API" in the 1.8.0 release notes.
77
+ # These private modules support will be removed in SciPy v2.0.0, as the
78
+ # deprecation messages emitted by each of these modules say.
79
+ PRIVATE_BUT_PRESENT_MODULES = [
80
+ 'scipy.constants.codata',
81
+ 'scipy.constants.constants',
82
+ 'scipy.fftpack.basic',
83
+ 'scipy.fftpack.convolve',
84
+ 'scipy.fftpack.helper',
85
+ 'scipy.fftpack.pseudo_diffs',
86
+ 'scipy.fftpack.realtransforms',
87
+ 'scipy.integrate.dop',
88
+ 'scipy.integrate.lsoda',
89
+ 'scipy.integrate.odepack',
90
+ 'scipy.integrate.quadpack',
91
+ 'scipy.integrate.vode',
92
+ 'scipy.interpolate.dfitpack',
93
+ 'scipy.interpolate.fitpack',
94
+ 'scipy.interpolate.fitpack2',
95
+ 'scipy.interpolate.interpnd',
96
+ 'scipy.interpolate.interpolate',
97
+ 'scipy.interpolate.ndgriddata',
98
+ 'scipy.interpolate.polyint',
99
+ 'scipy.interpolate.rbf',
100
+ 'scipy.io.arff.arffread',
101
+ 'scipy.io.harwell_boeing',
102
+ 'scipy.io.idl',
103
+ 'scipy.io.matlab.byteordercodes',
104
+ 'scipy.io.matlab.mio',
105
+ 'scipy.io.matlab.mio4',
106
+ 'scipy.io.matlab.mio5',
107
+ 'scipy.io.matlab.mio5_params',
108
+ 'scipy.io.matlab.mio5_utils',
109
+ 'scipy.io.matlab.mio_utils',
110
+ 'scipy.io.matlab.miobase',
111
+ 'scipy.io.matlab.streams',
112
+ 'scipy.io.mmio',
113
+ 'scipy.io.netcdf',
114
+ 'scipy.linalg.basic',
115
+ 'scipy.linalg.decomp',
116
+ 'scipy.linalg.decomp_cholesky',
117
+ 'scipy.linalg.decomp_lu',
118
+ 'scipy.linalg.decomp_qr',
119
+ 'scipy.linalg.decomp_schur',
120
+ 'scipy.linalg.decomp_svd',
121
+ 'scipy.linalg.matfuncs',
122
+ 'scipy.linalg.misc',
123
+ 'scipy.linalg.special_matrices',
124
+ 'scipy.misc.common',
125
+ 'scipy.misc.doccer',
126
+ 'scipy.ndimage.filters',
127
+ 'scipy.ndimage.fourier',
128
+ 'scipy.ndimage.interpolation',
129
+ 'scipy.ndimage.measurements',
130
+ 'scipy.ndimage.morphology',
131
+ 'scipy.odr.models',
132
+ 'scipy.odr.odrpack',
133
+ 'scipy.optimize.cobyla',
134
+ 'scipy.optimize.cython_optimize',
135
+ 'scipy.optimize.lbfgsb',
136
+ 'scipy.optimize.linesearch',
137
+ 'scipy.optimize.minpack',
138
+ 'scipy.optimize.minpack2',
139
+ 'scipy.optimize.moduleTNC',
140
+ 'scipy.optimize.nonlin',
141
+ 'scipy.optimize.optimize',
142
+ 'scipy.optimize.slsqp',
143
+ 'scipy.optimize.tnc',
144
+ 'scipy.optimize.zeros',
145
+ 'scipy.signal.bsplines',
146
+ 'scipy.signal.filter_design',
147
+ 'scipy.signal.fir_filter_design',
148
+ 'scipy.signal.lti_conversion',
149
+ 'scipy.signal.ltisys',
150
+ 'scipy.signal.signaltools',
151
+ 'scipy.signal.spectral',
152
+ 'scipy.signal.spline',
153
+ 'scipy.signal.waveforms',
154
+ 'scipy.signal.wavelets',
155
+ 'scipy.signal.windows.windows',
156
+ 'scipy.sparse.base',
157
+ 'scipy.sparse.bsr',
158
+ 'scipy.sparse.compressed',
159
+ 'scipy.sparse.construct',
160
+ 'scipy.sparse.coo',
161
+ 'scipy.sparse.csc',
162
+ 'scipy.sparse.csr',
163
+ 'scipy.sparse.data',
164
+ 'scipy.sparse.dia',
165
+ 'scipy.sparse.dok',
166
+ 'scipy.sparse.extract',
167
+ 'scipy.sparse.lil',
168
+ 'scipy.sparse.linalg.dsolve',
169
+ 'scipy.sparse.linalg.eigen',
170
+ 'scipy.sparse.linalg.interface',
171
+ 'scipy.sparse.linalg.isolve',
172
+ 'scipy.sparse.linalg.matfuncs',
173
+ 'scipy.sparse.sparsetools',
174
+ 'scipy.sparse.spfuncs',
175
+ 'scipy.sparse.sputils',
176
+ 'scipy.spatial.ckdtree',
177
+ 'scipy.spatial.kdtree',
178
+ 'scipy.spatial.qhull',
179
+ 'scipy.spatial.transform.rotation',
180
+ 'scipy.special.add_newdocs',
181
+ 'scipy.special.basic',
182
+ 'scipy.special.cython_special',
183
+ 'scipy.special.orthogonal',
184
+ 'scipy.special.sf_error',
185
+ 'scipy.special.specfun',
186
+ 'scipy.special.spfun_stats',
187
+ 'scipy.stats.biasedurn',
188
+ 'scipy.stats.kde',
189
+ 'scipy.stats.morestats',
190
+ 'scipy.stats.mstats_basic',
191
+ 'scipy.stats.mstats_extras',
192
+ 'scipy.stats.mvn',
193
+ 'scipy.stats.stats',
194
+ ]
195
+
196
+
197
def is_unexpected(name):
    """Check if this needs to be considered."""
    # Private modules, test packages and setup files are never a concern.
    if '._' in name or '.tests' in name or '.setup' in name:
        return False

    # Anything already catalogued as public or known-private is expected.
    if name in PUBLIC_MODULES or name in PRIVATE_BUT_PRESENT_MODULES:
        return False

    return True
209
+
210
+
211
# Modules discoverable on disk that are deliberately neither public API nor
# flagged as unexpected by the walk in `test_all_modules_are_expected`.
SKIP_LIST = [
    'scipy.conftest',
    'scipy.version',
    'scipy.special.libsf_error_state'
]
216
+
217
+
218
# XXX: this test does more than it says on the tin - in using `pkgutil.walk_packages`,
# it will raise if it encounters any exceptions which are not handled by `ignore_errors`
# while attempting to import each discovered package.
# For now, `ignore_errors` only ignores what is necessary, but this could be expanded -
# for example, to all errors from private modules or git subpackages - if desired.
def test_all_modules_are_expected():
    """
    Test that we don't add anything that looks like a new public module by
    accident. Check is based on filenames.
    """

    def ignore_errors(name):
        # if versions of other array libraries are installed which are incompatible
        # with the installed NumPy version, there can be errors on importing
        # `array_api_compat`. This should only raise if SciPy is configured with
        # that library as an available backend.
        backends = {'cupy': 'cupy',
                    'pytorch': 'torch',
                    'dask.array': 'dask.array'}
        for backend, dir_name in backends.items():
            path = f'array_api_compat.{dir_name}'
            if path in name and backend not in xp_available_backends:
                return
        # bare `raise` re-raises the exception pkgutil delivered via onerror
        raise

    modnames = []

    for _, modname, _ in pkgutil.walk_packages(path=scipy.__path__,
                                               prefix=scipy.__name__ + '.',
                                               onerror=ignore_errors):
        if is_unexpected(modname) and modname not in SKIP_LIST:
            # We have a name that is new. If that's on purpose, add it to
            # PUBLIC_MODULES. We don't expect to have to add anything to
            # PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name!
            modnames.append(modname)

    if modnames:
        raise AssertionError(f'Found unexpected modules: {modnames}')
256
+
257
+
258
# Stuff that clearly shouldn't be in the API and is detected by the next test
# below
# NOTE(review): these look like NumPy namespaces that end up visible under
# `scipy` — confirm the mechanism before changing this list.
SKIP_LIST_2 = [
    'scipy.char',
    'scipy.rec',
    'scipy.emath',
    'scipy.math',
    'scipy.random',
    'scipy.ctypeslib',
    'scipy.ma'
]
269
+
270
+
271
def test_all_modules_are_expected_2():
    """
    Method checking all objects. The pkgutil-based method in
    `test_all_modules_are_expected` does not catch imports into a namespace,
    only filenames.
    """

    def find_unexpected_members(mod_name):
        # Collect module-valued public attributes of `mod_name` that are
        # neither declared public nor explicitly skipped.
        members = []
        module = importlib.import_module(mod_name)
        if hasattr(module, '__all__'):
            objnames = module.__all__
        else:
            objnames = dir(module)

        for objname in objnames:
            if not objname.startswith('_'):
                fullobjname = mod_name + '.' + objname
                if isinstance(getattr(module, objname), types.ModuleType):
                    if is_unexpected(fullobjname) and fullobjname not in SKIP_LIST_2:
                        members.append(fullobjname)

        return members

    unexpected_members = find_unexpected_members("scipy")
    for modname in PUBLIC_MODULES:
        unexpected_members.extend(find_unexpected_members(modname))

    if unexpected_members:
        raise AssertionError("Found unexpected object(s) that look like "
                             f"modules: {unexpected_members}")
302
+
303
+
304
def test_api_importable():
    """
    Check that all submodules listed higher up in this file can be imported
    Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may
    simply need to be removed from the list (deprecation may or may not be
    needed - apply common sense).
    """
    def check_importable(module_name):
        try:
            importlib.import_module(module_name)
        except (ImportError, AttributeError):
            return False

        return True

    module_names = []
    for module_name in PUBLIC_MODULES:
        if not check_importable(module_name):
            module_names.append(module_name)

    if module_names:
        raise AssertionError("Modules in the public API that cannot be "
                             f"imported: {module_names}")

    # The deprecated namespaces warn on import; record the warnings so they
    # do not leak out of the test.
    with warnings.catch_warnings(record=True):
        warnings.filterwarnings('always', category=DeprecationWarning)
        warnings.filterwarnings('always', category=ImportWarning)
        for module_name in PRIVATE_BUT_PRESENT_MODULES:
            if not check_importable(module_name):
                module_names.append(module_name)

    if module_names:
        raise AssertionError("Modules that are not really public but looked "
                             "public and can not be imported: "
                             f"{module_names}")
339
+
340
+
341
# Each pair is (deprecated private module, correct public submodule or None
# when the public location is the second-level package itself).
@pytest.mark.parametrize(("module_name", "correct_module"),
                         [('scipy.constants.codata', None),
                          ('scipy.constants.constants', None),
                          ('scipy.fftpack.basic', None),
                          ('scipy.fftpack.helper', None),
                          ('scipy.fftpack.pseudo_diffs', None),
                          ('scipy.fftpack.realtransforms', None),
                          ('scipy.integrate.dop', None),
                          ('scipy.integrate.lsoda', None),
                          ('scipy.integrate.odepack', None),
                          ('scipy.integrate.quadpack', None),
                          ('scipy.integrate.vode', None),
                          ('scipy.interpolate.fitpack', None),
                          ('scipy.interpolate.fitpack2', None),
                          ('scipy.interpolate.interpolate', None),
                          ('scipy.interpolate.ndgriddata', None),
                          ('scipy.interpolate.polyint', None),
                          ('scipy.interpolate.rbf', None),
                          ('scipy.io.harwell_boeing', None),
                          ('scipy.io.idl', None),
                          ('scipy.io.mmio', None),
                          ('scipy.io.netcdf', None),
                          ('scipy.io.arff.arffread', 'arff'),
                          ('scipy.io.matlab.byteordercodes', 'matlab'),
                          ('scipy.io.matlab.mio_utils', 'matlab'),
                          ('scipy.io.matlab.mio', 'matlab'),
                          ('scipy.io.matlab.mio4', 'matlab'),
                          ('scipy.io.matlab.mio5_params', 'matlab'),
                          ('scipy.io.matlab.mio5_utils', 'matlab'),
                          ('scipy.io.matlab.mio5', 'matlab'),
                          ('scipy.io.matlab.miobase', 'matlab'),
                          ('scipy.io.matlab.streams', 'matlab'),
                          ('scipy.linalg.basic', None),
                          ('scipy.linalg.decomp', None),
                          ('scipy.linalg.decomp_cholesky', None),
                          ('scipy.linalg.decomp_lu', None),
                          ('scipy.linalg.decomp_qr', None),
                          ('scipy.linalg.decomp_schur', None),
                          ('scipy.linalg.decomp_svd', None),
                          ('scipy.linalg.matfuncs', None),
                          ('scipy.linalg.misc', None),
                          ('scipy.linalg.special_matrices', None),
                          ('scipy.misc.common', None),
                          ('scipy.ndimage.filters', None),
                          ('scipy.ndimage.fourier', None),
                          ('scipy.ndimage.interpolation', None),
                          ('scipy.ndimage.measurements', None),
                          ('scipy.ndimage.morphology', None),
                          ('scipy.odr.models', None),
                          ('scipy.odr.odrpack', None),
                          ('scipy.optimize.cobyla', None),
                          ('scipy.optimize.lbfgsb', None),
                          ('scipy.optimize.linesearch', None),
                          ('scipy.optimize.minpack', None),
                          ('scipy.optimize.minpack2', None),
                          ('scipy.optimize.moduleTNC', None),
                          ('scipy.optimize.nonlin', None),
                          ('scipy.optimize.optimize', None),
                          ('scipy.optimize.slsqp', None),
                          ('scipy.optimize.tnc', None),
                          ('scipy.optimize.zeros', None),
                          ('scipy.signal.bsplines', None),
                          ('scipy.signal.filter_design', None),
                          ('scipy.signal.fir_filter_design', None),
                          ('scipy.signal.lti_conversion', None),
                          ('scipy.signal.ltisys', None),
                          ('scipy.signal.signaltools', None),
                          ('scipy.signal.spectral', None),
                          ('scipy.signal.waveforms', None),
                          ('scipy.signal.wavelets', None),
                          ('scipy.signal.windows.windows', 'windows'),
                          ('scipy.sparse.lil', None),
                          ('scipy.sparse.linalg.dsolve', 'linalg'),
                          ('scipy.sparse.linalg.eigen', 'linalg'),
                          ('scipy.sparse.linalg.interface', 'linalg'),
                          ('scipy.sparse.linalg.isolve', 'linalg'),
                          ('scipy.sparse.linalg.matfuncs', 'linalg'),
                          ('scipy.sparse.sparsetools', None),
                          ('scipy.sparse.spfuncs', None),
                          ('scipy.sparse.sputils', None),
                          ('scipy.spatial.ckdtree', None),
                          ('scipy.spatial.kdtree', None),
                          ('scipy.spatial.qhull', None),
                          ('scipy.spatial.transform.rotation', 'transform'),
                          ('scipy.special.add_newdocs', None),
                          ('scipy.special.basic', None),
                          ('scipy.special.orthogonal', None),
                          ('scipy.special.sf_error', None),
                          ('scipy.special.specfun', None),
                          ('scipy.special.spfun_stats', None),
                          ('scipy.stats.biasedurn', None),
                          ('scipy.stats.kde', None),
                          ('scipy.stats.morestats', None),
                          ('scipy.stats.mstats_basic', 'mstats'),
                          ('scipy.stats.mstats_extras', 'mstats'),
                          ('scipy.stats.mvn', None),
                          ('scipy.stats.stats', None)])
def test_private_but_present_deprecation(module_name, correct_module):
    # gh-18279, gh-17572, gh-17771 noted that deprecation warnings
    # for imports from private modules
    # were misleading. Check that this is resolved.
    module = import_module(module_name)
    if correct_module is None:
        # e.g. 'scipy.linalg.basic' -> 'scipy.linalg'
        import_name = f'scipy.{module_name.split(".")[1]}'
    else:
        # e.g. 'scipy.io.matlab.mio' -> 'scipy.io.matlab'
        import_name = f'scipy.{module_name.split(".")[1]}.{correct_module}'

    correct_import = import_module(import_name)

    # Attributes that were formerly in `module_name` can still be imported from
    # `module_name`, albeit with a deprecation warning.
    for attr_name in module.__all__:
        if attr_name == "varmats_from_mat":
            # defer handling this case, see
            # https://github.com/scipy/scipy/issues/19223
            continue
        # ensure attribute is present where the warning is pointing
        assert getattr(correct_import, attr_name, None) is not None
        message = f"Please import `{attr_name}` from the `{import_name}`..."
        with pytest.deprecated_call(match=message):
            getattr(module, attr_name)

    # Attributes that were not in `module_name` get an error notifying the user
    # that the attribute is not in `module_name` and that `module_name` is deprecated.
    message = f"`{module_name}` is deprecated..."
    with pytest.raises(AttributeError, match=message):
        getattr(module, "ekki")
468
+
469
+
470
def test_misc_doccer_deprecation():
    # gh-18279, gh-17572, gh-17771 noted that deprecation warnings
    # for imports from private modules were misleading.
    # Check that this is resolved.
    # `test_private_but_present_deprecation` cannot be used since `correct_import`
    # is a different subpackage (`_lib` instead of `misc`).
    module = import_module('scipy.misc.doccer')
    correct_import = import_module('scipy._lib.doccer')

    # Attributes that were formerly in `scipy.misc.doccer` can still be imported from
    # `scipy.misc.doccer`, albeit with a deprecation warning. The specific message
    # depends on whether the attribute is in `scipy._lib.doccer` or not.
    for attr_name in module.__all__:
        attr = getattr(correct_import, attr_name, None)
        if attr is None:
            message = f"`scipy.misc.{attr_name}` is deprecated..."
        else:
            message = f"Please import `{attr_name}` from the `scipy._lib.doccer`..."
        with pytest.deprecated_call(match=message):
            getattr(module, attr_name)

    # Attributes that were not in `scipy.misc.doccer` get an error
    # notifying the user that the attribute is not in `scipy.misc.doccer`
    # and that `scipy.misc.doccer` is deprecated.
    message = "`scipy.misc.doccer` is deprecated..."
    with pytest.raises(AttributeError, match=message):
        getattr(module, "ekki")
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_update_scale_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once
// @generated by torchgen/gen.py from DispatchKeyFunction.h

// NB: The implementing C++ file is RegisterDispatchKey.cpp

// The only #includes we need are for custom classes that have defaults in the C++ API
#include <c10/core/MemoryFormat.h>
#include <c10/core/Scalar.h>
#include <ATen/core/Reduction.h>

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include <ATen/core/ATen_fwd.h>

namespace at {

namespace compositeexplicitautograd {

// Functional variant: returns a pair of tensors.
TORCH_API ::std::tuple<at::Tensor,at::Tensor> _amp_update_scale(const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval);
// Out variants: identical operator with `out` as the leading (`_out`) or
// trailing (`_outf`) argument.
TORCH_API at::Tensor & _amp_update_scale_out(at::Tensor & out, const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval);
TORCH_API at::Tensor & _amp_update_scale_outf(const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor & out);

} // namespace compositeexplicitautograd
} // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesce_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _coalesce {
18
+ using schema = at::Tensor (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_coalesce")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_coalesce(Tensor self) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ struct TORCH_API _coalesce_out {
29
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_coalesce")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_reshape_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _mkldnn_reshape {
18
+ using schema = at::Tensor (const at::Tensor &, at::IntArrayRef);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_mkldnn_reshape")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_mkldnn_reshape(Tensor self, int[] shape) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, at::IntArrayRef shape);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shape);
26
+ };
27
+
28
+ struct TORCH_API _mkldnn_reshape_out {
29
+ using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_mkldnn_reshape")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, at::IntArrayRef shape, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shape, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_sum_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _sparse_csr_sum_dim_dtype {
18
+ using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional<at::ScalarType>);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_csr_sum")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim_dtype")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype);
26
+ };
27
+
28
+ struct TORCH_API _sparse_csr_sum_dim_dtype_out {
29
+ using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional<at::ScalarType>, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_csr_sum")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim_dtype_out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view_copy_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _test_autograd_multiple_dispatch_view_copy {
18
+ using schema = at::Tensor (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_test_autograd_multiple_dispatch_view_copy")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ struct TORCH_API _test_autograd_multiple_dispatch_view_copy_out {
29
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_test_autograd_multiple_dispatch_view_copy")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_to_dense_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & _to_dense_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<bool> masked_grad=c10::nullopt);
21
+ TORCH_API at::Tensor & _to_dense_outf(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<bool> masked_grad, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/abs_ops.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API abs {
18
+ using schema = at::Tensor (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::abs")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "abs(Tensor self) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ struct TORCH_API abs_ {
29
+ using schema = at::Tensor & (at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::abs_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "abs_(Tensor(a!) self) -> Tensor(a!)")
35
+ static at::Tensor & call(at::Tensor & self);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
37
+ };
38
+
39
+ struct TORCH_API abs_out {
40
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::abs")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
46
+ static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
47
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
48
+ };
49
+
50
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/argmin_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API argmin {
18
+ using schema = at::Tensor (const at::Tensor &, c10::optional<int64_t>, bool);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::argmin")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim);
26
+ };
27
+
28
+ struct TORCH_API argmin_out {
29
+ using schema = at::Tensor & (const at::Tensor &, c10::optional<int64_t>, bool, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::argmin")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_backward.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/cudnn_grid_sampler_backward_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)
26
+ inline ::std::tuple<at::Tensor,at::Tensor> cudnn_grid_sampler_backward(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
27
+ return at::_ops::cudnn_grid_sampler_backward::call(self, grid, grad_output);
28
+ }
29
+
30
+ // aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
31
+ inline ::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
32
+ return at::_ops::cudnn_grid_sampler_backward_out::call(self, grid, grad_output, out0, out1);
33
+ }
34
+ // aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
35
+ inline ::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_outf(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output, at::Tensor & out0, at::Tensor & out1) {
36
+ return at::_ops::cudnn_grid_sampler_backward_out::call(self, grid, grad_output, out0, out1);
37
+ }
38
+
39
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/frobenius_norm_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API frobenius_norm_dim {
18
+ using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, bool);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::frobenius_norm")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim);
26
+ };
27
+
28
+ struct TORCH_API frobenius_norm_out {
29
+ using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::frobenius_norm")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hspmm.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/hspmm_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
26
+ inline at::Tensor & hspmm_out(at::Tensor & out, const at::Tensor & mat1, const at::Tensor & mat2) {
27
+ return at::_ops::hspmm_out::call(mat1, mat2, out);
28
+ }
29
+ // aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
30
+ inline at::Tensor & hspmm_outf(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) {
31
+ return at::_ops::hspmm_out::call(mat1, mat2, out);
32
+ }
33
+
34
+ // aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor
35
+ inline at::Tensor hspmm(const at::Tensor & mat1, const at::Tensor & mat2) {
36
+ return at::_ops::hspmm::call(mat1, mat2);
37
+ }
38
+
39
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautogradnonfunctional {
19
+
20
+ TORCH_API at::Tensor igammac(const at::Tensor & self, const at::Tensor & other);
21
+ TORCH_API at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other);
22
+
23
+ } // namespace compositeexplicitautogradnonfunctional
24
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/item.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/item_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+
26
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false);
21
+
22
+ } // namespace compositeexplicitautograd
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautogradnonfunctional {
19
+
20
+ TORCH_API at::Tensor mse_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean);
21
+
22
+ } // namespace compositeexplicitautogradnonfunctional
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/orgqr_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor orgqr(const at::Tensor & self, const at::Tensor & input2);
21
+ TORCH_API at::Tensor & orgqr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & input2);
22
+ TORCH_API at::Tensor & orgqr_outf(const at::Tensor & self, const at::Tensor & input2, at::Tensor & out);
23
+
24
+ } // namespace compositeimplicitautograd
25
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/ormqr.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/ormqr_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)
26
+ inline at::Tensor & ormqr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left=true, bool transpose=false) {
27
+ return at::_ops::ormqr_out::call(self, input2, input3, left, transpose, out);
28
+ }
29
+ // aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)
30
+ inline at::Tensor & ormqr_outf(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose, at::Tensor & out) {
31
+ return at::_ops::ormqr_out::call(self, input2, input3, left, transpose, out);
32
+ }
33
+
34
+ // aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor
35
+ inline at::Tensor ormqr(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left=true, bool transpose=false) {
36
+ return at::_ops::ormqr::call(self, input2, input3, left, transpose);
37
+ }
38
+
39
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/pad_sequence_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor pad_sequence(at::TensorList sequences, bool batch_first=false, double padding_value=0.0);
20
+ } // namespace native
21
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/permute_copy_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & permute_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dims);
21
+ TORCH_API at::Tensor & permute_copy_outf(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/rshift_ops.h ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API __rshift___Scalar {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::__rshift__")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "__rshift__.Scalar(Tensor self, Scalar other) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
26
+ };
27
+
28
+ struct TORCH_API __rshift___Tensor {
29
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::__rshift__")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "__rshift__.Tensor(Tensor self, Tensor other) -> Tensor")
35
+ static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
36
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
37
+ };
38
+
39
+ struct TORCH_API __irshift___Scalar {
40
+ using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::__irshift__")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
46
+ static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
47
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
48
+ };
49
+
50
+ struct TORCH_API __irshift___Tensor {
51
+ using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
52
+ using ptr_schema = schema*;
53
+ // See Note [static constexpr char* members for windows NVCC]
54
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::__irshift__")
55
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
56
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
57
+ static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
58
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
59
+ };
60
+
61
+ struct TORCH_API __rshift___Scalar_out {
62
+ using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
63
+ using ptr_schema = schema*;
64
+ // See Note [static constexpr char* members for windows NVCC]
65
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::__rshift__")
66
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
67
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")
68
+ static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
69
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
70
+ };
71
+
72
+ struct TORCH_API __rshift___Tensor_out {
73
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
74
+ using ptr_schema = schema*;
75
+ // See Note [static constexpr char* members for windows NVCC]
76
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::__rshift__")
77
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
78
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
79
+ static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
80
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
81
+ };
82
+
83
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d.h ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/slow_conv_transpose2d_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
26
+ inline at::Tensor & slow_conv_transpose2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) {
27
+ return at::_ops::slow_conv_transpose2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation), out);
28
+ }
29
+ namespace symint {
30
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
31
+ at::Tensor & slow_conv_transpose2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) {
32
+ return at::_ops::slow_conv_transpose2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation), out);
33
+ }
34
+ }
35
+
36
+ // aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
37
+ inline at::Tensor & slow_conv_transpose2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
38
+ return at::_ops::slow_conv_transpose2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation), out);
39
+ }
40
+ namespace symint {
41
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
42
+ at::Tensor & slow_conv_transpose2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
43
+ return at::_ops::slow_conv_transpose2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation), out);
44
+ }
45
+ }
46
+
47
+ // aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
48
+ inline at::Tensor & slow_conv_transpose2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) {
49
+ return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
50
+ }
51
+ namespace symint {
52
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
53
+ at::Tensor & slow_conv_transpose2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) {
54
+ return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
55
+ }
56
+ }
57
+
58
+ // aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
59
+ inline at::Tensor & slow_conv_transpose2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
60
+ return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
61
+ }
62
+ namespace symint {
63
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
64
+ at::Tensor & slow_conv_transpose2d_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
65
+ return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
66
+ }
67
+ }
68
+
69
+ // aten::slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor
70
+ inline at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) {
71
+ return at::_ops::slow_conv_transpose2d::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation));
72
+ }
73
+ namespace symint {
74
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
75
+ at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) {
76
+ return at::_ops::slow_conv_transpose2d::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation));
77
+ }
78
+ }
79
+
80
+ // aten::slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor
81
+ inline at::Tensor slow_conv_transpose2d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) {
82
+ return at::_ops::slow_conv_transpose2d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
83
+ }
84
+ namespace symint {
85
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
86
+ at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) {
87
+ return at::_ops::slow_conv_transpose2d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
88
+ }
89
+ }
90
+
91
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y0_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API special_bessel_y0 {
18
+ using schema = at::Tensor (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_bessel_y0")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_bessel_y0(Tensor self) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ struct TORCH_API special_bessel_y0_out {
29
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_bessel_y0")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_u.h ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/special_chebyshev_polynomial_u_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
26
+ inline at::Tensor special_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n) {
27
+ return at::_ops::special_chebyshev_polynomial_u::call(x, n);
28
+ }
29
+
30
+ // aten::special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
31
+ inline at::Tensor special_chebyshev_polynomial_u(const at::Scalar & x, const at::Tensor & n) {
32
+ return at::_ops::special_chebyshev_polynomial_u_x_scalar::call(x, n);
33
+ }
34
+
35
+ // aten::special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
36
+ inline at::Tensor special_chebyshev_polynomial_u(const at::Tensor & x, const at::Scalar & n) {
37
+ return at::_ops::special_chebyshev_polynomial_u_n_scalar::call(x, n);
38
+ }
39
+
40
+ // aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
41
+ inline at::Tensor & special_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
42
+ return at::_ops::special_chebyshev_polynomial_u_out::call(x, n, out);
43
+ }
44
+ // aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
45
+ inline at::Tensor & special_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
46
+ return at::_ops::special_chebyshev_polynomial_u_out::call(x, n, out);
47
+ }
48
+
49
+ // aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
50
+ inline at::Tensor & special_chebyshev_polynomial_u_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
51
+ return at::_ops::special_chebyshev_polynomial_u_x_scalar_out::call(x, n, out);
52
+ }
53
+ // aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
54
+ inline at::Tensor & special_chebyshev_polynomial_u_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
55
+ return at::_ops::special_chebyshev_polynomial_u_x_scalar_out::call(x, n, out);
56
+ }
57
+
58
+ // aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
59
+ inline at::Tensor & special_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
60
+ return at::_ops::special_chebyshev_polynomial_u_n_scalar_out::call(x, n, out);
61
+ }
62
+ // aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
63
+ inline at::Tensor & special_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
64
+ return at::_ops::special_chebyshev_polynomial_u_n_scalar_out::call(x, n, out);
65
+ }
66
+
67
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_cuda_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor special_modified_bessel_i0(const at::Tensor & self);
21
+ TORCH_API at::Tensor & special_modified_bessel_i0_out(at::Tensor & out, const at::Tensor & self);
22
+ TORCH_API at::Tensor & special_modified_bessel_i0_outf(const at::Tensor & self, at::Tensor & out);
23
+
24
+ } // namespace cuda
25
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/to_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor to(const at::Tensor & self, at::TensorOptions options={}, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
21
+ TORCH_API at::Tensor to(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format);
22
+ TORCH_API at::Tensor to(const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
23
+ TORCH_API at::Tensor to(const at::Tensor & self, at::ScalarType dtype, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
24
+ TORCH_API at::Tensor to(const at::Tensor & self, const at::Tensor & other, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
25
+
26
+ } // namespace compositeimplicitautograd
27
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_meta_dispatch.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API at::Tensor upsample_trilinear3d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
21
+ TORCH_API at::Tensor upsample_trilinear3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
22
+ TORCH_API at::Tensor & upsample_trilinear3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
23
+ TORCH_API at::Tensor & upsample_trilinear3d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
24
+ TORCH_API at::Tensor & upsample_trilinear3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
25
+ TORCH_API at::Tensor & upsample_trilinear3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
26
+
27
+ } // namespace meta
28
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/vander_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor vander(const at::Tensor & x, c10::optional<int64_t> N=c10::nullopt, bool increasing=false);
21
+
22
+ } // namespace compositeimplicitautograd
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/view_as.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/view_as_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+
26
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/view_cuda_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor view(const at::Tensor & self, at::IntArrayRef size);
21
+ TORCH_API at::Tensor view_symint(const at::Tensor & self, c10::SymIntArrayRef size);
22
+
23
+ } // namespace cuda
24
+ } // namespace at
vllm/lib/python3.10/site-packages/cupyx/scipy/fft/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.07 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/fft/__pycache__/_fft.cpython-310.pyc ADDED
Binary file (27 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/fft/__pycache__/_fftlog.cpython-310.pyc ADDED
Binary file (5.36 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/fft/__pycache__/_helper.cpython-310.pyc ADDED
Binary file (1.58 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/fft/__pycache__/_realtransforms.cpython-310.pyc ADDED
Binary file (25.8 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/fft/_fft.py ADDED
@@ -0,0 +1,683 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numbers import Number
2
+ import warnings
3
+
4
+ import numpy as np
5
+
6
+ import cupy
7
+
8
+ from cupy.fft._fft import (_fft, _default_fft_func, hfft as _hfft,
9
+ ihfft as _ihfft, _swap_direction)
10
+
11
+ _scipy_150 = False
12
+ _scipy_160 = False
13
+ try:
14
+ import scipy
15
+ import scipy.fft as _scipy_fft
16
+ except ImportError:
17
+ class _DummyModule:
18
+ def __getattr__(self, name):
19
+ return None
20
+
21
+ _scipy_fft = _DummyModule()
22
+ else:
23
+ from numpy.lib import NumpyVersion as Version
24
+ _scipy_150 = Version(scipy.__version__) >= Version('1.5.0')
25
+ _scipy_160 = Version(scipy.__version__) >= Version('1.6.0')
26
+ del Version
27
+ del scipy
28
+
29
+ # Backend support for scipy.fft
30
+
31
+ __ua_domain__ = 'numpy.scipy.fft'
32
+ _implemented: dict = {}
33
+
34
+
35
+ def __ua_convert__(dispatchables, coerce):
36
+ if coerce:
37
+ try:
38
+ replaced = [
39
+ cupy.asarray(d.value) if d.coercible and d.type is np.ndarray
40
+ else d.value for d in dispatchables]
41
+ except TypeError:
42
+ return NotImplemented
43
+ else:
44
+ replaced = [d.value for d in dispatchables]
45
+
46
+ if not all(d.type is not np.ndarray or isinstance(r, cupy.ndarray)
47
+ for r, d in zip(replaced, dispatchables)):
48
+ return NotImplemented
49
+
50
+ return replaced
51
+
52
+
53
+ def __ua_function__(method, args, kwargs):
54
+ fn = _implemented.get(method, None)
55
+ if fn is None:
56
+ return NotImplemented
57
+ if 'plan' in kwargs and not _scipy_150:
58
+ warnings.warn('The \'plan\' argument is supported in SciPy v1.5.0+')
59
+ return fn(*args, **kwargs)
60
+
61
+
62
+ def _implements(scipy_func):
63
+ """Decorator adds function to the dictionary of implemented functions"""
64
+ def inner(func):
65
+ _implemented[scipy_func] = func
66
+ return func
67
+
68
+ return inner
69
+
70
+
71
+ def _assequence(x):
72
+ """Convert scalars to a sequence, otherwise pass through ``x`` unchanged"""
73
+ if isinstance(x, Number):
74
+ return (x,)
75
+ return x
76
+
77
+
78
+ @_implements(_scipy_fft.fft)
79
+ def fft(x, n=None, axis=-1, norm=None, overwrite_x=False, *, plan=None):
80
+ """Compute the one-dimensional FFT.
81
+
82
+ Args:
83
+ x (cupy.ndarray): Array to be transformed.
84
+ n (None or int): Length of the transformed axis of the output. If ``n``
85
+ is not given, the length of the input along the axis specified by
86
+ ``axis`` is used.
87
+ axis (int): Axis over which to compute the FFT.
88
+ norm (``"backward"``, ``"ortho"``, or ``"forward"``): Optional keyword
89
+ to specify the normalization mode. Default is ``None``, which is
90
+ an alias of ``"backward"``.
91
+ overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
92
+ plan (:class:`cupy.cuda.cufft.Plan1d` or ``None``): a cuFFT plan for
93
+ transforming ``x`` over ``axis``, which can be obtained using::
94
+
95
+ plan = cupyx.scipy.fftpack.get_fft_plan(x, n, axis)
96
+
97
+ Note that ``plan`` is defaulted to ``None``, meaning CuPy will use
98
+ an auto-generated plan behind the scene.
99
+
100
+ Returns:
101
+ cupy.ndarray:
102
+ The transformed array which shape is specified by ``n`` and type
103
+ will convert to complex if that of the input is another.
104
+
105
+ .. seealso:: :func:`scipy.fft.fft`
106
+ """
107
+ from cupy.cuda import cufft
108
+ return _fft(x, (n,), (axis,), norm, cufft.CUFFT_FORWARD,
109
+ overwrite_x=overwrite_x, plan=plan)
110
+
111
+
112
+ @_implements(_scipy_fft.ifft)
113
+ def ifft(x, n=None, axis=-1, norm=None, overwrite_x=False, *, plan=None):
114
+ """Compute the one-dimensional inverse FFT.
115
+
116
+ Args:
117
+ x (cupy.ndarray): Array to be transformed.
118
+ n (None or int): Length of the transformed axis of the output. If ``n``
119
+ is not given, the length of the input along the axis specified by
120
+ ``axis`` is used.
121
+ axis (int): Axis over which to compute the FFT.
122
+ norm (``"backward"``, ``"ortho"``, or ``"forward"``): Optional keyword
123
+ to specify the normalization mode. Default is ``None``, which is
124
+ an alias of ``"backward"``.
125
+ overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
126
+ plan (:class:`cupy.cuda.cufft.Plan1d` or ``None``): a cuFFT plan for
127
+ transforming ``x`` over ``axis``, which can be obtained using::
128
+
129
+ plan = cupyx.scipy.fftpack.get_fft_plan(x, n, axis)
130
+
131
+ Note that ``plan`` is defaulted to ``None``, meaning CuPy will use
132
+ an auto-generated plan behind the scene.
133
+
134
+ Returns:
135
+ cupy.ndarray:
136
+ The transformed array which shape is specified by ``n`` and type
137
+ will convert to complex if that of the input is another.
138
+
139
+ .. seealso:: :func:`scipy.fft.ifft`
140
+ """
141
+ from cupy.cuda import cufft
142
+ return _fft(x, (n,), (axis,), norm, cufft.CUFFT_INVERSE,
143
+ overwrite_x=overwrite_x, plan=plan)
144
+
145
+
146
+ @_implements(_scipy_fft.fft2)
147
+ def fft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, *, plan=None):
148
+ """Compute the two-dimensional FFT.
149
+
150
+ Args:
151
+ x (cupy.ndarray): Array to be transformed.
152
+ s (None or tuple of ints): Shape of the transformed axes of the
153
+ output. If ``s`` is not given, the lengths of the input along
154
+ the axes specified by ``axes`` are used.
155
+ axes (tuple of ints): Axes over which to compute the FFT.
156
+ norm (``"backward"``, ``"ortho"``, or ``"forward"``): Optional keyword
157
+ to specify the normalization mode. Default is ``None``, which is
158
+ an alias of ``"backward"``.
159
+ overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
160
+ plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): a cuFFT plan for
161
+ transforming ``x`` over ``axes``, which can be obtained using::
162
+
163
+ plan = cupyx.scipy.fftpack.get_fft_plan(x, s, axes)
164
+
165
+ Note that ``plan`` is defaulted to ``None``, meaning CuPy will use
166
+ an auto-generated plan behind the scene.
167
+
168
+ Returns:
169
+ cupy.ndarray:
170
+ The transformed array which shape is specified by ``s`` and
171
+ type will convert to complex if that of the input is another.
172
+
173
+ .. seealso:: :func:`scipy.fft.fft2`
174
+ """
175
+ return fftn(x, s, axes, norm, overwrite_x, plan=plan)
176
+
177
+
178
@_implements(_scipy_fft.ifft2)
def ifft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, *,
          plan=None):
    """Compute the two-dimensional inverse FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        s (None or tuple of ints): Shape of the transformed axes of the
            output. When omitted, the input lengths along ``axes`` are used.
        axes (tuple of ints): Axes over which to compute the FFT.
        norm (``"backward"``, ``"ortho"``, or ``"forward"``): Normalization
            mode; ``None`` is an alias of ``"backward"``.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): a cuFFT plan for
            transforming ``x`` over ``axes``, obtainable via::

                plan = cupyx.scipy.fftpack.get_fft_plan(x, s, axes)

            With the default ``None`` an auto-generated plan is used.

    Returns:
        cupy.ndarray:
            The transformed array whose shape is specified by ``s`` and whose
            type is converted to complex if the input is not.

    .. seealso:: :func:`scipy.fft.ifft2`
    """
    # A 2-D inverse transform is the N-D inverse restricted to two axes.
    return ifftn(x, s=s, axes=axes, norm=norm, overwrite_x=overwrite_x,
                 plan=plan)
209
+
210
+
211
@_implements(_scipy_fft.fftn)
def fftn(x, s=None, axes=None, norm=None, overwrite_x=False, *, plan=None):
    """Compute the N-dimensional FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        s (None or tuple of ints): Shape of the transformed axes of the
            output. When omitted, the input lengths along ``axes`` are used.
        axes (tuple of ints): Axes over which to compute the FFT.
        norm (``"backward"``, ``"ortho"``, or ``"forward"``): Normalization
            mode; ``None`` is an alias of ``"backward"``.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): a cuFFT plan for
            transforming ``x`` over ``axes``, obtainable via::

                plan = cupyx.scipy.fftpack.get_fft_plan(x, s, axes)

            With the default ``None`` an auto-generated plan is used.

    Returns:
        cupy.ndarray:
            The transformed array whose shape is specified by ``s`` and whose
            type is converted to complex if the input is not.

    .. seealso:: :func:`scipy.fft.fftn`
    """
    # Imported lazily to avoid a hard CUDA dependency at module import time.
    from cupy.cuda import cufft

    shape = _assequence(s)
    transform_axes = _assequence(axes)
    # Pick the 1-D-loop or true N-D cuFFT code path for this layout.
    fft_impl = _default_fft_func(x, shape, transform_axes)
    return fft_impl(x, shape, transform_axes, norm, cufft.CUFFT_FORWARD,
                    overwrite_x=overwrite_x, plan=plan)
247
+
248
+
249
@_implements(_scipy_fft.ifftn)
def ifftn(x, s=None, axes=None, norm=None, overwrite_x=False, *, plan=None):
    """Compute the N-dimensional inverse FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        s (None or tuple of ints): Shape of the transformed axes of the
            output. When omitted, the input lengths along ``axes`` are used.
        axes (tuple of ints): Axes over which to compute the FFT.
        norm (``"backward"``, ``"ortho"``, or ``"forward"``): Normalization
            mode; ``None`` is an alias of ``"backward"``.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): a cuFFT plan for
            transforming ``x`` over ``axes``, obtainable via::

                plan = cupyx.scipy.fftpack.get_fft_plan(x, s, axes)

            With the default ``None`` an auto-generated plan is used.

    Returns:
        cupy.ndarray:
            The transformed array whose shape is specified by ``s`` and whose
            type is converted to complex if the input is not.

    .. seealso:: :func:`scipy.fft.ifftn`
    """
    # Imported lazily to avoid a hard CUDA dependency at module import time.
    from cupy.cuda import cufft

    shape = _assequence(s)
    transform_axes = _assequence(axes)
    # Pick the 1-D-loop or true N-D cuFFT code path for this layout.
    fft_impl = _default_fft_func(x, shape, transform_axes)
    return fft_impl(x, shape, transform_axes, norm, cufft.CUFFT_INVERSE,
                    overwrite_x=overwrite_x, plan=plan)
285
+
286
+
287
@_implements(_scipy_fft.rfft)
def rfft(x, n=None, axis=-1, norm=None, overwrite_x=False, *, plan=None):
    """Compute the one-dimensional FFT for real input.

    The result holds the positive-frequency half of the corresponding
    :func:`fft`, up to and including the Nyquist frequency.

    Args:
        x (cupy.ndarray): Array to be transformed.
        n (None or int): Length of the transformed axis of the output. When
            omitted, the input length along ``axis`` is used.
        axis (int): Axis over which to compute the FFT.
        norm (``"backward"``, ``"ortho"``, or ``"forward"``): Normalization
            mode; ``None`` is an alias of ``"backward"``.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.Plan1d` or ``None``): a cuFFT plan for
            transforming ``x`` over ``axis``, obtainable via::

                plan = cupyx.scipy.fftpack.get_fft_plan(x, n, axis,
                                                        value_type='R2C')

            With the default ``None`` an auto-generated plan is used.

    Returns:
        cupy.ndarray:
            The transformed array.

    .. seealso:: :func:`scipy.fft.rfft`
    """
    from cupy.cuda import cufft

    shape = (n,)
    transform_axes = (axis,)
    return _fft(x, shape, transform_axes, norm, cufft.CUFFT_FORWARD, 'R2C',
                overwrite_x=overwrite_x, plan=plan)
324
+
325
+
326
@_implements(_scipy_fft.irfft)
def irfft(x, n=None, axis=-1, norm=None, overwrite_x=False, *, plan=None):
    """Compute the one-dimensional inverse FFT for real input.

    Args:
        x (cupy.ndarray): Array to be transformed.
        n (None or int): Length of the transformed axis of the output. When
            omitted, the input length along ``axis`` is used.
        axis (int): Axis over which to compute the FFT.
        norm (``"backward"``, ``"ortho"``, or ``"forward"``): Normalization
            mode; ``None`` is an alias of ``"backward"``.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.Plan1d` or ``None``): a cuFFT plan for
            transforming ``x`` over ``axis``, obtainable via::

                plan = cupyx.scipy.fftpack.get_fft_plan(x, n, axis,
                                                        value_type='C2R')

            With the default ``None`` an auto-generated plan is used.

    Returns:
        cupy.ndarray:
            The transformed array.

    .. seealso:: :func:`scipy.fft.irfft`
    """
    from cupy.cuda import cufft

    shape = (n,)
    transform_axes = (axis,)
    return _fft(x, shape, transform_axes, norm, cufft.CUFFT_INVERSE, 'C2R',
                overwrite_x=overwrite_x, plan=plan)
358
+
359
+
360
@_implements(_scipy_fft.rfft2)
def rfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, *,
          plan=None):
    """Compute the two-dimensional FFT for real input.

    Args:
        x (cupy.ndarray): Array to be transformed.
        s (None or tuple of ints): Shape to use from the input. If ``s`` is
            not given, the lengths of the input along the axes specified by
            ``axes`` are used.
        axes (tuple of ints): Axes over which to compute the FFT.
        norm (``"backward"``, ``"ortho"``, or ``"forward"``): Optional keyword
            to specify the normalization mode. Default is ``None``, which is
            an alias of ``"backward"``.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): a cuFFT plan for
            transforming ``x`` over ``axes``, which can be obtained using::

                plan = cupyx.scipy.fftpack.get_fft_plan(x, s, axes,
                                                        value_type='R2C')

            Note that ``plan`` is defaulted to ``None``, meaning CuPy will use
            an auto-generated plan behind the scene.

    Returns:
        cupy.ndarray:
            The transformed array which shape is specified by ``s`` and type
            will convert to complex if the input is other. The length of the
            last axis transformed will be ``s[-1]//2+1``.

    .. seealso:: :func:`scipy.fft.rfft2`
    """
    # Delegate to the N-dimensional real transform.
    return rfftn(x, s, axes, norm, overwrite_x, plan=plan)
393
+
394
+
395
@_implements(_scipy_fft.irfft2)
def irfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, *,
           plan=None):
    """Compute the two-dimensional inverse FFT for real input.

    Args:
        x (cupy.ndarray): Array to be transformed.
        s (None or tuple of ints): Shape of the output. If ``s`` is not given,
            they are determined from the lengths of the input along the axes
            specified by ``axes``.
        axes (tuple of ints): Axes over which to compute the FFT.
        norm (``"backward"``, ``"ortho"``, or ``"forward"``): Optional keyword
            to specify the normalization mode. Default is ``None``, which is
            an alias of ``"backward"``.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): a cuFFT plan for
            transforming ``x`` over ``axes``, which can be obtained using::

                plan = cupyx.scipy.fftpack.get_fft_plan(x, s, axes,
                                                        value_type='C2R')

            Note that ``plan`` is defaulted to ``None``, meaning CuPy will use
            an auto-generated plan behind the scene.

    Returns:
        cupy.ndarray:
            The transformed array which shape is specified by ``s`` and type
            will convert to complex if the input is other. If ``s`` is not
            given, the length of final transformed axis of output will be
            ``2*(m-1)`` where `m` is the length of the final transformed axis
            of the input.

    .. seealso:: :func:`scipy.fft.irfft2`
    """
    # Delegate to the N-dimensional real inverse transform.
    return irfftn(x, s, axes, norm, overwrite_x, plan=plan)
430
+
431
+
432
@_implements(_scipy_fft.rfftn)
def rfftn(x, s=None, axes=None, norm=None, overwrite_x=False, *, plan=None):
    """Compute the N-dimensional FFT for real input.

    Args:
        x (cupy.ndarray): Array to be transformed.
        s (None or tuple of ints): Shape to use from the input. If ``s`` is
            not given, the lengths of the input along the axes specified by
            ``axes`` are used.
        axes (tuple of ints): Axes over which to compute the FFT.
        norm (``"backward"``, ``"ortho"``, or ``"forward"``): Optional keyword
            to specify the normalization mode. Default is ``None``, which is
            an alias of ``"backward"``.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): a cuFFT plan for
            transforming ``x`` over ``axes``, which can be obtained using::

                plan = cupyx.scipy.fftpack.get_fft_plan(x, s, axes,
                                                        value_type='R2C')

            Note that ``plan`` is defaulted to ``None``, meaning CuPy will use
            an auto-generated plan behind the scene.

    Returns:
        cupy.ndarray:
            The transformed array which shape is specified by ``s`` and type
            will convert to complex if the input is other. The length of the
            last axis transformed will be ``s[-1]//2+1``.

    .. seealso:: :func:`scipy.fft.rfftn`
    """
    from cupy.cuda import cufft

    s = _assequence(s)
    axes = _assequence(axes)
    # Select the cuFFT code path appropriate for a real-to-complex transform.
    func = _default_fft_func(x, s, axes, value_type='R2C')
    return func(x, s, axes, norm, cufft.CUFFT_FORWARD, 'R2C',
                overwrite_x=overwrite_x, plan=plan)
470
+
471
+
472
@_implements(_scipy_fft.irfftn)
def irfftn(x, s=None, axes=None, norm=None, overwrite_x=False, *, plan=None):
    """Compute the N-dimensional inverse FFT for real input.

    Args:
        x (cupy.ndarray): Array to be transformed.
        s (None or tuple of ints): Shape of the output. If ``s`` is not given,
            they are determined from the lengths of the input along the axes
            specified by ``axes``.
        axes (tuple of ints): Axes over which to compute the FFT.
        norm (``"backward"``, ``"ortho"``, or ``"forward"``): Optional keyword
            to specify the normalization mode. Default is ``None``, which is
            an alias of ``"backward"``.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): a cuFFT plan for
            transforming ``x`` over ``axes``, which can be obtained using::

                plan = cupyx.scipy.fftpack.get_fft_plan(x, s, axes,
                                                        value_type='C2R')

            Note that ``plan`` is defaulted to ``None``, meaning CuPy will use
            an auto-generated plan behind the scene.

    Returns:
        cupy.ndarray:
            The transformed array which shape is specified by ``s`` and type
            will convert to complex if the input is other. If ``s`` is not
            given, the length of final transformed axis of output will be
            ``2*(m-1)`` where `m` is the length of the final transformed axis
            of the input.

    .. seealso:: :func:`scipy.fft.irfftn`
    """
    from cupy.cuda import cufft

    s = _assequence(s)
    axes = _assequence(axes)
    # Select the cuFFT code path appropriate for a complex-to-real transform.
    func = _default_fft_func(x, s, axes, value_type='C2R')
    return func(x, s, axes, norm, cufft.CUFFT_INVERSE, 'C2R',
                overwrite_x=overwrite_x, plan=plan)
512
+
513
+
514
@_implements(_scipy_fft.hfft)
def hfft(x, n=None, axis=-1, norm=None, overwrite_x=False, *, plan=None):
    """Compute the FFT of a signal that has Hermitian symmetry.

    Args:
        x (cupy.ndarray): Array to be transformed.
        n (None or int): Length of the transformed axis of the output. For
            ``n`` output points, ``n//2+1`` input points are necessary. If
            ``n`` is not given, it is determined from the length of the input
            along the axis specified by ``axis``.
        axis (int): Axis over which to compute the FFT.
        norm (``"backward"``, ``"ortho"``, or ``"forward"``): Optional keyword
            to specify the normalization mode. Default is ``None``, which is
            an alias of ``"backward"``.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (None): This argument is currently not supported.

    Returns:
        cupy.ndarray:
            The transformed array which shape is specified by ``n`` and type
            will convert to complex if the input is other. If ``n`` is not
            given, the length of the transformed axis is ``2*(m-1)`` where `m`
            is the length of the transformed axis of the input.

    .. seealso:: :func:`scipy.fft.hfft`
    """
    # TODO(leofang): support R2C & C2R plans
    if plan is not None:
        raise NotImplementedError('hfft plan is currently not yet supported')
    return _hfft(x, n, axis, norm)
544
+
545
+
546
@_implements(_scipy_fft.ihfft)
def ihfft(x, n=None, axis=-1, norm=None, overwrite_x=False, *, plan=None):
    """Compute the inverse FFT of a signal that has Hermitian symmetry.

    Args:
        x (cupy.ndarray): Array to be transformed.
        n (None or int): Number of points along transformation axis in the
            input to use. If ``n`` is not given, the length of the input along
            the axis specified by ``axis`` is used.
        axis (int): Axis over which to compute the FFT.
        norm (``"backward"``, ``"ortho"``, or ``"forward"``): Optional keyword
            to specify the normalization mode. Default is ``None``, which is
            an alias of ``"backward"``.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (None): This argument is currently not supported.

    Returns:
        cupy.ndarray:
            The transformed array which shape is specified by ``n`` and type
            will convert to complex if the input is other. The length of the
            transformed axis is ``n//2+1``.

    .. seealso:: :func:`scipy.fft.ihfft`
    """
    # TODO(leofang): support R2C & C2R plans
    if plan is not None:
        raise NotImplementedError('ihfft plan is currently not yet supported')
    return _ihfft(x, n, axis, norm)
574
+
575
+
576
@_implements(_scipy_fft.hfft2)
def hfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, *,
          plan=None):
    """Compute the FFT of a two-dimensional signal that has Hermitian symmetry.

    Args:
        x (cupy.ndarray): Array to be transformed.
        s (None or tuple of ints): Shape of the real output.
        axes (tuple of ints): Axes over which to compute the FFT.
        norm (``"backward"``, ``"ortho"``, or ``"forward"``): Normalization
            mode; ``None`` is an alias of ``"backward"``.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
            (This argument is currently not supported)
        plan (None): This argument is currently not supported.

    Returns:
        cupy.ndarray:
            The real result of the 2-D Hermitian complex real FFT.

    .. seealso:: :func:`scipy.fft.hfft2`
    """
    if plan is not None:
        raise NotImplementedError('hfft2 plan is currently not yet supported')
    # hfft is irfft of the conjugate, with the normalization direction swapped.
    conjugated = x.conj()
    return irfft2(conjugated, s, axes, _swap_direction(norm))
601
+
602
+
603
@_implements(_scipy_fft.ihfft2)
def ihfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, *,
           plan=None):
    """Compute the Inverse FFT of a two-dimensional signal that has Hermitian
    symmetry.

    Args:
        x (cupy.ndarray): Array to be transformed.
        s (None or tuple of ints): Shape of the real output.
        axes (tuple of ints): Axes over which to compute the FFT.
        norm (``"backward"``, ``"ortho"``, or ``"forward"``): Normalization
            mode; ``None`` is an alias of ``"backward"``.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
            (This argument is currently not supported)
        plan (None): This argument is currently not supported.

    Returns:
        cupy.ndarray:
            The real result of the 2-D Hermitian inverse complex real FFT.

    .. seealso:: :func:`scipy.fft.ihfft2`
    """
    if plan is not None:
        raise NotImplementedError('ihfft2 plan is currently not yet supported')
    # ihfft is the conjugate of rfft with the normalization direction swapped.
    forward = rfft2(x, s, axes, _swap_direction(norm))
    return forward.conj()
629
+
630
+
631
@_implements(_scipy_fft.hfftn)
def hfftn(x, s=None, axes=None, norm=None, overwrite_x=False, *,
          plan=None):
    """Compute the FFT of a N-dimensional signal that has Hermitian symmetry.

    Args:
        x (cupy.ndarray): Array to be transformed.
        s (None or tuple of ints): Shape of the real output.
        axes (tuple of ints): Axes over which to compute the FFT.
        norm (``"backward"``, ``"ortho"``, or ``"forward"``): Normalization
            mode; ``None`` is an alias of ``"backward"``.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
            (This argument is currently not supported)
        plan (None): This argument is currently not supported.

    Returns:
        cupy.ndarray:
            The real result of the N-D Hermitian complex real FFT.

    .. seealso:: :func:`scipy.fft.hfftn`
    """
    if plan is not None:
        raise NotImplementedError('hfftn plan is currently not yet supported')
    # hfft is irfft of the conjugate, with the normalization direction swapped.
    conjugated = x.conj()
    return irfftn(conjugated, s, axes, _swap_direction(norm))
656
+
657
+
658
@_implements(_scipy_fft.ihfftn)
def ihfftn(x, s=None, axes=None, norm=None, overwrite_x=False, *,
           plan=None):
    """Compute the Inverse FFT of a N-dimensional signal that has Hermitian
    symmetry.

    Args:
        x (cupy.ndarray): Array to be transformed.
        s (None or tuple of ints): Shape of the real output.
        axes (tuple of ints): Axes over which to compute the FFT.
        norm (``"backward"``, ``"ortho"``, or ``"forward"``): Normalization
            mode; ``None`` is an alias of ``"backward"``.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
            (This argument is currently not supported)
        plan (None): This argument is currently not supported.

    Returns:
        cupy.ndarray:
            The real result of the N-D Hermitian inverse complex real FFT.

    .. seealso:: :func:`scipy.fft.ihfftn`
    """
    if plan is not None:
        raise NotImplementedError('ihfftn plan is currently not yet supported')
    # ihfft is the conjugate of rfft with the normalization direction swapped.
    forward = rfftn(x, s, axes, _swap_direction(norm))
    return forward.conj()
vllm/lib/python3.10/site-packages/cupyx/scipy/fft/_helper.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, List, Tuple
2
+
3
+ import math
4
+
5
+
6
# Memo table for ``_next_fast_len_impl``.  Keys are ``(n, primes)`` pairs.
# NOTE: the key annotation previously used ``List[int]`` for ``primes``, but
# lists are unhashable and cannot be dict keys; the callers always pass a
# tuple (``primes[1:]`` of a tuple is still a tuple), so ``Tuple[int, ...]``
# is the correct type.
_next_fast_len_cache: Dict[Tuple[int, Tuple[int, ...]], int] = {}


def _next_fast_len_impl(n, primes):
    """Return the smallest ``primes``-smooth number that is >= ``n``.

    Args:
        n (int): Lower bound (must be >= 1).
        primes (tuple of ints): Allowed prime factors, e.g. ``(2, 3, 5, 7)``.

    Returns:
        int: Smallest integer >= ``n`` whose prime factors are all drawn from
        ``primes``; ``math.inf`` when ``primes`` is empty and ``n > 1`` is
        unreachable.
    """
    if len(primes) == 0:
        # No factors available: only reachable result would be via the other
        # recursion branch, so report "impossible" and let min() discard it.
        return math.inf
    result = _next_fast_len_cache.get((n, primes), None)
    if result is None:
        if n == 1:
            result = 1
        else:
            p = primes[0]
            # Either use p at least once (round n up to a multiple of p),
            # or never use p again (drop it from the candidate set).
            result = min(
                _next_fast_len_impl((n + p - 1) // p, primes) * p,
                _next_fast_len_impl(n, primes[1:]))
        _next_fast_len_cache[(n, primes)] = result
    return result
23
+
24
+
25
def next_fast_len(target, real=False):
    """Find the next fast size to ``fft``.

    Args:
        target (int): The size of input array.
        real (bool): ``True`` if the FFT involves real input or output.
            This parameter is of no use, and only for compatibility to
            SciPy's interface.

    Returns:
        int: The smallest fast length greater than or equal to the input value.

    .. seealso:: :func:`scipy.fft.next_fast_len`

    .. note::
        It may return a different value to :func:`scipy.fft.next_fast_len`
        as pocketfft's prime factors are different from cuFFT's factors.
        For details, see the `cuFFT documentation`_.

    .. _cuFFT documentation:
        https://docs.nvidia.com/cuda/cufft/index.html#accuracy-and-performance
    """
    # Zero-length transforms are a degenerate case; answer immediately.
    if target == 0:
        return 0
    # cuFFT has fast kernels for sizes whose factors are 2, 3, 5 and 7.
    return _next_fast_len_impl(target, (2, 3, 5, 7))
vllm/lib/python3.10/site-packages/cupyx/scipy/fft/_realtransforms.py ADDED
@@ -0,0 +1,922 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Real-to-real transforms
2
+
3
+ cuFFT does not implement real-to-real FFTs. This module implements forward
4
+ and inverse DCT-II and DCT-III transforms using FFTs.
5
+
6
+ A length N DCT can be computed using a length N FFT and some additional
7
+ multiplications and reordering of entries.
8
+
9
+ The approach taken here is based on the work in [1]_, [2]_ and is discussed in
10
+ the freely-available online resources [3]_, [4]_.
11
+
12
+ The implementation here follows that approach with only minor modification to
13
+ match the normalization conventions in SciPy.
14
+
15
+ The modifications to turn a type II or III DCT to a DST were implemented as
16
+ described in [5]_.
17
+
18
+ .. [1] J. Makhoul, "A fast cosine transform in one and two dimensions," in
19
+ IEEE Transactions on Acoustics, Speech, and Signal Processing, vol. 28,
20
+ no. 1, pp. 27-34, February 1980.
21
+
22
+ .. [2] M.J. Narasimha and A.M. Peterson, “On the computation of the discrete
23
+ cosine transform,” IEEE Trans. Commun., vol. 26, no. 6, pp. 934–936, 1978.
24
+
25
+ .. [3] http://fourier.eng.hmc.edu/e161/lectures/dct/node2.html
26
+
27
+ .. [4] https://dsp.stackexchange.com/questions/2807/fast-cosine-transform-via-fft
28
+
29
+ .. [5] X. Shao, S. G. Johnson. Type-II/III DCT/DST algorithms with reduced
30
+ number of arithmetic operations, Signal Processing, Volume 88, Issue 6,
31
+ pp. 1553-1564, 2008.
32
+ """
33
+
34
+ import math
35
+ import numbers
36
+ import operator
37
+
38
+ import cupy
39
+ from cupy import _core
40
+ from cupy.fft._fft import _cook_shape
41
+ from cupyx.scipy.fft import _fft
42
+ from cupy.exceptions import AxisError
43
+
44
+ __all__ = ['dct', 'dctn', 'dst', 'dstn', 'idct', 'idctn', 'idst', 'idstn']
45
+
46
+
47
+ def _promote_dtype(x):
48
+ if x.dtype.kind in 'bui':
49
+ # use float64 instead of promote_types to match SciPy's behavior
50
+ float_dtype = cupy.float64
51
+ else:
52
+ float_dtype = cupy.promote_types(x.dtype, cupy.float32)
53
+ return x.astype(float_dtype, copy=False)
54
+
55
+
56
+ def _get_dct_norm_factor(n, inorm, dct_type=2):
57
+ """Normalization factors for DCT/DST I-IV.
58
+
59
+ Parameters
60
+ ----------
61
+ n : int
62
+ Data size.
63
+ inorm : {'none', 'sqrt', 'full'}
64
+ When `inorm` is 'none', the scaling factor is 1.0 (unnormalized). When
65
+ `inorm` is 1, scaling by ``1/sqrt(d)`` as needed for an orthogonal
66
+ transform is used. When `inorm` is 2, normalization by ``1/d`` is
67
+ applied. The value of ``d`` depends on both `n` and the `dct_type`.
68
+ dct_type : {1, 2, 3, 4}
69
+ Which type of DCT or DST is being normalized?.
70
+
71
+ Returns
72
+ -------
73
+ fct : float
74
+ The normalization factor.
75
+ """
76
+ if inorm == 'none':
77
+ return 1
78
+ delta = -1 if dct_type == 1 else 0
79
+ d = 2 * (n + delta)
80
+ if inorm == 'full':
81
+ fct = 1 / d
82
+ elif inorm == 'sqrt':
83
+ fct = 1 / math.sqrt(d)
84
+ else:
85
+ raise ValueError('expected inorm = "none", "sqrt" or "full"')
86
+ return fct
87
+
88
+
89
+ def _reshuffle_dct2(x, n, axis, dst=False):
90
+ """Reorder entries to allow computation of DCT/DST-II via FFT."""
91
+ sl_even = [slice(None)] * x.ndim
92
+ sl_even[axis] = slice(0, None, 2)
93
+ sl_even = tuple(sl_even)
94
+ sl_odd = [slice(None)] * x.ndim
95
+ if n % 2:
96
+ sl_odd[axis] = slice(-2, None, -2)
97
+ sl_odd = tuple(sl_odd)
98
+ else:
99
+ sl_odd[axis] = slice(None, None, -2)
100
+ sl_odd = tuple(sl_odd)
101
+ if dst:
102
+ x = cupy.concatenate((x[sl_even], -x[sl_odd]), axis=axis)
103
+ else:
104
+ x = cupy.concatenate((x[sl_even], x[sl_odd]), axis=axis)
105
+ return x
106
+
107
+
108
+ _mult_factor_dct2 = _core.ElementwiseKernel(
109
+ in_params='R xr, int32 N, R norm_factor',
110
+ out_params='C y',
111
+ operation="""
112
+ C j(0., -1.);
113
+ y = (R)2.0 * norm_factor * exp(j * (R)(i * M_PI / (2 * N)));""",
114
+ )
115
+
116
+
117
+ def _exp_factor_dct2(x, n, axis, norm_factor, n_truncate=None):
118
+ """Twiddle & scaling factors for computation of DCT/DST-II via FFT."""
119
+ if n_truncate is None:
120
+ n_truncate = n
121
+ tmp = cupy.empty((n_truncate,), dtype=x.dtype)
122
+ _mult_factor_dct2(tmp.real, n, norm_factor, tmp)
123
+
124
+ if x.ndim == 1:
125
+ return tmp
126
+ tmp_shape = [1] * x.ndim
127
+ tmp_shape[axis] = n_truncate
128
+ tmp_shape = tuple(tmp_shape)
129
+ return tmp.reshape(tmp_shape)
130
+
131
+
132
+ def _dct_or_dst_type2(
133
+ x, n=None, axis=-1, forward=True, norm=None, dst=False, overwrite_x=False
134
+ ):
135
+ """Forward DCT/DST-II (or inverse DCT/DST-III) along a single axis
136
+
137
+ Parameters
138
+ ----------
139
+ x : cupy.ndarray
140
+ The data to transform.
141
+ n : int
142
+ The size of the transform. If None, ``x.shape[axis]`` is used.
143
+ axis : int
144
+ Axis along which the transform is applied.
145
+ forward : bool
146
+ Set true to indicate that this is a forward DCT-II as opposed to an
147
+ inverse DCT-III (The difference between the two is only in the
148
+ normalization factor).
149
+ norm : {None, 'ortho', 'forward', 'backward'}
150
+ The normalization convention to use.
151
+ dst : bool
152
+ If True, a discrete sine transform is computed rather than the discrete
153
+ cosine transform.
154
+ overwrite_x : bool
155
+ Indicates that it is okay to overwrite x. In practice, the current
156
+ implementation never performs the transform in-place.
157
+
158
+ Returns
159
+ -------
160
+ y: cupy.ndarray
161
+ The transformed array.
162
+ """
163
+ if axis < -x.ndim or axis >= x.ndim:
164
+ raise AxisError('axis out of range')
165
+ if axis < 0:
166
+ axis += x.ndim
167
+ if n is not None and n < 1:
168
+ raise ValueError(
169
+ f'invalid number of data points ({n}) specified'
170
+ )
171
+
172
+ x = _cook_shape(x, (n,), (axis,), 'R2R')
173
+ n = x.shape[axis]
174
+
175
+ x = _reshuffle_dct2(x, x.shape[axis], axis, dst)
176
+
177
+ if norm == 'ortho':
178
+ inorm = 'sqrt'
179
+ elif norm == 'forward':
180
+ inorm = 'full' if forward else 'none'
181
+ else:
182
+ inorm = 'none' if forward else 'full'
183
+ norm_factor = _get_dct_norm_factor(n, inorm=inorm, dct_type=2)
184
+
185
+ x = _fft.fft(x, n=n, axis=axis, overwrite_x=True)
186
+ tmp = _exp_factor_dct2(x, n, axis, norm_factor)
187
+
188
+ x *= tmp # broadcasting
189
+ x = cupy.real(x)
190
+
191
+ if norm == 'ortho':
192
+ sl0 = [slice(None)] * x.ndim
193
+ sl0[axis] = slice(1)
194
+ x[tuple(sl0)] *= math.sqrt(2) * 0.5
195
+
196
+ if dst:
197
+ slrev = [slice(None)] * x.ndim
198
+ slrev[axis] = slice(None, None, -1)
199
+ x = x[tuple(slrev)]
200
+ return x
201
+
202
+
203
+ def _reshuffle_dct3(y, n, axis, dst):
204
+ """Reorder entries to allow computation of DCT/DST-II via FFT."""
205
+ x = cupy.empty_like(y)
206
+ n_half = (n + 1) // 2
207
+
208
+ # Store first half of y in the even entries of the output
209
+ sl_even = [slice(None)] * y.ndim
210
+ sl_even[axis] = slice(0, None, 2)
211
+ sl_even = tuple(sl_even)
212
+
213
+ sl_half = [slice(None)] * y.ndim
214
+ sl_half[axis] = slice(0, n_half)
215
+ x[sl_even] = y[tuple(sl_half)]
216
+
217
+ # Store the second half of y in the odd entries of the output
218
+ sl_odd = [slice(None)] * y.ndim
219
+ sl_odd[axis] = slice(1, None, 2)
220
+ sl_odd = tuple(sl_odd)
221
+
222
+ sl_half[axis] = slice(-1, n_half - 1, -1)
223
+ if dst:
224
+ x[sl_odd] = -y[tuple(sl_half)]
225
+ else:
226
+ x[sl_odd] = y[tuple(sl_half)]
227
+ return x
228
+
229
+
230
# Elementwise kernel producing the complex twiddle/scale factors used when
# computing a DCT/DST-III via a length-N FFT.  For flat output index ``i``:
#     y[i] = 2 * N * norm_factor * exp(1j * i * pi / (2 * N))
# ``xr`` is unused in the body; it only fixes the launch size / real dtype R.
_mult_factor_dct3 = _core.ElementwiseKernel(
    in_params='R xr, int32 N, R norm_factor',
    out_params='C y',
    operation="""
    C j(0., 1.);
    y = (R)(2 * N * norm_factor) * exp(j * (R)(i * M_PI / (2 * N)));""",
)
237
+
238
+
239
def _exp_factor_dct3(x, n, axis, dtype, norm_factor):
    """Twiddle & scaling factors for computation of DCT/DST-III via FFT."""
    factors = cupy.empty((n,), dtype=dtype)
    # The kernel ignores the values of its first argument; ``factors.real``
    # only supplies the real dtype and the launch size.
    _mult_factor_dct3(factors.real, n, norm_factor, factors)
    if x.ndim == 1:
        return factors
    # Reshape so the factors broadcast along all non-transformed axes.
    bshape = [1] * x.ndim
    bshape[axis] = n
    return factors.reshape(tuple(bshape))
250
+
251
+
252
def _dct_or_dst_type3(
    x, n=None, axis=-1, norm=None, forward=True, dst=False, overwrite_x=False
):
    """Forward DCT/DST-III (or inverse DCT/DST-II) along a single axis.

    Parameters
    ----------
    x : cupy.ndarray
        The data to transform.
    n : int
        The size of the transform. If None, ``x.shape[axis]`` is used.
    axis : int
        Axis along which the transform is applied.
    norm : {None, 'ortho', 'forward', 'backward'}
        The normalization convention to use.
    forward : bool
        Set true to indicate that this is a forward DCT-III as opposed to
        an inverse DCT-II (the difference between the two is only in the
        normalization factor).
    dst : bool
        If True, a discrete sine transform is computed rather than the
        discrete cosine transform.
    overwrite_x : bool
        Indicates that it is okay to overwrite x.  Only honored on the
        DST + 'ortho' path below; otherwise a new array is produced anyway.

    Returns
    -------
    y: cupy.ndarray
        The transformed array.

    Raises
    ------
    AxisError
        If ``axis`` is out of range for ``x``.
    ValueError
        If ``n`` is smaller than 1 or ``norm`` is not a recognized mode.
    """
    # Validate and normalize the axis to a non-negative index.
    if axis < -x.ndim or axis >= x.ndim:
        raise AxisError('axis out of range')
    if axis < 0:
        axis += x.ndim
    if n is not None and n < 1:
        raise ValueError(
            f'invalid number of data points ({n}) specified'
        )

    # Pad or truncate along ``axis`` so the transform length is ``n``.
    x = _cook_shape(x, (n,), (axis,), 'R2R')
    n = x.shape[axis]

    # determine normalization factor
    # ``sl0_scale`` is the extra factor applied to the first entry along
    # ``axis``; ``inorm`` selects the overall 1/N-style normalization.
    if norm == 'ortho':
        sl0_scale = 0.5 * math.sqrt(2)
        inorm = 'sqrt'
    elif norm == 'forward':
        sl0_scale = 0.5
        inorm = 'full' if forward else 'none'
    elif norm == 'backward' or norm is None:
        sl0_scale = 0.5
        inorm = 'none' if forward else 'full'
    else:
        raise ValueError(f'Invalid norm value "{norm}", should be "backward", '
                         '"ortho" or "forward"')
    norm_factor = _get_dct_norm_factor(n, inorm=inorm, dct_type=3)
    dtype = cupy.promote_types(x, cupy.complex64)

    # Indexing tuple selecting the first entry along the transform axis.
    sl0 = [slice(None)] * x.ndim
    sl0[axis] = slice(1)

    if dst:
        # DST-III of x equals a DCT-III of x reversed along ``axis``
        # (up to sign handling done in _reshuffle_dct3).
        slrev = [slice(None)] * x.ndim
        slrev[axis] = slice(None, None, -1)
        x = x[tuple(slrev)]
        if norm == 'ortho':
            # The ortho pre-scaling mutates ``x``; promote to a writable
            # float copy unless overwriting the input is permitted.
            float_dtype = cupy.promote_types(x.dtype, cupy.float32)
            if x.dtype != float_dtype:
                x = x.astype(float_dtype)
            elif not overwrite_x:
                x = x.copy()
            x[tuple(sl0)] *= math.sqrt(2)
            sl0_scale = 0.5

    # scale by exponentials and normalization factor
    tmp = _exp_factor_dct3(x, n, axis, dtype, norm_factor)
    x = x * tmp  # broadcasting
    x[tuple(sl0)] *= sl0_scale

    # inverse fft
    x = _fft.ifft(x, n=n, axis=axis, overwrite_x=True)
    x = cupy.real(x)

    # reorder entries
    return _reshuffle_dct3(x, n, axis, dst)
339
+
340
+
341
@_fft._implements(_fft._scipy_fft.dct)
def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
    """Return the Discrete Cosine Transform of an array, x.

    Parameters
    ----------
    x : cupy.ndarray
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT (see Notes). Default type is 2. Currently CuPy only
        supports types 2 and 3.
    n : int, optional
        Length of the transform. If ``n < x.shape[axis]``, `x` is truncated;
        if ``n > x.shape[axis]``, `x` is zero-padded. The default results in
        ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the dct is computed; the default is the last axis
        (i.e., ``axis=-1``).
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    y : cupy.ndarray of real
        The transformed input array.

    See Also
    --------
    :func:`scipy.fft.dct`

    Notes
    -----
    For a single dimension array ``x``, ``dct(x, norm='ortho')`` is equal
    to MATLAB ``dct(x)``.

    With ``norm="ortho"`` the forward and inverse transforms share the same
    overall scale factor, giving an orthogonal DCT matrix for types 1, 2
    and 3. With ``norm="backward"`` the forward transform is unscaled and
    the inverse carries the ``1/N`` factor; ``norm="forward"`` swaps the
    two conventions.

    CuPy currently only supports DCT types 2 and 3. 'The' DCT generally
    refers to DCT type 2, and 'the' Inverse DCT generally refers to DCT
    type 3 [1]_. See the :func:`scipy.fft.dct` documentation for a full
    description of each type.

    References
    ----------
    .. [1] Wikipedia, "Discrete cosine transform",
           https://en.wikipedia.org/wiki/Discrete_cosine_transform

    """
    if x.dtype.kind == 'c':
        # Complex input: the DCT is linear and real, so transform the real
        # and imaginary parts independently and recombine.
        real_part = dct(x.real, type, n, axis, norm, overwrite_x)
        imag_part = dct(x.imag, type, n, axis, norm, overwrite_x)
        return real_part + 1j * imag_part

    x = _promote_dtype(x)

    if type == 2:
        transform = _dct_or_dst_type2
    elif type == 3:
        transform = _dct_or_dst_type3
    elif type in [1, 4]:
        raise NotImplementedError(
            'Only DCT-II and DCT-III have been implemented.'
        )
    else:
        raise ValueError('invalid DCT type')
    return transform(x, n=n, axis=axis, norm=norm, forward=True, dst=False)
421
+
422
+
423
@_fft._implements(_fft._scipy_fft.dst)
def dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
    """Return the Discrete Sine Transform of an array, x.

    Parameters
    ----------
    x : cupy.ndarray
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DST (see Notes). Default type is 2.
    n : int, optional
        Length of the transform. If ``n < x.shape[axis]``, `x` is truncated;
        if ``n > x.shape[axis]``, `x` is zero-padded. The default results in
        ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the dst is computed; the default is the last axis
        (i.e., ``axis=-1``).
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    dst : cupy.ndarray of real
        The transformed input array.

    See Also
    --------
    :func:`scipy.fft.dst`

    Notes
    -----
    With ``norm="ortho"`` the forward and inverse transforms share the same
    overall scale factor, which for types 2 and 3 yields an orthogonal DST
    matrix. With ``norm="backward"`` the forward transform is unscaled and
    the inverse carries a ``1/N`` factor, where ``N`` is the "logical" size
    of the DST.

    See the :func:`scipy.fft.dst` documentation for a full description of
    each type. CuPy currently only supports DST types 2 and 3.
    """
    if x.dtype.kind == 'c':
        # Complex input: apply the real transform separately to the real
        # and imaginary parts, then recombine.
        real_part = dst(x.real, type, n, axis, norm, overwrite_x)
        imag_part = dst(x.imag, type, n, axis, norm, overwrite_x)
        return real_part + 1j * imag_part

    x = _promote_dtype(x)

    if type == 2:
        transform = _dct_or_dst_type2
    elif type == 3:
        transform = _dct_or_dst_type3
    elif type in [1, 4]:
        raise NotImplementedError(
            'Only DST-II and DST-III have been implemented.'
        )
    else:
        raise ValueError('invalid DST type')
    return transform(x, n=n, axis=axis, norm=norm, forward=True, dst=True)
490
+
491
+
492
@_fft._implements(_fft._scipy_fft.idct)
def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
    """Return the Inverse Discrete Cosine Transform of an array, x.

    Parameters
    ----------
    x : cupy.ndarray
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT (see Notes). Default type is 2.
    n : int, optional
        Length of the transform. If ``n < x.shape[axis]``, `x` is truncated;
        if ``n > x.shape[axis]``, `x` is zero-padded. The default results in
        ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the idct is computed; the default is the last axis
        (i.e., ``axis=-1``).
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    idct : cupy.ndarray of real
        The transformed input array.

    See Also
    --------
    :func:`scipy.fft.idct`

    Notes
    -----
    For a single dimension array `x`, ``idct(x, norm='ortho')`` is equal to
    MATLAB ``idct(x)``.

    With ``norm="ortho"`` both `dct` and `idct` are scaled by the same
    overall factor in both directions, which for types 1, 2 and 3 makes the
    IDCT matrix orthogonal (see `dct` for the full definitions).

    'The' IDCT is the IDCT-II, which is the same as the normalized DCT-III
    [1]_. See the :func:`scipy.fft.dct` documentation for a full description
    of each type. CuPy currently only supports DCT types 2 and 3.

    References
    ----------
    .. [1] Wikipedia, "Discrete cosine transform",
           https://en.wikipedia.org/wiki/Discrete_cosine_transform
    """
    if x.dtype.kind == 'c':
        # Complex input: invert the real and imaginary parts separately
        # and recombine (the transform is linear and real).
        real_part = idct(x.real, type, n, axis, norm, overwrite_x)
        imag_part = idct(x.imag, type, n, axis, norm, overwrite_x)
        return real_part + 1j * imag_part

    x = _promote_dtype(x)

    if type == 2:
        # The inverse of DCT-II is a (rescaled) DCT-III.
        return _dct_or_dst_type3(x, n=n, axis=axis, norm=norm, forward=False)
    if type == 3:
        # The inverse of DCT-III is a (rescaled) DCT-II.
        return _dct_or_dst_type2(x, n=n, axis=axis, norm=norm, forward=False)
    if type in [1, 4]:
        raise NotImplementedError(
            'Only DCT-II and DCT-III have been implemented.'
        )
    raise ValueError('invalid DCT type')
563
+
564
+
565
@_fft._implements(_fft._scipy_fft.idst)
def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
    """Return the Inverse Discrete Sine Transform of an array, x.

    Parameters
    ----------
    x : cupy.ndarray
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DST (see Notes). Default type is 2.
    n : int, optional
        Length of the transform. If ``n < x.shape[axis]``, `x` is truncated;
        if ``n > x.shape[axis]``, `x` is zero-padded. The default results in
        ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the idst is computed; the default is the last axis
        (i.e., ``axis=-1``).
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    idst : cupy.ndarray of real
        The transformed input array.

    See Also
    --------
    :func:`scipy.fft.idst`

    Notes
    -----
    For full details of the DST types and normalization modes, as well as
    references, see :func:`scipy.fft.dst`.
    """
    if x.dtype.kind == 'c':
        # Complex input: invert the real and imaginary parts separately
        # and recombine (the transform is linear and real).
        real_part = idst(x.real, type, n, axis, norm, overwrite_x)
        imag_part = idst(x.imag, type, n, axis, norm, overwrite_x)
        return real_part + 1j * imag_part

    x = _promote_dtype(x)

    if type == 2:
        # The inverse of DST-II is a (rescaled) DST-III.
        return _dct_or_dst_type3(
            x, n=n, axis=axis, norm=norm, forward=False, dst=True
        )
    if type == 3:
        # The inverse of DST-III is a (rescaled) DST-II.
        return _dct_or_dst_type2(
            x, n=n, axis=axis, norm=norm, forward=False, dst=True
        )
    if type in [1, 4]:
        raise NotImplementedError(
            'Only DST-II and DST-III have been implemented.'
        )
    raise ValueError('invalid DST type')
625
+
626
+
627
+ def _iterable_of_int(x, name=None):
628
+ """Convert ``x`` to an iterable sequence of int."""
629
+ if isinstance(x, numbers.Number):
630
+ x = (x,)
631
+
632
+ try:
633
+ x = [operator.index(a) for a in x]
634
+ except TypeError as e:
635
+ name = name or 'value'
636
+ raise ValueError(
637
+ f'{name} must be a scalar or iterable of integers'
638
+ ) from e
639
+
640
+ return x
641
+
642
+
643
+ def _init_nd_shape_and_axes(x, shape, axes):
644
+ """Handles shape and axes arguments for nd transforms."""
645
+ noshape = shape is None
646
+ noaxes = axes is None
647
+
648
+ if not noaxes:
649
+ axes = _iterable_of_int(axes, 'axes')
650
+ axes = [a + x.ndim if a < 0 else a for a in axes]
651
+
652
+ if any(a >= x.ndim or a < 0 for a in axes):
653
+ raise ValueError('axes exceeds dimensionality of input')
654
+ if len(set(axes)) != len(axes):
655
+ raise ValueError('all axes must be unique')
656
+
657
+ if not noshape:
658
+ shape = _iterable_of_int(shape, 'shape')
659
+ nshape = len(shape)
660
+ if axes and len(axes) != nshape:
661
+ raise ValueError(
662
+ 'when given, axes and shape arguments'
663
+ ' have to be of the same length'
664
+ )
665
+ if noaxes:
666
+ if nshape > x.ndim:
667
+ raise ValueError('shape requires more axes than are present')
668
+ axes = range(x.ndim - len(shape), x.ndim)
669
+
670
+ shape = [x.shape[a] if s == -1 else s for s, a in zip(shape, axes)]
671
+ elif noaxes:
672
+ shape = list(x.shape)
673
+ axes = range(x.ndim)
674
+ else:
675
+ shape = [x.shape[a] for a in axes]
676
+
677
+ if any(s < 1 for s in shape):
678
+ raise ValueError(
679
+ f'invalid number of data points ({shape}) specified'
680
+ )
681
+
682
+ return shape, axes
683
+
684
+
685
@_fft._implements(_fft._scipy_fft.dctn)
def dctn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False):
    """Compute a multidimensional Discrete Cosine Transform.

    Parameters
    ----------
    x : cupy.ndarray
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT (see Notes). Default type is 2.
    s : int or array_like of ints or None, optional
        The shape of the result. If both `s` and `axes` are None, `s` is
        ``x.shape``; if `s` is None but `axes` is not None, then `s` is
        ``numpy.take(x.shape, axes, axis=0)``.
        If ``s[i] > x.shape[i]``, the ith dimension is padded with zeros;
        if ``s[i] < x.shape[i]``, it is truncated to length ``s[i]``.
        A -1 entry keeps the size of the corresponding dimension of `x`.
    axes : int or array_like of ints or None, optional
        Axes over which the DCT is computed. If not given, the last
        ``len(s)`` axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    y : cupy.ndarray of real
        The transformed input array.

    See Also
    --------
    :func:`scipy.fft.dctn`

    Notes
    -----
    For full details of the DCT types and normalization modes, as well as
    references, see `dct`.
    """
    if x.dtype.kind == 'c':
        # Complex input: transform the real and imaginary parts separately.
        real_part = dctn(x.real, type, s, axes, norm, overwrite_x)
        imag_part = dctn(x.imag, type, s, axes, norm, overwrite_x)
        return real_part + 1j * imag_part

    shape, axes = _init_nd_shape_and_axes(x, s, axes)
    x = _promote_dtype(x)

    if len(axes) == 0:
        return x

    # Separable transform: apply the 1D DCT along each requested axis.
    for length, ax in zip(shape, axes):
        x = dct(
            x, type=type, n=length, axis=ax, norm=norm,
            overwrite_x=overwrite_x
        )
    return x
743
+
744
+
745
@_fft._implements(_fft._scipy_fft.idctn)
def idctn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False):
    """Compute a multidimensional inverse Discrete Cosine Transform.

    Parameters
    ----------
    x : cupy.ndarray
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT (see Notes). Default type is 2.
    s : int or array_like of ints or None, optional
        The shape of the result. If both `s` and `axes` are None, `s` is
        ``x.shape``; if `s` is None but `axes` is not None, then `s` is
        ``numpy.take(x.shape, axes, axis=0)``.
        If ``s[i] > x.shape[i]``, the ith dimension is padded with zeros;
        if ``s[i] < x.shape[i]``, it is truncated to length ``s[i]``.
        A -1 entry keeps the size of the corresponding dimension of `x`.
    axes : int or array_like of ints or None, optional
        Axes over which the IDCT is computed. If not given, the last
        ``len(s)`` axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    y : cupy.ndarray of real
        The transformed input array.

    See Also
    --------
    :func:`scipy.fft.idctn`

    Notes
    -----
    For full details of the IDCT types and normalization modes, as well as
    references, see :func:`scipy.fft.idct`.
    """
    if x.dtype.kind == 'c':
        # Complex input: transform the real and imaginary parts separately.
        real_part = idctn(x.real, type, s, axes, norm, overwrite_x)
        imag_part = idctn(x.imag, type, s, axes, norm, overwrite_x)
        return real_part + 1j * imag_part

    shape, axes = _init_nd_shape_and_axes(x, s, axes)
    x = _promote_dtype(x)

    if len(axes) == 0:
        return x

    # Separable transform: apply the 1D IDCT along each requested axis.
    for length, ax in zip(shape, axes):
        x = idct(
            x, type=type, n=length, axis=ax, norm=norm,
            overwrite_x=overwrite_x
        )
    return x
803
+
804
+
805
@_fft._implements(_fft._scipy_fft.dstn)
def dstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False):
    """Compute a multidimensional Discrete Sine Transform.

    Parameters
    ----------
    x : cupy.ndarray
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DST (see Notes). Default type is 2.
    s : int or array_like of ints or None, optional
        The shape of the result. If both `s` and `axes` are None, `s` is
        ``x.shape``; if `s` is None but `axes` is not None, then `s` is
        ``numpy.take(x.shape, axes, axis=0)``.
        If ``s[i] > x.shape[i]``, the ith dimension is padded with zeros;
        if ``s[i] < x.shape[i]``, it is truncated to length ``s[i]``.
        A -1 entry keeps the size of the corresponding dimension of `x`.
    axes : int or array_like of ints or None, optional
        Axes over which the DST is computed. If not given, the last
        ``len(s)`` axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    y : cupy.ndarray of real
        The transformed input array.

    See Also
    --------
    :func:`scipy.fft.dstn`

    Notes
    -----
    For full details of the DST types and normalization modes, as well as
    references, see :func:`scipy.fft.dst`.
    """
    if x.dtype.kind == 'c':
        # Complex input: transform the real and imaginary parts separately.
        real_part = dstn(x.real, type, s, axes, norm, overwrite_x)
        imag_part = dstn(x.imag, type, s, axes, norm, overwrite_x)
        return real_part + 1j * imag_part

    shape, axes = _init_nd_shape_and_axes(x, s, axes)
    x = _promote_dtype(x)

    if len(axes) == 0:
        return x

    # Separable transform: apply the 1D DST along each requested axis.
    for length, ax in zip(shape, axes):
        x = dst(
            x, type=type, n=length, axis=ax, norm=norm,
            overwrite_x=overwrite_x
        )
    return x
863
+
864
+
865
@_fft._implements(_fft._scipy_fft.idstn)
def idstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False):
    """Compute a multidimensional inverse Discrete Sine Transform.

    Parameters
    ----------
    x : cupy.ndarray
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DST (see Notes). Default type is 2.
    s : int or array_like of ints or None, optional
        The shape of the result. If both `s` and `axes` are None, `s` is
        ``x.shape``; if `s` is None but `axes` is not None, then `s` is
        ``numpy.take(x.shape, axes, axis=0)``.
        If ``s[i] > x.shape[i]``, the ith dimension is padded with zeros;
        if ``s[i] < x.shape[i]``, it is truncated to length ``s[i]``.
        A -1 entry keeps the size of the corresponding dimension of `x`.
    axes : int or array_like of ints or None, optional
        Axes over which the IDST is computed. If not given, the last
        ``len(s)`` axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    y : cupy.ndarray of real
        The transformed input array.

    See Also
    --------
    :func:`scipy.fft.idstn`

    Notes
    -----
    For full details of the IDST types and normalization modes, as well as
    references, see :func:`scipy.fft.idst`.
    """
    if x.dtype.kind == 'c':
        # Complex input: transform the real and imaginary parts separately.
        real_part = idstn(x.real, type, s, axes, norm, overwrite_x)
        imag_part = idstn(x.imag, type, s, axes, norm, overwrite_x)
        return real_part + 1j * imag_part

    shape, axes = _init_nd_shape_and_axes(x, s, axes)
    x = _promote_dtype(x)

    if len(axes) == 0:
        return x

    # Separable transform: apply the 1D IDST along each requested axis.
    for length, ax in zip(shape, axes):
        x = idst(
            x, type=type, n=length, axis=ax, norm=norm,
            overwrite_x=overwrite_x
        )
    return x
vllm/lib/python3.10/site-packages/cupyx/scipy/linalg/__init__.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: NOQA
2
+ from cupyx.scipy.linalg._special_matrices import (
3
+ tri, tril, triu, toeplitz, circulant, hankel,
4
+ hadamard, leslie, kron, block_diag, companion,
5
+ helmert, hilbert, dft,
6
+ fiedler, fiedler_companion, convolution_matrix
7
+ )
8
+ from cupyx.scipy.linalg._solve_triangular import solve_triangular # NOQA
9
+ from cupyx.scipy.linalg._decomp_lu import lu, lu_factor, lu_solve # NOQA
10
+
11
+ # uarray backend support (NEP 31)
12
+ # The uarray feature for scipy.linalg is experimental.
13
+ # The interface can change in the future.
14
+ from cupyx.scipy.linalg._uarray import __ua_convert__ # NOQA
15
+ from cupyx.scipy.linalg._uarray import __ua_domain__ # NOQA
16
+ from cupyx.scipy.linalg._uarray import __ua_function__ # NOQA
17
+
18
+ from cupyx.scipy.linalg._array_utils import bandwidth # NOQA
19
+ from cupyx.scipy.linalg._matfuncs import khatri_rao # NOQA
20
+
21
+ from cupyx.scipy.linalg._matfuncs import expm # NOQA
vllm/lib/python3.10/site-packages/cupyx/scipy/linalg/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/linalg/__pycache__/_array_utils.cpython-310.pyc ADDED
Binary file (1.38 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/linalg/__pycache__/_decomp_lu.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/linalg/__pycache__/_matfuncs.cpython-310.pyc ADDED
Binary file (3.01 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/linalg/__pycache__/_solve_triangular.cpython-310.pyc ADDED
Binary file (2.91 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/linalg/__pycache__/_special_matrices.cpython-310.pyc ADDED
Binary file (18.9 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/linalg/__pycache__/_uarray.cpython-310.pyc ADDED
Binary file (2.03 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/linalg/_array_utils.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cupy
2
+ from cupy.linalg import _util
3
+
4
# Elementwise kernel mapping each matrix cell to its diagonal offset
# ``r - c`` when the cell is nonzero, and 0 otherwise.  Positive offsets
# lie below the main diagonal, negative offsets above it; the extrema of
# this map give the matrix bandwidth (see ``bandwidth`` below).
_kernel_cupy_band_pos_c = cupy.ElementwiseKernel(
    'T A, N r, N c',
    'N out',
    'out = A != 0 ? r - c : 0',
    'cupyx_scipy_linalg_band_pos'
)
11
+
12
+
13
def bandwidth(a):
    """Return the lower and upper bandwidth of a 2D numeric array.

    Parameters
    ----------
    a : ndarray
        Input array of size (M, N)

    Returns
    -------
    lu : tuple
        2-tuple of ints indicating the lower and upper bandwidth. A zero
        denotes no sub- or super-diagonal on that side (triangular), and,
        say for M rows (M-1) means that side is full. Same example applies
        to the upper triangular part with (N-1).

    .. seealso:: :func:`scipy.linalg.bandwidth`
    """
    a = cupy.asarray(a)

    if a.size == 0:
        return (0, 0)
    _util._assert_2d(a)

    # Work on a C-contiguous view.  Transposing an F-contiguous array
    # yields one, but swaps the roles of the lower and upper bands.
    transposed = a.flags['F_CONTIGUOUS']
    mat = a.T if transposed else a

    # Map every nonzero cell to its diagonal offset (row - col); the
    # extrema of that map are the band limits.
    rows, cols = cupy.mgrid[0:mat.shape[0], 0:mat.shape[1]]
    offsets = _kernel_cupy_band_pos_c(mat, rows, cols)

    below = int(cupy.amax(offsets))
    above = -int(cupy.amin(offsets))
    if transposed:
        below, above = above, below
    return below, above