ZTWHHH committed on
Commit
5259b0c
·
verified ·
1 Parent(s): 9508b36

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. parrot/lib/python3.10/site-packages/scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so +3 -0
  3. parrot/lib/python3.10/site-packages/scipy/interpolate/tests/data/gcvspl.npz +3 -0
  4. parrot/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_15_data.npz +3 -0
  5. parrot/lib/python3.10/site-packages/scipy/spatial/_distance_wrap.cpython-310-x86_64-linux-gnu.so +3 -0
  6. parrot/lib/python3.10/site-packages/scipy/special/_precompute/cosine_cdf.py +17 -0
  7. parrot/lib/python3.10/site-packages/scipy/special/_precompute/expn_asy.py +54 -0
  8. parrot/lib/python3.10/site-packages/scipy/special/_precompute/gammainc_asy.py +116 -0
  9. parrot/lib/python3.10/site-packages/scipy/special/_precompute/hyp2f1_data.py +484 -0
  10. parrot/lib/python3.10/site-packages/scipy/special/_precompute/loggamma.py +43 -0
  11. parrot/lib/python3.10/site-packages/scipy/special/_precompute/utils.py +38 -0
  12. parrot/lib/python3.10/site-packages/scipy/special/_precompute/wrightomega.py +41 -0
  13. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_copy_from_and_resize_compositeexplicitautograd_dispatch.h +24 -0
  14. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_ops.h +39 -0
  15. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_abs.h +44 -0
  16. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_addcdiv_cpu_dispatch.h +28 -0
  17. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_round_ops.h +50 -0
  18. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sign_cpu_dispatch.h +24 -0
  19. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sdp_choice_cpu_dispatch.h +23 -0
  20. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_indices_native.h +21 -0
  21. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_is_any_true_native.h +21 -0
  22. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_copy_ops.h +39 -0
  23. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_scale_native.h +22 -0
  24. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias_cuda_dispatch.h +24 -0
  25. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad_cuda_dispatch.h +23 -0
  26. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_compositeimplicitautograd_dispatch.h +23 -0
  27. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_triton_multi_head_attention_cuda_dispatch.h +23 -0
  28. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_ops.h +50 -0
  29. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/addcdiv_compositeexplicitautogradnonfunctional_dispatch.h +24 -0
  30. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_backward_ops.h +28 -0
  31. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_meta_dispatch.h +25 -0
  32. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor_compositeexplicitautograd_dispatch.h +29 -0
  33. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor_compositeexplicitautogradnonfunctional_dispatch.h +24 -0
  34. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor_native.h +28 -0
  35. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cummaxmin_backward_native.h +21 -0
  36. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cumsum_cpu_dispatch.h +26 -0
  37. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/equal_native.h +23 -0
  38. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/exponential_cuda_dispatch.h +23 -0
  39. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight.h +30 -0
  40. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/feature_dropout_compositeimplicitautograd_dispatch.h +24 -0
  41. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft2_compositeimplicitautograd_dispatch.h +28 -0
  42. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit_compositeimplicitautograd_dispatch.h +24 -0
  43. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/index_ops.h +39 -0
  44. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_backward_ops.h +28 -0
  45. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_compositeexplicitautograd_dispatch.h +24 -0
  46. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/inverse_compositeimplicitautograd_dispatch.h +25 -0
  47. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/is_floating_point_compositeimplicitautograd_dispatch.h +23 -0
  48. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex.h +39 -0
  49. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linear_ops.h +39 -0
  50. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward.h +47 -0
.gitattributes CHANGED
@@ -1619,3 +1619,6 @@ vllm/lib/python3.10/site-packages/pycountry/locales/sv/LC_MESSAGES/iso639-3.mo f
1619
  vllm/lib/python3.10/site-packages/pycountry/locales/sv/LC_MESSAGES/iso3166-2.mo filter=lfs diff=lfs merge=lfs -text
1620
  vllm/lib/python3.10/site-packages/pycountry/locales/or/LC_MESSAGES/iso639-3.mo filter=lfs diff=lfs merge=lfs -text
1621
  vllm/lib/python3.10/site-packages/pycountry/locales/uk/LC_MESSAGES/iso639-3.mo filter=lfs diff=lfs merge=lfs -text
 
 
 
 
1619
  vllm/lib/python3.10/site-packages/pycountry/locales/sv/LC_MESSAGES/iso3166-2.mo filter=lfs diff=lfs merge=lfs -text
1620
  vllm/lib/python3.10/site-packages/pycountry/locales/or/LC_MESSAGES/iso639-3.mo filter=lfs diff=lfs merge=lfs -text
1621
  vllm/lib/python3.10/site-packages/pycountry/locales/uk/LC_MESSAGES/iso639-3.mo filter=lfs diff=lfs merge=lfs -text
1622
+ vllm/lib/python3.10/site-packages/pycountry/locales/fr/LC_MESSAGES/iso639-3.mo filter=lfs diff=lfs merge=lfs -text
1623
+ parrot/lib/python3.10/site-packages/scipy/spatial/_distance_wrap.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1624
+ parrot/lib/python3.10/site-packages/scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
parrot/lib/python3.10/site-packages/scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bcb5dd31e958898d23384218dccbe1a7616a621750707f11a0a2600992e19cf6
3
+ size 110000
parrot/lib/python3.10/site-packages/scipy/interpolate/tests/data/gcvspl.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03ce8155a6cba0c1bf0a2441a10c228191f916dec36cb820723429811296bba8
3
+ size 3138
parrot/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_15_data.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13f3e1491a876bbf59d7ea10ad29c1f9b5996a2ab99216f31d5bfcd659012c1e
3
+ size 34462
parrot/lib/python3.10/site-packages/scipy/spatial/_distance_wrap.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cc50ceaffe3c8fcdb3066310456a4c77655f809fa27203dedd51fc012e2282a5
3
+ size 113256
parrot/lib/python3.10/site-packages/scipy/special/_precompute/cosine_cdf.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import mpmath
2
+
3
+
4
+ def f(x):
5
+ return (mpmath.pi + x + mpmath.sin(x)) / (2*mpmath.pi)
6
+
7
+
8
+ # Note: 40 digits might be overkill; a few more digits than the default
9
+ # might be sufficient.
10
+ mpmath.mp.dps = 40
11
+ ts = mpmath.taylor(f, -mpmath.pi, 20)
12
+ p, q = mpmath.pade(ts, 9, 10)
13
+
14
+ p = [float(c) for c in p]
15
+ q = [float(c) for c in q]
16
+ print('p =', p)
17
+ print('q =', q)
parrot/lib/python3.10/site-packages/scipy/special/_precompute/expn_asy.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Precompute the polynomials for the asymptotic expansion of the
2
+ generalized exponential integral.
3
+
4
+ Sources
5
+ -------
6
+ [1] NIST, Digital Library of Mathematical Functions,
7
+ https://dlmf.nist.gov/8.20#ii
8
+
9
+ """
10
+ import os
11
+
12
+ try:
13
+ import sympy
14
+ from sympy import Poly
15
+ x = sympy.symbols('x')
16
+ except ImportError:
17
+ pass
18
+
19
+
20
+ def generate_A(K):
21
+ A = [Poly(1, x)]
22
+ for k in range(K):
23
+ A.append(Poly(1 - 2*k*x, x)*A[k] + Poly(x*(x + 1))*A[k].diff())
24
+ return A
25
+
26
+
27
+ WARNING = """\
28
+ /* This file was automatically generated by _precompute/expn_asy.py.
29
+ * Do not edit it manually!
30
+ */
31
+ """
32
+
33
+
34
+ def main():
35
+ print(__doc__)
36
+ fn = os.path.join('..', 'cephes', 'expn.h')
37
+
38
+ K = 12
39
+ A = generate_A(K)
40
+ with open(fn + '.new', 'w') as f:
41
+ f.write(WARNING)
42
+ f.write(f"#define nA {len(A)}\n")
43
+ for k, Ak in enumerate(A):
44
+ tmp = ', '.join([str(x.evalf(18)) for x in Ak.coeffs()])
45
+ f.write(f"static const double A{k}[] = {{{tmp}}};\n")
46
+ tmp = ", ".join([f"A{k}" for k in range(K + 1)])
47
+ f.write(f"static const double *A[] = {{{tmp}}};\n")
48
+ tmp = ", ".join([str(Ak.degree()) for Ak in A])
49
+ f.write(f"static const int Adegs[] = {{{tmp}}};\n")
50
+ os.rename(fn + '.new', fn)
51
+
52
+
53
+ if __name__ == "__main__":
54
+ main()
parrot/lib/python3.10/site-packages/scipy/special/_precompute/gammainc_asy.py ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Precompute coefficients of Temme's asymptotic expansion for gammainc.
3
+
4
+ This takes about 8 hours to run on a 2.3 GHz Macbook Pro with 4GB ram.
5
+
6
+ Sources:
7
+ [1] NIST, "Digital Library of Mathematical Functions",
8
+ https://dlmf.nist.gov/
9
+
10
+ """
11
+ import os
12
+ from scipy.special._precompute.utils import lagrange_inversion
13
+
14
+ try:
15
+ import mpmath as mp
16
+ except ImportError:
17
+ pass
18
+
19
+
20
+ def compute_a(n):
21
+ """a_k from DLMF 5.11.6"""
22
+ a = [mp.sqrt(2)/2]
23
+ for k in range(1, n):
24
+ ak = a[-1]/k
25
+ for j in range(1, len(a)):
26
+ ak -= a[j]*a[-j]/(j + 1)
27
+ ak /= a[0]*(1 + mp.mpf(1)/(k + 1))
28
+ a.append(ak)
29
+ return a
30
+
31
+
32
+ def compute_g(n):
33
+ """g_k from DLMF 5.11.3/5.11.5"""
34
+ a = compute_a(2*n)
35
+ g = [mp.sqrt(2)*mp.rf(0.5, k)*a[2*k] for k in range(n)]
36
+ return g
37
+
38
+
39
+ def eta(lam):
40
+ """Function from DLMF 8.12.1 shifted to be centered at 0."""
41
+ if lam > 0:
42
+ return mp.sqrt(2*(lam - mp.log(lam + 1)))
43
+ elif lam < 0:
44
+ return -mp.sqrt(2*(lam - mp.log(lam + 1)))
45
+ else:
46
+ return 0
47
+
48
+
49
+ def compute_alpha(n):
50
+ """alpha_n from DLMF 8.12.13"""
51
+ coeffs = mp.taylor(eta, 0, n - 1)
52
+ return lagrange_inversion(coeffs)
53
+
54
+
55
+ def compute_d(K, N):
56
+ """d_{k, n} from DLMF 8.12.12"""
57
+ M = N + 2*K
58
+ d0 = [-mp.mpf(1)/3]
59
+ alpha = compute_alpha(M + 2)
60
+ for n in range(1, M):
61
+ d0.append((n + 2)*alpha[n+2])
62
+ d = [d0]
63
+ g = compute_g(K)
64
+ for k in range(1, K):
65
+ dk = []
66
+ for n in range(M - 2*k):
67
+ dk.append((-1)**k*g[k]*d[0][n] + (n + 2)*d[k-1][n+2])
68
+ d.append(dk)
69
+ for k in range(K):
70
+ d[k] = d[k][:N]
71
+ return d
72
+
73
+
74
+ header = \
75
+ r"""/* This file was automatically generated by _precomp/gammainc.py.
76
+ * Do not edit it manually!
77
+ */
78
+
79
+ #ifndef IGAM_H
80
+ #define IGAM_H
81
+
82
+ #define K {}
83
+ #define N {}
84
+
85
+ static const double d[K][N] =
86
+ {{"""
87
+
88
+ footer = \
89
+ r"""
90
+ #endif
91
+ """
92
+
93
+
94
+ def main():
95
+ print(__doc__)
96
+ K = 25
97
+ N = 25
98
+ with mp.workdps(50):
99
+ d = compute_d(K, N)
100
+ fn = os.path.join(os.path.dirname(__file__), '..', 'cephes', 'igam.h')
101
+ with open(fn + '.new', 'w') as f:
102
+ f.write(header.format(K, N))
103
+ for k, row in enumerate(d):
104
+ row = [mp.nstr(x, 17, min_fixed=0, max_fixed=0) for x in row]
105
+ f.write('{')
106
+ f.write(", ".join(row))
107
+ if k < K - 1:
108
+ f.write('},\n')
109
+ else:
110
+ f.write('}};\n')
111
+ f.write(footer)
112
+ os.rename(fn + '.new', fn)
113
+
114
+
115
+ if __name__ == "__main__":
116
+ main()
parrot/lib/python3.10/site-packages/scipy/special/_precompute/hyp2f1_data.py ADDED
@@ -0,0 +1,484 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """This script evaluates scipy's implementation of hyp2f1 against mpmath's.
2
+
3
+ Author: Albert Steppi
4
+
5
+ This script is long running and generates a large output file. With default
6
+ arguments, the generated file is roughly 700MB in size and it takes around
7
+ 40 minutes using an Intel(R) Core(TM) i5-8250U CPU with n_jobs set to 8
8
+ (full utilization). There are optional arguments which can be used to restrict
9
+ (or enlarge) the computations performed. These are described below.
10
+ The output of this script can be analyzed to identify suitable test cases and
11
+ to find parameter and argument regions where hyp2f1 needs to be improved.
12
+
13
+ The script has one mandatory positional argument for specifying the path to
14
+ the location where the output file is to be placed, and 4 optional arguments
15
+ --n_jobs, --grid_size, --regions, and --parameter_groups. --n_jobs specifies
16
+ the number of processes to use if running in parallel. The default value is 1.
17
+ The other optional arguments are explained below.
18
+
19
+ Produces a tab separated values file with 11 columns. The first four columns
20
+ contain the parameters a, b, c and the argument z. The next two contain |z| and
21
+ a region code for which region of the complex plane belongs to. The regions are
22
+
23
+ 0) z == 1
24
+ 1) |z| < 0.9 and real(z) >= 0
25
+ 2) |z| <= 1 and real(z) < 0
26
+ 3) 0.9 <= |z| <= 1 and |1 - z| < 0.9:
27
+ 4) 0.9 <= |z| <= 1 and |1 - z| >= 0.9 and real(z) >= 0:
28
+ 5) 1 < |z| < 1.1 and |1 - z| >= 0.9 and real(z) >= 0
29
+ 6) |z| > 1 and not in 5)
30
+
31
+ The --regions optional argument allows the user to specify a list of regions
32
+ to which computation will be restricted.
33
+
34
+ Parameters a, b, c are taken from a 10 * 10 * 10 grid with values at
35
+
36
+ -16, -8, -4, -2, -1, 1, 2, 4, 8, 16
37
+
38
+ with random perturbations applied.
39
+
40
+ There are 9 parameter groups handling the following cases.
41
+
42
+ 1) A, B, C, B - A, C - A, C - B, C - A - B all non-integral.
43
+ 2) B - A integral
44
+ 3) C - A integral
45
+ 4) C - B integral
46
+ 5) C - A - B integral
47
+ 6) A integral
48
+ 7) B integral
49
+ 8) C integral
50
+ 9) Wider range with c - a - b > 0.
51
+
52
+ The seventh column of the output file is an integer between 1 and 9 specifying
53
+ the parameter group as above.
54
+
55
+ The --parameter_groups optional argument allows the user to specify a list of
56
+ parameter groups to which computation will be restricted.
57
+
58
+ The argument z is taken from a grid in the box
59
+ -box_size <= real(z) <= box_size, -box_size <= imag(z) <= box_size.
60
+ with grid size specified using the optional command line argument --grid_size,
61
+ and box_size specified with the command line argument --box_size.
62
+ The default value of grid_size is 20 and the default value of box_size is 2.0,
63
+ yielding a 20 * 20 grid in the box with corners -2-2j, -2+2j, 2-2j, 2+2j.
64
+
65
+ The final four columns have the expected value of hyp2f1 for the given
66
+ parameters and argument as calculated with mpmath, the observed value
67
+ calculated with scipy's hyp2f1, the relative error, and the absolute error.
68
+
69
+ As special cases of hyp2f1 are moved from the original Fortran implementation
70
+ into Cython, this script can be used to ensure that no regressions occur and
71
+ to point out where improvements are needed.
72
+ """
73
+
74
+
75
+ import os
76
+ import csv
77
+ import argparse
78
+ import numpy as np
79
+ from itertools import product
80
+ from multiprocessing import Pool
81
+
82
+
83
+ from scipy.special import hyp2f1
84
+ from scipy.special.tests.test_hyp2f1 import mp_hyp2f1
85
+
86
+
87
+ def get_region(z):
88
+ """Assign numbers for regions where hyp2f1 must be handled differently."""
89
+ if z == 1 + 0j:
90
+ return 0
91
+ elif abs(z) < 0.9 and z.real >= 0:
92
+ return 1
93
+ elif abs(z) <= 1 and z.real < 0:
94
+ return 2
95
+ elif 0.9 <= abs(z) <= 1 and abs(1 - z) < 0.9:
96
+ return 3
97
+ elif 0.9 <= abs(z) <= 1 and abs(1 - z) >= 0.9:
98
+ return 4
99
+ elif 1 < abs(z) < 1.1 and abs(1 - z) >= 0.9 and z.real >= 0:
100
+ return 5
101
+ else:
102
+ return 6
103
+
104
+
105
+ def get_result(a, b, c, z, group):
106
+ """Get results for given parameter and value combination."""
107
+ expected, observed = mp_hyp2f1(a, b, c, z), hyp2f1(a, b, c, z)
108
+ if (
109
+ np.isnan(observed) and np.isnan(expected) or
110
+ expected == observed
111
+ ):
112
+ relative_error = 0.0
113
+ absolute_error = 0.0
114
+ elif np.isnan(observed):
115
+ # Set error to infinity if result is nan when not expected to be.
116
+ # Makes results easier to interpret.
117
+ relative_error = float("inf")
118
+ absolute_error = float("inf")
119
+ else:
120
+ absolute_error = abs(expected - observed)
121
+ relative_error = absolute_error / abs(expected)
122
+
123
+ return (
124
+ a,
125
+ b,
126
+ c,
127
+ z,
128
+ abs(z),
129
+ get_region(z),
130
+ group,
131
+ expected,
132
+ observed,
133
+ relative_error,
134
+ absolute_error,
135
+ )
136
+
137
+
138
+ def get_result_no_mp(a, b, c, z, group):
139
+ """Get results for given parameter and value combination."""
140
+ expected, observed = complex('nan'), hyp2f1(a, b, c, z)
141
+ relative_error, absolute_error = float('nan'), float('nan')
142
+ return (
143
+ a,
144
+ b,
145
+ c,
146
+ z,
147
+ abs(z),
148
+ get_region(z),
149
+ group,
150
+ expected,
151
+ observed,
152
+ relative_error,
153
+ absolute_error,
154
+ )
155
+
156
+
157
+ def get_results(params, Z, n_jobs=1, compute_mp=True):
158
+ """Batch compute results for multiple parameter and argument values.
159
+
160
+ Parameters
161
+ ----------
162
+ params : iterable
163
+ iterable of tuples of floats (a, b, c) specifying parameter values
164
+ a, b, c for hyp2f1
165
+ Z : iterable of complex
166
+ Arguments at which to evaluate hyp2f1
167
+ n_jobs : Optional[int]
168
+ Number of jobs for parallel execution.
169
+
170
+ Returns
171
+ -------
172
+ list
173
+ List of tuples of results values. See return value in source code
174
+ of `get_result`.
175
+ """
176
+ input_ = (
177
+ (a, b, c, z, group) for (a, b, c, group), z in product(params, Z)
178
+ )
179
+
180
+ with Pool(n_jobs) as pool:
181
+ rows = pool.starmap(
182
+ get_result if compute_mp else get_result_no_mp,
183
+ input_
184
+ )
185
+ return rows
186
+
187
+
188
+ def _make_hyp2f1_test_case(a, b, c, z, rtol):
189
+ """Generate string for single test case as used in test_hyp2f1.py."""
190
+ expected = mp_hyp2f1(a, b, c, z)
191
+ return (
192
+ " pytest.param(\n"
193
+ " Hyp2f1TestCase(\n"
194
+ f" a={a},\n"
195
+ f" b={b},\n"
196
+ f" c={c},\n"
197
+ f" z={z},\n"
198
+ f" expected={expected},\n"
199
+ f" rtol={rtol},\n"
200
+ " ),\n"
201
+ " ),"
202
+ )
203
+
204
+
205
+ def make_hyp2f1_test_cases(rows):
206
+ """Generate string for a list of test cases for test_hyp2f1.py.
207
+
208
+ Parameters
209
+ ----------
210
+ rows : list
211
+ List of lists of the form [a, b, c, z, rtol] where a, b, c, z are
212
+ parameters and the argument for hyp2f1 and rtol is an expected
213
+ relative error for the associated test case.
214
+
215
+ Returns
216
+ -------
217
+ str
218
+ String for a list of test cases. The output string can be printed
219
+ or saved to a file and then copied into an argument for
220
+ `pytest.mark.parameterize` within `scipy.special.tests.test_hyp2f1.py`.
221
+ """
222
+ result = "[\n"
223
+ result += '\n'.join(
224
+ _make_hyp2f1_test_case(a, b, c, z, rtol)
225
+ for a, b, c, z, rtol in rows
226
+ )
227
+ result += "\n]"
228
+ return result
229
+
230
+
231
+ def main(
232
+ outpath,
233
+ n_jobs=1,
234
+ box_size=2.0,
235
+ grid_size=20,
236
+ regions=None,
237
+ parameter_groups=None,
238
+ compute_mp=True,
239
+ ):
240
+ outpath = os.path.realpath(os.path.expanduser(outpath))
241
+
242
+ random_state = np.random.RandomState(1234)
243
+ # Parameters a, b, c selected near these values.
244
+ root_params = np.array(
245
+ [-16, -8, -4, -2, -1, 1, 2, 4, 8, 16]
246
+ )
247
+ # Perturbations to apply to root values.
248
+ perturbations = 0.1 * random_state.random_sample(
249
+ size=(3, len(root_params))
250
+ )
251
+
252
+ params = []
253
+ # Parameter group 1
254
+ # -----------------
255
+ # No integer differences. This has been confirmed for the above seed.
256
+ A = root_params + perturbations[0, :]
257
+ B = root_params + perturbations[1, :]
258
+ C = root_params + perturbations[2, :]
259
+ params.extend(
260
+ sorted(
261
+ ((a, b, c, 1) for a, b, c in product(A, B, C)),
262
+ key=lambda x: max(abs(x[0]), abs(x[1])),
263
+ )
264
+ )
265
+
266
+ # Parameter group 2
267
+ # -----------------
268
+ # B - A an integer
269
+ A = root_params + 0.5
270
+ B = root_params + 0.5
271
+ C = root_params + perturbations[1, :]
272
+ params.extend(
273
+ sorted(
274
+ ((a, b, c, 2) for a, b, c in product(A, B, C)),
275
+ key=lambda x: max(abs(x[0]), abs(x[1])),
276
+ )
277
+ )
278
+
279
+ # Parameter group 3
280
+ # -----------------
281
+ # C - A an integer
282
+ A = root_params + 0.5
283
+ B = root_params + perturbations[1, :]
284
+ C = root_params + 0.5
285
+ params.extend(
286
+ sorted(
287
+ ((a, b, c, 3) for a, b, c in product(A, B, C)),
288
+ key=lambda x: max(abs(x[0]), abs(x[1])),
289
+ )
290
+ )
291
+
292
+ # Parameter group 4
293
+ # -----------------
294
+ # C - B an integer
295
+ A = root_params + perturbations[0, :]
296
+ B = root_params + 0.5
297
+ C = root_params + 0.5
298
+ params.extend(
299
+ sorted(
300
+ ((a, b, c, 4) for a, b, c in product(A, B, C)),
301
+ key=lambda x: max(abs(x[0]), abs(x[1])),
302
+ )
303
+ )
304
+
305
+ # Parameter group 5
306
+ # -----------------
307
+ # C - A - B an integer
308
+ A = root_params + 0.25
309
+ B = root_params + 0.25
310
+ C = root_params + 0.5
311
+ params.extend(
312
+ sorted(
313
+ ((a, b, c, 5) for a, b, c in product(A, B, C)),
314
+ key=lambda x: max(abs(x[0]), abs(x[1])),
315
+ )
316
+ )
317
+
318
+ # Parameter group 6
319
+ # -----------------
320
+ # A an integer
321
+ A = root_params
322
+ B = root_params + perturbations[0, :]
323
+ C = root_params + perturbations[1, :]
324
+ params.extend(
325
+ sorted(
326
+ ((a, b, c, 6) for a, b, c in product(A, B, C)),
327
+ key=lambda x: max(abs(x[0]), abs(x[1])),
328
+ )
329
+ )
330
+
331
+ # Parameter group 7
332
+ # -----------------
333
+ # B an integer
334
+ A = root_params + perturbations[0, :]
335
+ B = root_params
336
+ C = root_params + perturbations[1, :]
337
+ params.extend(
338
+ sorted(
339
+ ((a, b, c, 7) for a, b, c in product(A, B, C)),
340
+ key=lambda x: max(abs(x[0]), abs(x[1])),
341
+ )
342
+ )
343
+
344
+ # Parameter group 8
345
+ # -----------------
346
+ # C an integer
347
+ A = root_params + perturbations[0, :]
348
+ B = root_params + perturbations[1, :]
349
+ C = root_params
350
+ params.extend(
351
+ sorted(
352
+ ((a, b, c, 8) for a, b, c in product(A, B, C)),
353
+ key=lambda x: max(abs(x[0]), abs(x[1])),
354
+ )
355
+ )
356
+
357
+ # Parameter group 9
358
+ # -----------------
359
+ # Wide range of magnitudes, c - a - b > 0.
360
+ phi = (1 + np.sqrt(5))/2
361
+ P = phi**np.arange(16)
362
+ P = np.hstack([-P, P])
363
+ group_9_params = sorted(
364
+ (
365
+ (a, b, c, 9) for a, b, c in product(P, P, P) if c - a - b > 0
366
+ ),
367
+ key=lambda x: max(abs(x[0]), abs(x[1])),
368
+ )
369
+
370
+ if parameter_groups is not None:
371
+ # Group 9 params only used if specified in arguments.
372
+ params.extend(group_9_params)
373
+ params = [
374
+ (a, b, c, group) for a, b, c, group in params
375
+ if group in parameter_groups
376
+ ]
377
+
378
+ # grid_size * grid_size grid in box with corners
379
+ # -2 - 2j, -2 + 2j, 2 - 2j, 2 + 2j
380
+ X, Y = np.meshgrid(
381
+ np.linspace(-box_size, box_size, grid_size),
382
+ np.linspace(-box_size, box_size, grid_size)
383
+ )
384
+ Z = X + Y * 1j
385
+ Z = Z.flatten().tolist()
386
+ # Add z = 1 + 0j (region 0).
387
+ Z.append(1 + 0j)
388
+ if regions is not None:
389
+ Z = [z for z in Z if get_region(z) in regions]
390
+
391
+ # Evaluate scipy and mpmath's hyp2f1 for all parameter combinations
392
+ # above against all arguments in the grid Z
393
+ rows = get_results(params, Z, n_jobs=n_jobs, compute_mp=compute_mp)
394
+
395
+ with open(outpath, "w", newline="") as f:
396
+ writer = csv.writer(f, delimiter="\t")
397
+ writer.writerow(
398
+ [
399
+ "a",
400
+ "b",
401
+ "c",
402
+ "z",
403
+ "|z|",
404
+ "region",
405
+ "parameter_group",
406
+ "expected", # mpmath's hyp2f1
407
+ "observed", # scipy's hyp2f1
408
+ "relative_error",
409
+ "absolute_error",
410
+ ]
411
+ )
412
+ for row in rows:
413
+ writer.writerow(row)
414
+
415
+
416
+ if __name__ == "__main__":
417
+ parser = argparse.ArgumentParser(
418
+ description="Test scipy's hyp2f1 against mpmath's on a grid in the"
419
+ " complex plane over a grid of parameter values. Saves output to file"
420
+ " specified in positional argument \"outpath\"."
421
+ " Caution: With default arguments, the generated output file is"
422
+ " roughly 700MB in size. Script may take several hours to finish if"
423
+ " \"--n_jobs\" is set to 1."
424
+ )
425
+ parser.add_argument(
426
+ "outpath", type=str, help="Path to output tsv file."
427
+ )
428
+ parser.add_argument(
429
+ "--n_jobs",
430
+ type=int,
431
+ default=1,
432
+ help="Number of jobs for multiprocessing.",
433
+ )
434
+ parser.add_argument(
435
+ "--box_size",
436
+ type=float,
437
+ default=2.0,
438
+ help="hyp2f1 is evaluated in box of side_length 2*box_size centered"
439
+ " at the origin."
440
+ )
441
+ parser.add_argument(
442
+ "--grid_size",
443
+ type=int,
444
+ default=20,
445
+ help="hyp2f1 is evaluated on grid_size * grid_size grid in box of side"
446
+ " length 2*box_size centered at the origin."
447
+ )
448
+ parser.add_argument(
449
+ "--parameter_groups",
450
+ type=int,
451
+ nargs='+',
452
+ default=None,
453
+ help="Restrict to supplied parameter groups. See the Docstring for"
454
+ " this module for more info on parameter groups. Calculate for all"
455
+ " parameter groups by default."
456
+ )
457
+ parser.add_argument(
458
+ "--regions",
459
+ type=int,
460
+ nargs='+',
461
+ default=None,
462
+ help="Restrict to argument z only within the supplied regions. See"
463
+ " the Docstring for this module for more info on regions. Calculate"
464
+ " for all regions by default."
465
+ )
466
+ parser.add_argument(
467
+ "--no_mp",
468
+ action='store_true',
469
+ help="If this flag is set, do not compute results with mpmath. Saves"
470
+ " time if results have already been computed elsewhere. Fills in"
471
+ " \"expected\" column with None values."
472
+ )
473
+ args = parser.parse_args()
474
+ compute_mp = not args.no_mp
475
+ print(args.parameter_groups)
476
+ main(
477
+ args.outpath,
478
+ n_jobs=args.n_jobs,
479
+ box_size=args.box_size,
480
+ grid_size=args.grid_size,
481
+ parameter_groups=args.parameter_groups,
482
+ regions=args.regions,
483
+ compute_mp=compute_mp,
484
+ )
parrot/lib/python3.10/site-packages/scipy/special/_precompute/loggamma.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Precompute series coefficients for log-Gamma."""
2
+
3
+ try:
4
+ import mpmath
5
+ except ImportError:
6
+ pass
7
+
8
+
9
+ def stirling_series(N):
10
+ with mpmath.workdps(100):
11
+ coeffs = [mpmath.bernoulli(2*n)/(2*n*(2*n - 1))
12
+ for n in range(1, N + 1)]
13
+ return coeffs
14
+
15
+
16
+ def taylor_series_at_1(N):
17
+ coeffs = []
18
+ with mpmath.workdps(100):
19
+ coeffs.append(-mpmath.euler)
20
+ for n in range(2, N + 1):
21
+ coeffs.append((-1)**n*mpmath.zeta(n)/n)
22
+ return coeffs
23
+
24
+
25
+ def main():
26
+ print(__doc__)
27
+ print()
28
+ stirling_coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0)
29
+ for x in stirling_series(8)[::-1]]
30
+ taylor_coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0)
31
+ for x in taylor_series_at_1(23)[::-1]]
32
+ print("Stirling series coefficients")
33
+ print("----------------------------")
34
+ print("\n".join(stirling_coeffs))
35
+ print()
36
+ print("Taylor series coefficients")
37
+ print("--------------------------")
38
+ print("\n".join(taylor_coeffs))
39
+ print()
40
+
41
+
42
+ if __name__ == '__main__':
43
+ main()
parrot/lib/python3.10/site-packages/scipy/special/_precompute/utils.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ try:
2
+ import mpmath as mp
3
+ except ImportError:
4
+ pass
5
+
6
+ try:
7
+ from sympy.abc import x
8
+ except ImportError:
9
+ pass
10
+
11
+
12
+ def lagrange_inversion(a):
13
+ """Given a series
14
+
15
+ f(x) = a[1]*x + a[2]*x**2 + ... + a[n-1]*x**(n - 1),
16
+
17
+ use the Lagrange inversion formula to compute a series
18
+
19
+ g(x) = b[1]*x + b[2]*x**2 + ... + b[n-1]*x**(n - 1)
20
+
21
+ so that f(g(x)) = g(f(x)) = x mod x**n. We must have a[0] = 0, so
22
+ necessarily b[0] = 0 too.
23
+
24
+ The algorithm is naive and could be improved, but speed isn't an
25
+ issue here and it's easy to read.
26
+
27
+ """
28
+ n = len(a)
29
+ f = sum(a[i]*x**i for i in range(n))
30
+ h = (x/f).series(x, 0, n).removeO()
31
+ hpower = [h**0]
32
+ for k in range(n):
33
+ hpower.append((hpower[-1]*h).expand())
34
+ b = [mp.mpf(0)]
35
+ for k in range(1, n):
36
+ b.append(hpower[k].coeff(x, k - 1)/k)
37
+ b = [mp.mpf(x) for x in b]
38
+ return b
parrot/lib/python3.10/site-packages/scipy/special/_precompute/wrightomega.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ try:
4
+ import mpmath
5
+ except ImportError:
6
+ pass
7
+
8
+
9
+ def mpmath_wrightomega(x):
10
+ return mpmath.lambertw(mpmath.exp(x), mpmath.mpf('-0.5'))
11
+
12
+
13
+ def wrightomega_series_error(x):
14
+ series = x
15
+ desired = mpmath_wrightomega(x)
16
+ return abs(series - desired) / desired
17
+
18
+
19
+ def wrightomega_exp_error(x):
20
+ exponential_approx = mpmath.exp(x)
21
+ desired = mpmath_wrightomega(x)
22
+ return abs(exponential_approx - desired) / desired
23
+
24
+
25
+ def main():
26
+ desired_error = 2 * np.finfo(float).eps
27
+ print('Series Error')
28
+ for x in [1e5, 1e10, 1e15, 1e20]:
29
+ with mpmath.workdps(100):
30
+ error = wrightomega_series_error(x)
31
+ print(x, error, error < desired_error)
32
+
33
+ print('Exp error')
34
+ for x in [-10, -25, -50, -100, -200, -400, -700, -740]:
35
+ with mpmath.workdps(100):
36
+ error = wrightomega_exp_error(x)
37
+ print(x, error, error < desired_error)
38
+
39
+
40
+ if __name__ == '__main__':
41
+ main()
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_copy_from_and_resize_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & _copy_from_and_resize_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & dst);
21
+ TORCH_API at::Tensor & _copy_from_and_resize_outf(const at::Tensor & self, const at::Tensor & dst, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _fake_quantize_per_tensor_affine_cachemask_tensor_qparams {
18
+ using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)")
24
+ static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max);
25
+ static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max);
26
+ };
27
+
28
+ struct TORCH_API _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out {
29
+ using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, at::Tensor &, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")
35
+ static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1);
36
+ static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_abs.h ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_foreach_abs_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_foreach_abs(Tensor[] self) -> Tensor[]
26
+ inline ::std::vector<at::Tensor> _foreach_abs(at::TensorList self) {
27
+ return at::_ops::_foreach_abs::call(self);
28
+ }
29
+
30
+ // aten::_foreach_abs_(Tensor(a!)[] self) -> ()
31
+ inline void _foreach_abs_(at::TensorList self) {
32
+ return at::_ops::_foreach_abs_::call(self);
33
+ }
34
+
35
+ // aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
36
+ inline void _foreach_abs_out(at::TensorList out, at::TensorList self) {
37
+ return at::_ops::_foreach_abs_out::call(self, out);
38
+ }
39
+ // aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
40
+ inline void _foreach_abs_outf(at::TensorList self, at::TensorList out) {
41
+ return at::_ops::_foreach_abs_out::call(self, out);
42
+ }
43
+
44
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_addcdiv_cpu_dispatch.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API ::std::vector<at::Tensor> _foreach_addcdiv(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1);
21
+ TORCH_API void _foreach_addcdiv_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1);
22
+ TORCH_API ::std::vector<at::Tensor> _foreach_addcdiv(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars);
23
+ TORCH_API void _foreach_addcdiv_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars);
24
+ TORCH_API ::std::vector<at::Tensor> _foreach_addcdiv(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars);
25
+ TORCH_API void _foreach_addcdiv_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars);
26
+
27
+ } // namespace cpu
28
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_round_ops.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _foreach_round {
18
+ using schema = ::std::vector<at::Tensor> (at::TensorList);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_round")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_round(Tensor[] self) -> Tensor[]")
24
+ static ::std::vector<at::Tensor> call(at::TensorList self);
25
+ static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
26
+ };
27
+
28
+ struct TORCH_API _foreach_round_ {
29
+ using schema = void (at::TensorList);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_round_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_round_(Tensor(a!)[] self) -> ()")
35
+ static void call(at::TensorList self);
36
+ static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
37
+ };
38
+
39
+ struct TORCH_API _foreach_round_out {
40
+ using schema = void (at::TensorList, at::TensorList);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_round")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
46
+ static void call(at::TensorList self, at::TensorList out);
47
+ static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
48
+ };
49
+
50
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sign_cpu_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API ::std::vector<at::Tensor> _foreach_sign(at::TensorList self);
21
+ TORCH_API void _foreach_sign_(at::TensorList self);
22
+
23
+ } // namespace cpu
24
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sdp_choice_cpu_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API int64_t _fused_sdp_choice(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool is_causal=false, c10::optional<double> scale=c10::nullopt);
21
+
22
+ } // namespace cpu
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_indices_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor _indices_sparse(const at::Tensor & self);
20
+ } // namespace native
21
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_is_any_true_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor _is_any_true(const at::Tensor & self);
20
+ } // namespace native
21
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_copy_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _make_dual_copy {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_make_dual_copy")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & primal, const at::Tensor & tangent, int64_t level);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level);
26
+ };
27
+
28
+ struct TORCH_API _make_dual_copy_out {
29
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_make_dual_copy")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_scale_native.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor & _masked_scale_out(const at::Tensor & self, const at::Tensor & mask, double scale, at::Tensor & out);
20
+ TORCH_API at::Tensor masked_scale_cuda(const at::Tensor & self, const at::Tensor & mask, double scale);
21
+ } // namespace native
22
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias_cuda_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor _reshape_alias(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride);
21
+ TORCH_API at::Tensor _reshape_alias_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride);
22
+
23
+ } // namespace cuda
24
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad_cuda_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor _standard_gamma_grad(const at::Tensor & self, const at::Tensor & output);
21
+
22
+ } // namespace cuda
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias);
21
+
22
+ } // namespace compositeimplicitautograd
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_triton_multi_head_attention_cuda_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor _triton_multi_head_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask={});
21
+
22
+ } // namespace cuda
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_ops.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _upsample_bilinear2d_aa_vec {
18
+ using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, bool, c10::optional<at::ArrayRef<double>>);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_upsample_bilinear2d_aa")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "vec")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors);
26
+ };
27
+
28
+ struct TORCH_API _upsample_bilinear2d_aa_out {
29
+ using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, bool, c10::optional<double>, c10::optional<double>, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_upsample_bilinear2d_aa")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
37
+ };
38
+
39
+ struct TORCH_API _upsample_bilinear2d_aa {
40
+ using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, bool, c10::optional<double>, c10::optional<double>);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_upsample_bilinear2d_aa")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor")
46
+ static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w);
47
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w);
48
+ };
49
+
50
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/addcdiv_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautogradnonfunctional {
19
+
20
+ TORCH_API at::Tensor addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
21
+ TORCH_API at::Tensor & addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
22
+
23
+ } // namespace compositeexplicitautogradnonfunctional
24
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_backward_ops.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API affine_grid_generator_backward {
18
+ using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, bool);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::affine_grid_generator_backward")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & grad, c10::SymIntArrayRef size, bool align_corners);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, c10::SymIntArrayRef size, bool align_corners);
26
+ };
27
+
28
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_meta_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API at::Tensor avg_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt);
21
+ TORCH_API at::Tensor & avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt);
22
+ TORCH_API at::Tensor & avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out);
23
+
24
+ } // namespace meta
25
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor bitwise_xor(const at::Tensor & self, const at::Scalar & other);
21
+ TORCH_API at::Tensor & bitwise_xor_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
22
+ TORCH_API at::Tensor & bitwise_xor_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
23
+ TORCH_API at::Tensor & bitwise_xor_(at::Tensor & self, const at::Scalar & other);
24
+ TORCH_API at::Tensor bitwise_xor(const at::Scalar & self, const at::Tensor & other);
25
+ TORCH_API at::Tensor & bitwise_xor_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other);
26
+ TORCH_API at::Tensor & bitwise_xor_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out);
27
+
28
+ } // namespace compositeexplicitautograd
29
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautogradnonfunctional {
19
+
20
+ TORCH_API at::Tensor bitwise_xor(const at::Tensor & self, const at::Tensor & other);
21
+ TORCH_API at::Tensor & bitwise_xor_(at::Tensor & self, const at::Tensor & other);
22
+
23
+ } // namespace compositeexplicitautogradnonfunctional
24
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor_native.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+ #include <ATen/ops/bitwise_xor_meta.h>
16
+
17
+ namespace at {
18
+ namespace native {
19
+ struct TORCH_API structured_bitwise_xor_out : public at::meta::structured_bitwise_xor_Tensor {
20
+ void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
21
+ };
22
+ TORCH_API at::Tensor bitwise_xor(const at::Tensor & self, const at::Scalar & other);
23
+ TORCH_API at::Tensor & bitwise_xor_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
24
+ TORCH_API at::Tensor & bitwise_xor_(at::Tensor & self, const at::Scalar & other);
25
+ TORCH_API at::Tensor bitwise_xor(const at::Scalar & self, const at::Tensor & other);
26
+ TORCH_API at::Tensor & bitwise_xor_Scalar_Tensor_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out);
27
+ } // namespace native
28
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cummaxmin_backward_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor cummaxmin_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim);
20
+ } // namespace native
21
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cumsum_cpu_dispatch.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor cumsum(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
21
+ TORCH_API at::Tensor & cumsum_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
22
+ TORCH_API at::Tensor & cumsum_outf(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
23
+ TORCH_API at::Tensor & cumsum_(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
24
+
25
+ } // namespace cpu
26
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/equal_native.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API bool cpu_equal(const at::Tensor & self, const at::Tensor & other);
20
+ TORCH_API bool cuda_equal(const at::Tensor & self, const at::Tensor & other);
21
+ TORCH_API bool equal_quantized_cpu(const at::Tensor & self, const at::Tensor & other);
22
+ } // namespace native
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/exponential_cuda_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor & exponential_(at::Tensor & self, double lambd=1, c10::optional<at::Generator> generator=c10::nullopt);
21
+
22
+ } // namespace cuda
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/fbgemm_linear_int8_weight_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
26
+ inline at::Tensor fbgemm_linear_int8_weight(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
27
+ return at::_ops::fbgemm_linear_int8_weight::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
28
+ }
29
+
30
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/feature_dropout_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor feature_dropout(const at::Tensor & input, double p, bool train);
21
+ TORCH_API at::Tensor & feature_dropout_(at::Tensor & self, double p, bool train);
22
+
23
+ } // namespace compositeimplicitautograd
24
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft2_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor fft_irfft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt);
21
+ TORCH_API at::Tensor fft_irfft2_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt);
22
+ TORCH_API at::Tensor & fft_irfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt);
23
+ TORCH_API at::Tensor & fft_irfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
24
+ TORCH_API at::Tensor & fft_irfft2_symint_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt);
25
+ TORCH_API at::Tensor & fft_irfft2_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
26
+
27
+ } // namespace compositeimplicitautograd
28
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API ::std::vector<at::Tensor> hsplit(const at::Tensor & self, int64_t sections);
21
+ TORCH_API ::std::vector<at::Tensor> hsplit(const at::Tensor & self, at::IntArrayRef indices);
22
+
23
+ } // namespace compositeimplicitautograd
24
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/index_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API index_Tensor {
18
+ using schema = at::Tensor (const at::Tensor &, const c10::List<c10::optional<at::Tensor>> &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index.Tensor(Tensor self, Tensor?[] indices) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices);
26
+ };
27
+
28
+ struct TORCH_API index_Tensor_out {
29
+ using schema = at::Tensor & (const at::Tensor &, const c10::List<c10::optional<at::Tensor>> &, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_backward_ops.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API index_select_backward {
18
+ using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, int64_t, const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_select_backward")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index);
26
+ };
27
+
28
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & indices_copy_out(at::Tensor & out, const at::Tensor & self);
21
+ TORCH_API at::Tensor & indices_copy_outf(const at::Tensor & self, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/inverse_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor inverse(const at::Tensor & self);
21
+ TORCH_API at::Tensor & inverse_out(at::Tensor & out, const at::Tensor & self);
22
+ TORCH_API at::Tensor & inverse_outf(const at::Tensor & self, at::Tensor & out);
23
+
24
+ } // namespace compositeimplicitautograd
25
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/is_floating_point_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API bool is_floating_point(const at::Tensor & self);
21
+
22
+ } // namespace compositeimplicitautograd
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/linalg_cholesky_ex_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)
26
+ inline ::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false) {
27
+ return at::_ops::linalg_cholesky_ex::call(self, upper, check_errors);
28
+ }
29
+
30
+ // aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
31
+ inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_out(at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper=false, bool check_errors=false) {
32
+ return at::_ops::linalg_cholesky_ex_L::call(self, upper, check_errors, L, info);
33
+ }
34
+ // aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
35
+ inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_outf(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info) {
36
+ return at::_ops::linalg_cholesky_ex_L::call(self, upper, check_errors, L, info);
37
+ }
38
+
39
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linear_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API linear {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linear")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias);
26
+ };
27
+
28
+ struct TORCH_API linear_out {
29
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linear")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward.h ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/masked_scatter_backward_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor
26
+ inline at::Tensor masked_scatter_backward(const at::Tensor & grad_output, const at::Tensor & mask, at::IntArrayRef sizes) {
27
+ return at::_ops::masked_scatter_backward::call(grad_output, mask, c10::fromIntArrayRefSlow(sizes));
28
+ }
29
+ namespace symint {
30
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
31
+ at::Tensor masked_scatter_backward(const at::Tensor & grad_output, const at::Tensor & mask, at::IntArrayRef sizes) {
32
+ return at::_ops::masked_scatter_backward::call(grad_output, mask, c10::fromIntArrayRefSlow(sizes));
33
+ }
34
+ }
35
+
36
+ // aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor
37
+ inline at::Tensor masked_scatter_backward_symint(const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes) {
38
+ return at::_ops::masked_scatter_backward::call(grad_output, mask, sizes);
39
+ }
40
+ namespace symint {
41
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
42
+ at::Tensor masked_scatter_backward(const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes) {
43
+ return at::_ops::masked_scatter_backward::call(grad_output, mask, sizes);
44
+ }
45
+ }
46
+
47
+ }