ZTWHHH committed on
Commit 6da29cc · verified · 1 Parent(s): 87c8d33

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. llava/lib/tcl8.6/msgs/ar_jo.msg +39 -0
  2. llava/lib/tcl8.6/msgs/de_at.msg +35 -0
  3. llava/lib/tcl8.6/msgs/en_ie.msg +7 -0
  4. llava/lib/tcl8.6/msgs/es_sv.msg +6 -0
  5. llava/lib/tcl8.6/msgs/fa_in.msg +52 -0
  6. llava/lib/tcl8.6/msgs/fi.msg +50 -0
  7. llava/lib/tcl8.6/msgs/fr.msg +52 -0
  8. llava/lib/tcl8.6/msgs/ja.msg +44 -0
  9. llava/lib/tcl8.6/msgs/mr_in.msg +6 -0
  10. llava/lib/tcl8.6/msgs/sk.msg +52 -0
  11. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_add_relu_meta_dispatch.h +24 -0
  12. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_flatten_weight.h +91 -0
  13. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_cuda_dispatch.h +24 -0
  14. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_addcmul.h +82 -0
  15. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max_cuda_dispatch.h +28 -0
  16. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log_ops.h +50 -0
  17. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_cpu_dispatch.h +25 -0
  18. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_mps_convolution_transpose_compositeexplicitautograd_dispatch.h +26 -0
  19. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_no_training_compositeexplicitautograd_dispatch.h +25 -0
  20. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_no_training_ops.h +39 -0
  21. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_offsets_native.h +20 -0
  22. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward_cuda_dispatch.h +23 -0
  23. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe.h +69 -0
  24. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_copy_compositeexplicitautograd_dispatch.h +26 -0
  25. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csc_native.h +24 -0
  26. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_semi_structured.h +30 -0
  27. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_native.h +26 -0
  28. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/align_to_native.h +22 -0
  29. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_elemt_cuda_dispatch.h +25 -0
  30. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cdist_native.h +21 -0
  31. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/choose_qparams_optimized_compositeimplicitautograd_dispatch.h +23 -0
  32. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_ops.h +83 -0
  33. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
  34. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_cuda_dispatch.h +23 -0
  35. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/deg2rad_ops.h +50 -0
  36. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/feature_alpha_dropout_compositeimplicitautograd_dispatch.h +24 -0
  37. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/feature_dropout.h +35 -0
  38. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fft_compositeimplicitautograd_dispatch.h +28 -0
  39. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors.h +30 -0
  40. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_native.h +22 -0
  41. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gt_compositeexplicitautogradnonfunctional_dispatch.h +26 -0
  42. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cuda_dispatch.h +26 -0
  43. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd_ops.h +50 -0
  44. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm.h +30 -0
  45. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_native.h +21 -0
  46. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h +25 -0
  47. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h +26 -0
  48. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_meta_dispatch.h +30 -0
  49. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh_native.h +22 -0
  50. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor.h +39 -0
llava/lib/tcl8.6/msgs/ar_jo.msg ADDED
@@ -0,0 +1,39 @@
+ # created by tools/loadICU.tcl -- do not edit
+ namespace eval ::tcl::clock {
+ ::msgcat::mcset ar_JO DAYS_OF_WEEK_ABBREV [list \
+ "\u0627\u0644\u0623\u062d\u062f"\
+ "\u0627\u0644\u0627\u062b\u0646\u064a\u0646"\
+ "\u0627\u0644\u062b\u0644\u0627\u062b\u0627\u0621"\
+ "\u0627\u0644\u0623\u0631\u0628\u0639\u0627\u0621"\
+ "\u0627\u0644\u062e\u0645\u064a\u0633"\
+ "\u0627\u0644\u062c\u0645\u0639\u0629"\
+ "\u0627\u0644\u0633\u0628\u062a"]
+ ::msgcat::mcset ar_JO MONTHS_ABBREV [list \
+ "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\
+ "\u0634\u0628\u0627\u0637"\
+ "\u0622\u0630\u0627\u0631"\
+ "\u0646\u064a\u0633\u0627\u0646"\
+ "\u0646\u0648\u0627\u0631"\
+ "\u062d\u0632\u064a\u0631\u0627\u0646"\
+ "\u062a\u0645\u0648\u0632"\
+ "\u0622\u0628"\
+ "\u0623\u064a\u0644\u0648\u0644"\
+ "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u0623\u0648\u0644"\
+ "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\
+ "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u0623\u0648\u0644"\
+ ""]
+ ::msgcat::mcset ar_JO MONTHS_FULL [list \
+ "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\
+ "\u0634\u0628\u0627\u0637"\
+ "\u0622\u0630\u0627\u0631"\
+ "\u0646\u064a\u0633\u0627\u0646"\
+ "\u0646\u0648\u0627\u0631"\
+ "\u062d\u0632\u064a\u0631\u0627\u0646"\
+ "\u062a\u0645\u0648\u0632"\
+ "\u0622\u0628"\
+ "\u0623\u064a\u0644\u0648\u0644"\
+ "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u0623\u0648\u0644"\
+ "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\
+ "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u0623\u0648\u0644"\
+ ""]
+ }
llava/lib/tcl8.6/msgs/de_at.msg ADDED
@@ -0,0 +1,35 @@
+ # created by tools/loadICU.tcl -- do not edit
+ namespace eval ::tcl::clock {
+ ::msgcat::mcset de_AT MONTHS_ABBREV [list \
+ "J\u00e4n"\
+ "Feb"\
+ "M\u00e4r"\
+ "Apr"\
+ "Mai"\
+ "Jun"\
+ "Jul"\
+ "Aug"\
+ "Sep"\
+ "Okt"\
+ "Nov"\
+ "Dez"\
+ ""]
+ ::msgcat::mcset de_AT MONTHS_FULL [list \
+ "J\u00e4nner"\
+ "Februar"\
+ "M\u00e4rz"\
+ "April"\
+ "Mai"\
+ "Juni"\
+ "Juli"\
+ "August"\
+ "September"\
+ "Oktober"\
+ "November"\
+ "Dezember"\
+ ""]
+ ::msgcat::mcset de_AT DATE_FORMAT "%Y-%m-%d"
+ ::msgcat::mcset de_AT TIME_FORMAT "%T"
+ ::msgcat::mcset de_AT TIME_FORMAT_12 "%T"
+ ::msgcat::mcset de_AT DATE_TIME_FORMAT "%a %d %b %Y %T %z"
+ }
llava/lib/tcl8.6/msgs/en_ie.msg ADDED
@@ -0,0 +1,7 @@
+ # created by tools/loadICU.tcl -- do not edit
+ namespace eval ::tcl::clock {
+ ::msgcat::mcset en_IE DATE_FORMAT "%d/%m/%y"
+ ::msgcat::mcset en_IE TIME_FORMAT "%T"
+ ::msgcat::mcset en_IE TIME_FORMAT_12 "%T"
+ ::msgcat::mcset en_IE DATE_TIME_FORMAT "%a %d %b %Y %T %z"
+ }
llava/lib/tcl8.6/msgs/es_sv.msg ADDED
@@ -0,0 +1,6 @@
+ # created by tools/loadICU.tcl -- do not edit
+ namespace eval ::tcl::clock {
+ ::msgcat::mcset es_SV DATE_FORMAT "%m-%d-%Y"
+ ::msgcat::mcset es_SV TIME_FORMAT_12 "%I:%M:%S %P"
+ ::msgcat::mcset es_SV DATE_TIME_FORMAT "%m-%d-%Y %I:%M:%S %P %z"
+ }
llava/lib/tcl8.6/msgs/fa_in.msg ADDED
@@ -0,0 +1,52 @@
+ # created by tools/loadICU.tcl -- do not edit
+ namespace eval ::tcl::clock {
+ ::msgcat::mcset fa_IN DAYS_OF_WEEK_ABBREV [list \
+ "\u06cc\u2214"\
+ "\u062f\u2214"\
+ "\u0633\u2214"\
+ "\u0686\u2214"\
+ "\u067e\u2214"\
+ "\u062c\u2214"\
+ "\u0634\u2214"]
+ ::msgcat::mcset fa_IN DAYS_OF_WEEK_FULL [list \
+ "\u06cc\u06cc\u200c\u0634\u0646\u0628\u0647"\
+ "\u062f\u0648\u0634\u0646\u0628\u0647"\
+ "\u0633\u0647\u200c\u0634\u0646\u0628\u0647"\
+ "\u0686\u0647\u0627\u0631\u0634\u0646\u0628\u0647"\
+ "\u067e\u0646\u062c\u200c\u0634\u0646\u0628\u0647"\
+ "\u062c\u0645\u0639\u0647"\
+ "\u0634\u0646\u0628\u0647"]
+ ::msgcat::mcset fa_IN MONTHS_ABBREV [list \
+ "\u0698\u0627\u0646"\
+ "\u0641\u0648\u0631"\
+ "\u0645\u0627\u0631"\
+ "\u0622\u0648\u0631"\
+ "\u0645\u0640\u0647"\
+ "\u0698\u0648\u0646"\
+ "\u0698\u0648\u06cc"\
+ "\u0627\u0648\u062a"\
+ "\u0633\u067e\u062a"\
+ "\u0627\u0643\u062a"\
+ "\u0646\u0648\u0627"\
+ "\u062f\u0633\u0627"\
+ ""]
+ ::msgcat::mcset fa_IN MONTHS_FULL [list \
+ "\u0698\u0627\u0646\u0648\u06cc\u0647"\
+ "\u0641\u0648\u0631\u0648\u06cc\u0647"\
+ "\u0645\u0627\u0631\u0633"\
+ "\u0622\u0648\u0631\u06cc\u0644"\
+ "\u0645\u0647"\
+ "\u0698\u0648\u0626\u0646"\
+ "\u0698\u0648\u0626\u06cc\u0647"\
+ "\u0627\u0648\u062a"\
+ "\u0633\u067e\u062a\u0627\u0645\u0628\u0631"\
+ "\u0627\u0643\u062a\u0628\u0631"\
+ "\u0646\u0648\u0627\u0645\u0628\u0631"\
+ "\u062f\u0633\u0627\u0645\u0628\u0631"\
+ ""]
+ ::msgcat::mcset fa_IN AM "\u0635\u0628\u062d"
+ ::msgcat::mcset fa_IN PM "\u0639\u0635\u0631"
+ ::msgcat::mcset fa_IN DATE_FORMAT "%A %d %B %Y"
+ ::msgcat::mcset fa_IN TIME_FORMAT_12 "%I:%M:%S %z"
+ ::msgcat::mcset fa_IN DATE_TIME_FORMAT "%A %d %B %Y %I:%M:%S %z %z"
+ }
llava/lib/tcl8.6/msgs/fi.msg ADDED
@@ -0,0 +1,50 @@
+ # created by tools/loadICU.tcl -- do not edit
+ namespace eval ::tcl::clock {
+ ::msgcat::mcset fi DAYS_OF_WEEK_ABBREV [list \
+ "su"\
+ "ma"\
+ "ti"\
+ "ke"\
+ "to"\
+ "pe"\
+ "la"]
+ ::msgcat::mcset fi DAYS_OF_WEEK_FULL [list \
+ "sunnuntai"\
+ "maanantai"\
+ "tiistai"\
+ "keskiviikko"\
+ "torstai"\
+ "perjantai"\
+ "lauantai"]
+ ::msgcat::mcset fi MONTHS_ABBREV [list \
+ "tammi"\
+ "helmi"\
+ "maalis"\
+ "huhti"\
+ "touko"\
+ "kes\u00e4"\
+ "hein\u00e4"\
+ "elo"\
+ "syys"\
+ "loka"\
+ "marras"\
+ "joulu"\
+ ""]
+ ::msgcat::mcset fi MONTHS_FULL [list \
+ "tammikuu"\
+ "helmikuu"\
+ "maaliskuu"\
+ "huhtikuu"\
+ "toukokuu"\
+ "kes\u00e4kuu"\
+ "hein\u00e4kuu"\
+ "elokuu"\
+ "syyskuu"\
+ "lokakuu"\
+ "marraskuu"\
+ "joulukuu"\
+ ""]
+ ::msgcat::mcset fi DATE_FORMAT "%e.%m.%Y"
+ ::msgcat::mcset fi TIME_FORMAT "%k:%M:%S"
+ ::msgcat::mcset fi DATE_TIME_FORMAT "%e.%m.%Y %k:%M:%S %z"
+ }
llava/lib/tcl8.6/msgs/fr.msg ADDED
@@ -0,0 +1,52 @@
+ # created by tools/loadICU.tcl -- do not edit
+ namespace eval ::tcl::clock {
+ ::msgcat::mcset fr DAYS_OF_WEEK_ABBREV [list \
+ "dim."\
+ "lun."\
+ "mar."\
+ "mer."\
+ "jeu."\
+ "ven."\
+ "sam."]
+ ::msgcat::mcset fr DAYS_OF_WEEK_FULL [list \
+ "dimanche"\
+ "lundi"\
+ "mardi"\
+ "mercredi"\
+ "jeudi"\
+ "vendredi"\
+ "samedi"]
+ ::msgcat::mcset fr MONTHS_ABBREV [list \
+ "janv."\
+ "f\u00e9vr."\
+ "mars"\
+ "avr."\
+ "mai"\
+ "juin"\
+ "juil."\
+ "ao\u00fbt"\
+ "sept."\
+ "oct."\
+ "nov."\
+ "d\u00e9c."\
+ ""]
+ ::msgcat::mcset fr MONTHS_FULL [list \
+ "janvier"\
+ "f\u00e9vrier"\
+ "mars"\
+ "avril"\
+ "mai"\
+ "juin"\
+ "juillet"\
+ "ao\u00fbt"\
+ "septembre"\
+ "octobre"\
+ "novembre"\
+ "d\u00e9cembre"\
+ ""]
+ ::msgcat::mcset fr BCE "av. J.-C."
+ ::msgcat::mcset fr CE "ap. J.-C."
+ ::msgcat::mcset fr DATE_FORMAT "%e %B %Y"
+ ::msgcat::mcset fr TIME_FORMAT "%H:%M:%S"
+ ::msgcat::mcset fr DATE_TIME_FORMAT "%e %B %Y %H:%M:%S %z"
+ }
llava/lib/tcl8.6/msgs/ja.msg ADDED
@@ -0,0 +1,44 @@
+ # created by tools/loadICU.tcl -- do not edit
+ namespace eval ::tcl::clock {
+ ::msgcat::mcset ja DAYS_OF_WEEK_ABBREV [list \
+ "\u65e5"\
+ "\u6708"\
+ "\u706b"\
+ "\u6c34"\
+ "\u6728"\
+ "\u91d1"\
+ "\u571f"]
+ ::msgcat::mcset ja DAYS_OF_WEEK_FULL [list \
+ "\u65e5\u66dc\u65e5"\
+ "\u6708\u66dc\u65e5"\
+ "\u706b\u66dc\u65e5"\
+ "\u6c34\u66dc\u65e5"\
+ "\u6728\u66dc\u65e5"\
+ "\u91d1\u66dc\u65e5"\
+ "\u571f\u66dc\u65e5"]
+ ::msgcat::mcset ja MONTHS_FULL [list \
+ "1\u6708"\
+ "2\u6708"\
+ "3\u6708"\
+ "4\u6708"\
+ "5\u6708"\
+ "6\u6708"\
+ "7\u6708"\
+ "8\u6708"\
+ "9\u6708"\
+ "10\u6708"\
+ "11\u6708"\
+ "12\u6708"]
+ ::msgcat::mcset ja BCE "\u7d00\u5143\u524d"
+ ::msgcat::mcset ja CE "\u897f\u66a6"
+ ::msgcat::mcset ja AM "\u5348\u524d"
+ ::msgcat::mcset ja PM "\u5348\u5f8c"
+ ::msgcat::mcset ja DATE_FORMAT "%Y/%m/%d"
+ ::msgcat::mcset ja TIME_FORMAT "%k:%M:%S"
+ ::msgcat::mcset ja TIME_FORMAT_12 "%P %I:%M:%S"
+ ::msgcat::mcset ja DATE_TIME_FORMAT "%Y/%m/%d %k:%M:%S %z"
+ ::msgcat::mcset ja LOCALE_DATE_FORMAT "%EY\u5e74%m\u6708%d\u65e5"
+ ::msgcat::mcset ja LOCALE_TIME_FORMAT "%H\u6642%M\u5206%S\u79d2"
+ ::msgcat::mcset ja LOCALE_DATE_TIME_FORMAT "%EY\u5e74%m\u6708%d\u65e5 (%a) %H\u6642%M\u5206%S\u79d2 %z"
+ ::msgcat::mcset ja LOCALE_ERAS "{-9223372036854775808 \u897f\u66a6 0} {-3061011600 \u660e\u6cbb 1867} {-1812186000 \u5927\u6b63 1911} {-1357635600 \u662d\u548c 1925} {600220800 \u5e73\u6210 1988} {1556668800 \u4ee4\u548c 2018}"
+ }
llava/lib/tcl8.6/msgs/mr_in.msg ADDED
@@ -0,0 +1,6 @@
+ # created by tools/loadICU.tcl -- do not edit
+ namespace eval ::tcl::clock {
+ ::msgcat::mcset mr_IN DATE_FORMAT "%d %M %Y"
+ ::msgcat::mcset mr_IN TIME_FORMAT_12 "%I:%M:%S %P"
+ ::msgcat::mcset mr_IN DATE_TIME_FORMAT "%d %M %Y %I:%M:%S %P %z"
+ }
llava/lib/tcl8.6/msgs/sk.msg ADDED
@@ -0,0 +1,52 @@
+ # created by tools/loadICU.tcl -- do not edit
+ namespace eval ::tcl::clock {
+ ::msgcat::mcset sk DAYS_OF_WEEK_ABBREV [list \
+ "Ne"\
+ "Po"\
+ "Ut"\
+ "St"\
+ "\u0160t"\
+ "Pa"\
+ "So"]
+ ::msgcat::mcset sk DAYS_OF_WEEK_FULL [list \
+ "Nede\u013ee"\
+ "Pondelok"\
+ "Utorok"\
+ "Streda"\
+ "\u0160tvrtok"\
+ "Piatok"\
+ "Sobota"]
+ ::msgcat::mcset sk MONTHS_ABBREV [list \
+ "jan"\
+ "feb"\
+ "mar"\
+ "apr"\
+ "m\u00e1j"\
+ "j\u00fan"\
+ "j\u00fal"\
+ "aug"\
+ "sep"\
+ "okt"\
+ "nov"\
+ "dec"\
+ ""]
+ ::msgcat::mcset sk MONTHS_FULL [list \
+ "janu\u00e1r"\
+ "febru\u00e1r"\
+ "marec"\
+ "apr\u00edl"\
+ "m\u00e1j"\
+ "j\u00fan"\
+ "j\u00fal"\
+ "august"\
+ "september"\
+ "okt\u00f3ber"\
+ "november"\
+ "december"\
+ ""]
+ ::msgcat::mcset sk BCE "pred n.l."
+ ::msgcat::mcset sk CE "n.l."
+ ::msgcat::mcset sk DATE_FORMAT "%e.%m.%Y"
+ ::msgcat::mcset sk TIME_FORMAT "%k:%M:%S"
+ ::msgcat::mcset sk DATE_TIME_FORMAT "%e.%m.%Y %k:%M:%S %z"
+ }
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_add_relu_meta_dispatch.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace meta {
+
+ TORCH_API at::Tensor & _add_relu_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+ TORCH_API at::Tensor & _add_relu_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
+
+ } // namespace meta
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_flatten_weight.h ADDED
@@ -0,0 +1,91 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/_cudnn_rnn_flatten_weight_ops.h>
+
+ namespace at {
+
+
+ // aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
+ inline at::Tensor _cudnn_rnn_flatten_weight(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
+ return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+ at::Tensor _cudnn_rnn_flatten_weight(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
+ return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
+ }
+ }
+
+ // aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
+ inline at::Tensor _cudnn_rnn_flatten_weight_symint(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
+ return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+ at::Tensor _cudnn_rnn_flatten_weight(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
+ return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
+ }
+ }
+
+ // aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _cudnn_rnn_flatten_weight_out(at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
+ return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+ at::Tensor & _cudnn_rnn_flatten_weight_out(at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
+ return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
+ }
+ }
+
+ // aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _cudnn_rnn_flatten_weight_outf(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
+ return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+ at::Tensor & _cudnn_rnn_flatten_weight_outf(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
+ return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
+ }
+ }
+
+ // aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _cudnn_rnn_flatten_weight_symint_out(at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
+ return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+ at::Tensor & _cudnn_rnn_flatten_weight_out(at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
+ return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
+ }
+ }
+
+ // aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _cudnn_rnn_flatten_weight_symint_outf(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
+ return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+ at::Tensor & _cudnn_rnn_flatten_weight_outf(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
+ return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
+ }
+ }
+
+ }
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_cuda_dispatch.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cuda {
+
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt> _efficient_attention_forward(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & cu_seqlens_q, const ::std::optional<at::Tensor> & cu_seqlens_k, ::std::optional<int64_t> max_seqlen_q, ::std::optional<int64_t> max_seqlen_k, double dropout_p, int64_t custom_mask_type, bool compute_log_sumexp=false, ::std::optional<double> scale=::std::nullopt, const ::std::optional<at::Tensor> & seqlen_k={}, ::std::optional<int64_t> window_size=::std::nullopt);
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt> _efficient_attention_forward_symint(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & cu_seqlens_q, const ::std::optional<at::Tensor> & cu_seqlens_k, ::std::optional<c10::SymInt> max_seqlen_q, ::std::optional<c10::SymInt> max_seqlen_k, double dropout_p, int64_t custom_mask_type, bool compute_log_sumexp=false, ::std::optional<double> scale=::std::nullopt, const ::std::optional<at::Tensor> & seqlen_k={}, ::std::optional<int64_t> window_size=::std::nullopt);
+
+ } // namespace cuda
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_addcmul.h ADDED
@@ -0,0 +1,82 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/_foreach_addcmul_ops.h>
+
+ namespace at {
+
+
+ // aten::_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
+ inline ::std::vector<at::Tensor> _foreach_addcmul(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) {
+ return at::_ops::_foreach_addcmul_Scalar::call(self, tensor1, tensor2, value);
+ }
+
+ // aten::_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
+ inline ::std::vector<at::Tensor> _foreach_addcmul(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
+ return at::_ops::_foreach_addcmul_ScalarList::call(self, tensor1, tensor2, scalars);
+ }
+
+ // aten::_foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
+ inline ::std::vector<at::Tensor> _foreach_addcmul(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
+ return at::_ops::_foreach_addcmul_Tensor::call(self, tensor1, tensor2, scalars);
+ }
+
+ // aten::_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
+ inline void _foreach_addcmul_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) {
+ return at::_ops::_foreach_addcmul__Scalar::call(self, tensor1, tensor2, value);
+ }
+
+ // aten::_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
+ inline void _foreach_addcmul_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
+ return at::_ops::_foreach_addcmul__ScalarList::call(self, tensor1, tensor2, scalars);
+ }
+
+ // aten::_foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
+ inline void _foreach_addcmul_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
+ return at::_ops::_foreach_addcmul__Tensor::call(self, tensor1, tensor2, scalars);
+ }
+
+ // aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
+ inline void _foreach_addcmul_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) {
+ return at::_ops::_foreach_addcmul_Scalar_out::call(self, tensor1, tensor2, value, out);
+ }
+ // aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
+ inline void _foreach_addcmul_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
+ return at::_ops::_foreach_addcmul_Scalar_out::call(self, tensor1, tensor2, value, out);
+ }
+
+ // aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+ inline void _foreach_addcmul_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
+ return at::_ops::_foreach_addcmul_ScalarList_out::call(self, tensor1, tensor2, scalars, out);
+ }
+ // aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+ inline void _foreach_addcmul_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
+ return at::_ops::_foreach_addcmul_ScalarList_out::call(self, tensor1, tensor2, scalars, out);
+ }
+
+ // aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
+ inline void _foreach_addcmul_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
+ return at::_ops::_foreach_addcmul_Tensor_out::call(self, tensor1, tensor2, scalars, out);
+ }
+ // aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
+ inline void _foreach_addcmul_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
+ return at::_ops::_foreach_addcmul_Tensor_out::call(self, tensor1, tensor2, scalars, out);
+ }
+
+ }
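Editor's note: for orientation, a minimal sketch of how the Scalar overload declared in the header above can be called from C++ (this assumes a working libtorch build; the tensor shapes and the 0.5 scaling value are arbitrary illustration, not part of the commit):

#include <ATen/ATen.h>
#include <vector>

int main() {
  // Three parallel lists of tensors; _foreach_addcmul computes
  // result[i] = self[i] + value * tensor1[i] * tensor2[i] for every i.
  std::vector<at::Tensor> self    = {at::ones({2, 2}), at::ones({3})};
  std::vector<at::Tensor> tensor1 = {at::rand({2, 2}), at::rand({3})};
  std::vector<at::Tensor> tensor2 = {at::rand({2, 2}), at::rand({3})};
  // at::TensorList is an ArrayRef<Tensor>, so a std::vector converts implicitly.
  std::vector<at::Tensor> result =
      at::_foreach_addcmul(self, tensor1, tensor2, /*value=*/0.5);
  return result.size() == 2 ? 0 : 1;
}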
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max_cuda_dispatch.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cuda {
+
+ TORCH_API ::std::vector<at::Tensor> _foreach_clamp_max(at::TensorList self, const at::Scalar & scalar);
+ TORCH_API void _foreach_clamp_max_(at::TensorList self, const at::Scalar & scalar);
+ TORCH_API ::std::vector<at::Tensor> _foreach_clamp_max(at::TensorList self, at::TensorList other);
+ TORCH_API void _foreach_clamp_max_(at::TensorList self, at::TensorList other);
+ TORCH_API ::std::vector<at::Tensor> _foreach_clamp_max(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+ TORCH_API void _foreach_clamp_max_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+
+ } // namespace cuda
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log_ops.h ADDED
@@ -0,0 +1,50 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API _foreach_log {
+ using schema = ::std::vector<at::Tensor> (at::TensorList);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_log")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_log(Tensor[] self) -> Tensor[]")
+ static ::std::vector<at::Tensor> call(at::TensorList self);
+ static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+ };
+
+ struct TORCH_API _foreach_log_ {
+ using schema = void (at::TensorList);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_log_")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_log_(Tensor(a!)[] self) -> ()")
+ static void call(at::TensorList self);
+ static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+ };
+
+ struct TORCH_API _foreach_log_out {
+ using schema = void (at::TensorList, at::TensorList);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_log")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+ static void call(at::TensorList self, at::TensorList out);
+ static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+ };
+
+ }} // namespace at::_ops
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_cpu_dispatch.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cpu {
+
+ TORCH_API at::Tensor _log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype);
+ TORCH_API at::Tensor & _log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype);
+ TORCH_API at::Tensor & _log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out);
+
+ } // namespace cpu
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_mps_convolution_transpose_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API at::Tensor & _mps_convolution_transpose_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups);
+ TORCH_API at::Tensor & _mps_convolution_transpose_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out);
+ TORCH_API at::Tensor & _mps_convolution_transpose_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups);
+ TORCH_API at::Tensor & _mps_convolution_transpose_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_no_training_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_training(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps);
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_no_training_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps);
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_no_training_outf(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_no_training_ops.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API _native_batch_norm_legit_no_training {
+ using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const at::Tensor &, const at::Tensor &, double, double);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_native_batch_norm_legit_no_training")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_native_batch_norm_legit_no_training(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor)")
+ static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps);
+ static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps);
+ };
+
+ struct TORCH_API _native_batch_norm_legit_no_training_out {
+ using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const at::Tensor &, const at::Tensor &, double, double, at::Tensor &, at::Tensor &, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_native_batch_norm_legit_no_training")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")
+ static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+ static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+ };
+
+ }} // namespace at::_ops
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_offsets_native.h ADDED
@@ -0,0 +1,20 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ } // namespace native
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward_cuda_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cuda {
+
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_backward(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, double dropout_p, ::std::array<bool,4> grad_input_mask, bool is_causal=false, ::std::optional<double> scale=::std::nullopt);
+
+ } // namespace cuda
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe.h ADDED
@@ -0,0 +1,69 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/_sparse_compressed_tensor_unsafe_ops.h>
+
+ namespace at {
+
+
+ // aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) {
+ return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+ at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) {
+ return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+ at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor _sparse_compressed_tensor_unsafe_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options={}) {
+ return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+ at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options={}) {
+ return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor _sparse_compressed_tensor_unsafe_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+ at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
+ }
+ }
+
+ }
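Editor's note: a minimal sketch of what a call to the unchecked constructor declared above could look like for a small CSR matrix (assumes a working libtorch build; the indices, values, and shape are illustrative, and in user code the validated sparse_csr_tensor factory would normally be preferred):

#include <torch/torch.h>

int main() {
  // CSR layout for [[1, 2], [0, 3]]: row pointers, column indices, values.
  at::Tensor crow_indices = torch::tensor({0, 2, 3}, torch::kLong);
  at::Tensor col_indices  = torch::tensor({0, 1, 1}, torch::kLong);
  at::Tensor values       = torch::tensor({1.0, 2.0, 3.0});
  // "_unsafe" skips invariant validation; the sparse layout must still be supplied.
  at::Tensor csr = at::_sparse_compressed_tensor_unsafe(
      crow_indices, col_indices, values, {2, 2},
      at::TensorOptions().layout(at::kSparseCsr));
  return csr.defined() ? 0 : 1;
}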
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_copy_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API at::Tensor _to_copy(const at::Tensor & self, at::TensorOptions options={}, bool non_blocking=false, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor _to_copy(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, bool non_blocking, ::std::optional<at::MemoryFormat> memory_format);
+ TORCH_API at::Tensor & _to_copy_out(at::Tensor & out, const at::Tensor & self, bool non_blocking=false, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor & _to_copy_outf(const at::Tensor & self, bool non_blocking, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csc_native.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor & _to_sparse_csc_out(const at::Tensor & self, ::std::optional<int64_t> dense_dim, at::Tensor & out);
+ TORCH_API at::Tensor dense_to_sparse_csc(const at::Tensor & self, ::std::optional<int64_t> dense_dim=::std::nullopt);
+ TORCH_API at::Tensor coo_to_sparse_csc(const at::Tensor & self, ::std::optional<int64_t> dense_dim=::std::nullopt);
+ TORCH_API at::Tensor sparse_compressed_to_sparse_csc(const at::Tensor & self, ::std::optional<int64_t> dense_dim=::std::nullopt);
+ } // namespace native
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_semi_structured.h ADDED
@@ -0,0 +1,30 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_to_sparse_semi_structured_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_to_sparse_semi_structured(Tensor dense) -> (Tensor, Tensor)
26
+ inline ::std::tuple<at::Tensor,at::Tensor> _to_sparse_semi_structured(const at::Tensor & dense) {
27
+ return at::_ops::_to_sparse_semi_structured::call(dense);
28
+ }
29
+
30
+ }
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_native.h ADDED
@@ -0,0 +1,26 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+ #include <ATen/ops/adaptive_max_pool2d_meta.h>
16
+
17
+ namespace at {
18
+ namespace native {
19
+ struct TORCH_API structured_adaptive_max_pool2d_out_cpu : public at::meta::structured_adaptive_max_pool2d {
20
+ void impl(const at::Tensor & self, at::IntArrayRef output_size, const at::Tensor & out, const at::Tensor & indices);
21
+ };
22
+ struct TORCH_API structured_adaptive_max_pool2d_out_cuda : public at::meta::structured_adaptive_max_pool2d {
23
+ void impl(const at::Tensor & self, at::IntArrayRef output_size, const at::Tensor & out, const at::Tensor & indices);
24
+ };
25
+ } // namespace native
26
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/align_to_native.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor align_to(const at::Tensor & self, at::DimnameList names);
20
+ TORCH_API at::Tensor align_to(const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx);
21
+ } // namespace native
22
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_elemt_cuda_dispatch.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor batch_norm_elemt(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps);
21
+ TORCH_API at::Tensor & batch_norm_elemt_out(at::Tensor & out, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps);
22
+ TORCH_API at::Tensor & batch_norm_elemt_outf(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps, at::Tensor & out);
23
+
24
+ } // namespace cuda
25
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cdist_native.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor cdist(const at::Tensor & x1, const at::Tensor & x2, double p=2, ::std::optional<int64_t> compute_mode=::std::nullopt);
20
+ } // namespace native
21
+ } // namespace at
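Note on the declaration above: at::native::cdist backs the public at::cdist operator (pairwise p-norm distances between two batches of row vectors). A minimal usage sketch through the public at:: wrapper, assuming a linked libtorch build; tensor names are placeholders:

#include <ATen/ATen.h>

void cdist_demo() {
  at::Tensor x1 = at::randn({10, 3});             // 10 points in R^3
  at::Tensor x2 = at::randn({20, 3});             // 20 points in R^3
  at::Tensor d2 = at::cdist(x1, x2);              // Euclidean (p=2) distances, shape (10, 20)
  at::Tensor d1 = at::cdist(x1, x2, /*p=*/1.0);   // Manhattan (p=1) distances
}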
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/choose_qparams_optimized_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> choose_qparams_optimized(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width);
21
+
22
+ } // namespace compositeimplicitautograd
23
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_ops.h ADDED
@@ -0,0 +1,83 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API clamp {
18
+ using schema = at::Tensor (const at::Tensor &, const ::std::optional<at::Scalar> &, const ::std::optional<at::Scalar> &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max);
26
+ };
27
+
28
+ struct TORCH_API clamp_Tensor {
29
+ using schema = at::Tensor (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor")
35
+ static at::Tensor call(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max);
36
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max);
37
+ };
38
+
39
+ struct TORCH_API clamp_ {
40
+ using schema = at::Tensor & (at::Tensor &, const ::std::optional<at::Scalar> &, const ::std::optional<at::Scalar> &);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)")
46
+ static at::Tensor & call(at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max);
47
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max);
48
+ };
49
+
50
+ struct TORCH_API clamp__Tensor {
51
+ using schema = at::Tensor & (at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &);
52
+ using ptr_schema = schema*;
53
+ // See Note [static constexpr char* members for windows NVCC]
54
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_")
55
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
56
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)")
57
+ static at::Tensor & call(at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max);
58
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max);
59
+ };
60
+
61
+ struct TORCH_API clamp_out {
62
+ using schema = at::Tensor & (const at::Tensor &, const ::std::optional<at::Scalar> &, const ::std::optional<at::Scalar> &, at::Tensor &);
63
+ using ptr_schema = schema*;
64
+ // See Note [static constexpr char* members for windows NVCC]
65
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp")
66
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
67
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)")
68
+ static at::Tensor & call(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max, at::Tensor & out);
69
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max, at::Tensor & out);
70
+ };
71
+
72
+ struct TORCH_API clamp_Tensor_out {
73
+ using schema = at::Tensor & (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, at::Tensor &);
74
+ using ptr_schema = schema*;
75
+ // See Note [static constexpr char* members for windows NVCC]
76
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp")
77
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
78
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)")
79
+ static at::Tensor & call(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max, at::Tensor & out);
80
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max, at::Tensor & out);
81
+ };
82
+
83
+ }} // namespace at::_ops
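The structs above are the schema-level entries for the aten::clamp family (functional, Tensor-bound, in-place, and out= overloads). A minimal usage sketch through the public at:: wrappers, assuming a linked libtorch build:

#include <ATen/ATen.h>

void clamp_demo() {
  at::Tensor x = at::randn({4});
  at::Tensor a = at::clamp(x, 0.0, 1.0);                       // Scalar min/max (aten::clamp)
  at::Tensor b = at::clamp(x, at::zeros({4}), at::ones({4}));  // Tensor min/max (aten::clamp.Tensor)
  x.clamp_(-1.0, 1.0);                                         // in-place (aten::clamp_)
  at::Tensor out = at::empty_like(x);
  at::clamp_out(out, x, 0.0, 0.5);                             // out= variant (aten::clamp.out)
}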
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautogradnonfunctional {
19
+
20
+ TORCH_API at::Tensor col_indices_copy(const at::Tensor & self);
21
+
22
+ } // namespace compositeexplicitautogradnonfunctional
23
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_cuda_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor cudnn_grid_sampler(const at::Tensor & self, const at::Tensor & grid);
21
+
22
+ } // namespace cuda
23
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/deg2rad_ops.h ADDED
@@ -0,0 +1,50 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API deg2rad {
18
+ using schema = at::Tensor (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::deg2rad")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "deg2rad(Tensor self) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ struct TORCH_API deg2rad_ {
29
+ using schema = at::Tensor & (at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::deg2rad_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "deg2rad_(Tensor(a!) self) -> Tensor(a!)")
35
+ static at::Tensor & call(at::Tensor & self);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
37
+ };
38
+
39
+ struct TORCH_API deg2rad_out {
40
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::deg2rad")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
46
+ static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
47
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
48
+ };
49
+
50
+ }} // namespace at::_ops
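The structs above cover aten::deg2rad in its functional, in-place, and out= forms. A minimal usage sketch via the public at:: wrappers, assuming a linked libtorch build:

#include <ATen/ATen.h>

void deg2rad_demo() {
  at::Tensor deg = at::arange(0.0, 360.0, 90.0);  // 0, 90, 180, 270 degrees
  at::Tensor rad = at::deg2rad(deg);              // functional (aten::deg2rad)
  at::Tensor out = at::empty_like(deg);
  at::deg2rad_out(out, deg);                      // out= variant (aten::deg2rad.out)
  deg.deg2rad_();                                 // in-place variant (aten::deg2rad_)
}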
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/feature_alpha_dropout_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor feature_alpha_dropout(const at::Tensor & input, double p, bool train);
21
+ TORCH_API at::Tensor & feature_alpha_dropout_(at::Tensor & self, double p, bool train);
22
+
23
+ } // namespace compositeimplicitautograd
24
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/feature_dropout.h ADDED
@@ -0,0 +1,35 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/feature_dropout_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::feature_dropout(Tensor input, float p, bool train) -> Tensor
26
+ inline at::Tensor feature_dropout(const at::Tensor & input, double p, bool train) {
27
+ return at::_ops::feature_dropout::call(input, p, train);
28
+ }
29
+
30
+ // aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
31
+ inline at::Tensor & feature_dropout_(at::Tensor & self, double p, bool train) {
32
+ return at::_ops::feature_dropout_::call(self, p, train);
33
+ }
34
+
35
+ }
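The inline wrappers above forward to aten::feature_dropout, which drops entire feature channels rather than individual elements. A minimal usage sketch, assuming a linked libtorch build:

#include <ATen/ATen.h>

void feature_dropout_demo() {
  at::Tensor x = at::randn({8, 16, 32, 32});                          // (N, C, H, W)
  at::Tensor y = at::feature_dropout(x, /*p=*/0.5, /*train=*/true);   // zeroes whole channels
  at::feature_dropout_(x, /*p=*/0.5, /*train=*/true);                 // in-place variant
}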
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fft_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor fft_fft(const at::Tensor & self, ::std::optional<int64_t> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt);
21
+ TORCH_API at::Tensor fft_fft_symint(const at::Tensor & self, ::std::optional<c10::SymInt> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt);
22
+ TORCH_API at::Tensor & fft_fft_out(at::Tensor & out, const at::Tensor & self, ::std::optional<int64_t> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt);
23
+ TORCH_API at::Tensor & fft_fft_outf(const at::Tensor & self, ::std::optional<int64_t> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out);
24
+ TORCH_API at::Tensor & fft_fft_symint_out(at::Tensor & out, const at::Tensor & self, ::std::optional<c10::SymInt> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt);
25
+ TORCH_API at::Tensor & fft_fft_symint_outf(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out);
26
+
27
+ } // namespace compositeimplicitautograd
28
+ } // namespace at
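These are the CompositeImplicitAutograd entries for aten::fft_fft, with int and SymInt length variants plus out= forms. A minimal sketch through the public at::fft_fft wrapper, assuming a linked libtorch build; the norm string follows the usual "backward"/"forward"/"ortho" convention:

#include <ATen/ATen.h>

void fft_demo() {
  at::Tensor x = at::randn({16});
  at::Tensor X = at::fft_fft(x);                                          // full-length FFT along dim -1
  at::Tensor X8 = at::fft_fft(x, /*n=*/8, /*dim=*/-1, /*norm=*/"ortho");  // truncated, orthonormalized
}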
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors.h ADDED
@@ -0,0 +1,30 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/flatten_dense_tensors_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor
26
+ inline at::Tensor flatten_dense_tensors(at::TensorList tensors) {
27
+ return at::_ops::flatten_dense_tensors::call(tensors);
28
+ }
29
+
30
+ }
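at::flatten_dense_tensors, declared above, concatenates the flattened contents of a list of dense tensors into a single contiguous 1-D buffer. A minimal sketch, assuming a linked libtorch build:

#include <ATen/ATen.h>

void flatten_demo() {
  at::Tensor a = at::randn({2, 3});
  at::Tensor b = at::randn({4});
  at::Tensor flat = at::flatten_dense_tensors({a, b});  // 1-D tensor with 6 + 4 = 10 elements
}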
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_native.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> geqrf(const at::Tensor & self);
20
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> geqrf_out(const at::Tensor & self, at::Tensor & a, at::Tensor & tau);
21
+ } // namespace native
22
+ } // namespace at
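at::native::geqrf backs aten::geqrf, the LAPACK-style packed QR factorization returning (a, tau). A minimal sketch through the public wrapper, assuming a linked libtorch build:

#include <ATen/ATen.h>

void geqrf_demo() {
  at::Tensor m = at::randn({5, 3});
  auto [a, tau] = at::geqrf(m);  // packed Householder factors and scalar coefficients
}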
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gt_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,26 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautogradnonfunctional {
19
+
20
+ TORCH_API at::Tensor gt(const at::Tensor & self, const at::Scalar & other);
21
+ TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Scalar & other);
22
+ TORCH_API at::Tensor gt(const at::Tensor & self, const at::Tensor & other);
23
+ TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Tensor & other);
24
+
25
+ } // namespace compositeexplicitautogradnonfunctional
26
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cuda_dispatch.h ADDED
@@ -0,0 +1,26 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor hardswish(const at::Tensor & self);
21
+ TORCH_API at::Tensor & hardswish_out(at::Tensor & out, const at::Tensor & self);
22
+ TORCH_API at::Tensor & hardswish_outf(const at::Tensor & self, at::Tensor & out);
23
+ TORCH_API at::Tensor & hardswish_(at::Tensor & self);
24
+
25
+ } // namespace cuda
26
+ } // namespace at
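These are the CUDA dispatch entries for aten::hardswish (x * relu6(x + 3) / 6). A minimal sketch via the device-generic at:: wrappers, assuming a linked libtorch build; the CUDA kernel is selected automatically for CUDA tensors:

#include <ATen/ATen.h>

void hardswish_demo() {
  at::Tensor x = at::randn({8});
  at::Tensor y = at::hardswish(x);   // out-of-place
  at::hardswish_(x);                 // in-place variant
}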
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd_ops.h ADDED
@@ -0,0 +1,50 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API histogramdd {
18
+ using schema = ::std::tuple<at::Tensor,::std::vector<at::Tensor>> (const at::Tensor &, at::IntArrayRef, ::std::optional<at::ArrayRef<double>>, const ::std::optional<at::Tensor> &, bool);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::histogramdd")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)")
24
+ static ::std::tuple<at::Tensor,::std::vector<at::Tensor>> call(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density);
25
+ static ::std::tuple<at::Tensor,::std::vector<at::Tensor>> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density);
26
+ };
27
+
28
+ struct TORCH_API histogramdd_int_bins {
29
+ using schema = ::std::tuple<at::Tensor,::std::vector<at::Tensor>> (const at::Tensor &, int64_t, ::std::optional<at::ArrayRef<double>>, const ::std::optional<at::Tensor> &, bool);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::histogramdd")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int_bins")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)")
35
+ static ::std::tuple<at::Tensor,::std::vector<at::Tensor>> call(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density);
36
+ static ::std::tuple<at::Tensor,::std::vector<at::Tensor>> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density);
37
+ };
38
+
39
+ struct TORCH_API histogramdd_TensorList_bins {
40
+ using schema = ::std::tuple<at::Tensor,::std::vector<at::Tensor>> (const at::Tensor &, at::TensorList, ::std::optional<at::ArrayRef<double>>, const ::std::optional<at::Tensor> &, bool);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::histogramdd")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "TensorList_bins")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)")
46
+ static ::std::tuple<at::Tensor,::std::vector<at::Tensor>> call(const at::Tensor & self, at::TensorList bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density);
47
+ static ::std::tuple<at::Tensor,::std::vector<at::Tensor>> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density);
48
+ };
49
+
50
+ }} // namespace at::_ops
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm.h ADDED
@@ -0,0 +1,30 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/instance_norm_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor
26
+ inline at::Tensor instance_norm(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
27
+ return at::_ops::instance_norm::call(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
28
+ }
29
+
30
+ }
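The inline wrapper above exposes aten::instance_norm, which normalizes each (sample, channel) slice separately. A minimal sketch without affine parameters or running statistics, assuming a linked libtorch build:

#include <ATen/ATen.h>

void instance_norm_demo() {
  at::Tensor x = at::randn({2, 3, 8, 8});  // (N, C, H, W)
  at::Tensor y = at::instance_norm(
      x, /*weight=*/{}, /*bias=*/{}, /*running_mean=*/{}, /*running_var=*/{},
      /*use_input_stats=*/true, /*momentum=*/0.1, /*eps=*/1e-5, /*cudnn_enabled=*/false);
}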
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_native.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor instance_norm(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled);
20
+ } // namespace native
21
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API at::Tensor isposinf(const at::Tensor & self);
21
+ TORCH_API at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self);
22
+ TORCH_API at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out);
23
+
24
+ } // namespace meta
25
+ } // namespace at
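These are the Meta-dispatch (shape-only) entries for aten::isposinf. A minimal sketch of the public wrapper on a real tensor, assuming a linked libtorch build:

#include <ATen/ATen.h>
#include <limits>

void isposinf_demo() {
  at::Tensor x = at::full({3}, std::numeric_limits<double>::infinity());
  at::Tensor mask = at::isposinf(x);  // boolean mask, true where the value is +inf
}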
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,26 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautogradnonfunctional {
19
+
20
+ TORCH_API at::Tensor le(const at::Tensor & self, const at::Scalar & other);
21
+ TORCH_API at::Tensor & le_(at::Tensor & self, const at::Scalar & other);
22
+ TORCH_API at::Tensor le(const at::Tensor & self, const at::Tensor & other);
23
+ TORCH_API at::Tensor & le_(at::Tensor & self, const at::Tensor & other);
24
+
25
+ } // namespace compositeexplicitautogradnonfunctional
26
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_meta_dispatch.h ADDED
@@ -0,0 +1,30 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
21
+ TORCH_API at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
22
+ TORCH_API at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out);
23
+ TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
24
+ TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
25
+ TORCH_API at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
26
+ TORCH_API at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out);
27
+ TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
28
+
29
+ } // namespace meta
30
+ } // namespace at
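These are the Meta-dispatch entries for aten::lerp with Scalar or Tensor interpolation weights. A minimal sketch of the public wrappers, assuming a linked libtorch build:

#include <ATen/ATen.h>

void lerp_demo() {
  at::Tensor start = at::zeros({4});
  at::Tensor end = at::ones({4});
  at::Tensor mid = at::lerp(start, end, 0.5);  // Scalar weight: start + 0.5 * (end - start)
  at::Tensor w = at::rand({4});
  at::Tensor mix = at::lerp(start, end, w);    // elementwise Tensor weight
}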
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh_native.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_eigh(const at::Tensor & self, c10::string_view UPLO="L");
20
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_out(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs);
21
+ } // namespace native
22
+ } // namespace at
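at::native::linalg_eigh backs aten::linalg_eigh, the eigendecomposition of a Hermitian/symmetric matrix; UPLO selects which triangle is read. A minimal sketch, assuming a linked libtorch build:

#include <ATen/ATen.h>

void eigh_demo() {
  at::Tensor a = at::randn({4, 4});
  at::Tensor sym = 0.5 * (a + a.transpose(-2, -1));             // make the input symmetric
  auto [eigvals, eigvecs] = at::linalg_eigh(sym, /*UPLO=*/"L");
}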
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/linalg_lu_factor_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)
26
+ inline ::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor(const at::Tensor & A, bool pivot=true) {
27
+ return at::_ops::linalg_lu_factor::call(A, pivot);
28
+ }
29
+
30
+ // aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
31
+ inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_out(at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A, bool pivot=true) {
32
+ return at::_ops::linalg_lu_factor_out::call(A, pivot, LU, pivots);
33
+ }
34
+ // aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
35
+ inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_outf(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots) {
36
+ return at::_ops::linalg_lu_factor_out::call(A, pivot, LU, pivots);
37
+ }
38
+
39
+ }
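The inline wrappers above expose aten::linalg_lu_factor and its out= form, returning the packed LU factors and pivot indices. A minimal sketch, assuming a linked libtorch build:

#include <ATen/ATen.h>

void lu_demo() {
  at::Tensor A = at::randn({4, 4});
  auto [LU, pivots] = at::linalg_lu_factor(A, /*pivot=*/true);  // LU is (4, 4); pivots is int32
  // The _out/_outf overloads above write into caller-provided LU / pivots tensors instead.
}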