diff --git a/llava/lib/tcl8.6/msgs/ar_jo.msg b/llava/lib/tcl8.6/msgs/ar_jo.msg new file mode 100644 index 0000000000000000000000000000000000000000..0f5e26975e8eaf3f6e097403ead544f8da63a00e --- /dev/null +++ b/llava/lib/tcl8.6/msgs/ar_jo.msg @@ -0,0 +1,39 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset ar_JO DAYS_OF_WEEK_ABBREV [list \ + "\u0627\u0644\u0623\u062d\u062f"\ + "\u0627\u0644\u0627\u062b\u0646\u064a\u0646"\ + "\u0627\u0644\u062b\u0644\u0627\u062b\u0627\u0621"\ + "\u0627\u0644\u0623\u0631\u0628\u0639\u0627\u0621"\ + "\u0627\u0644\u062e\u0645\u064a\u0633"\ + "\u0627\u0644\u062c\u0645\u0639\u0629"\ + "\u0627\u0644\u0633\u0628\u062a"] + ::msgcat::mcset ar_JO MONTHS_ABBREV [list \ + "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\ + "\u0634\u0628\u0627\u0637"\ + "\u0622\u0630\u0627\u0631"\ + "\u0646\u064a\u0633\u0627\u0646"\ + "\u0646\u0648\u0627\u0631"\ + "\u062d\u0632\u064a\u0631\u0627\u0646"\ + "\u062a\u0645\u0648\u0632"\ + "\u0622\u0628"\ + "\u0623\u064a\u0644\u0648\u0644"\ + "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u0623\u0648\u0644"\ + "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\ + "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u0623\u0648\u0644"\ + ""] + ::msgcat::mcset ar_JO MONTHS_FULL [list \ + "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\ + "\u0634\u0628\u0627\u0637"\ + "\u0622\u0630\u0627\u0631"\ + "\u0646\u064a\u0633\u0627\u0646"\ + "\u0646\u0648\u0627\u0631"\ + "\u062d\u0632\u064a\u0631\u0627\u0646"\ + "\u062a\u0645\u0648\u0632"\ + "\u0622\u0628"\ + "\u0623\u064a\u0644\u0648\u0644"\ + "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u0623\u0648\u0644"\ + "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\ + "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u0623\u0648\u0644"\ + ""] +} diff --git a/llava/lib/tcl8.6/msgs/de_at.msg b/llava/lib/tcl8.6/msgs/de_at.msg new file mode 100644 index 0000000000000000000000000000000000000000..61bc266698ca72e52c9ec6ad118ccd52ddb548c2 --- /dev/null +++ b/llava/lib/tcl8.6/msgs/de_at.msg @@ -0,0 +1,35 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset de_AT MONTHS_ABBREV [list \ + "J\u00e4n"\ + "Feb"\ + "M\u00e4r"\ + "Apr"\ + "Mai"\ + "Jun"\ + "Jul"\ + "Aug"\ + "Sep"\ + "Okt"\ + "Nov"\ + "Dez"\ + ""] + ::msgcat::mcset de_AT MONTHS_FULL [list \ + "J\u00e4nner"\ + "Februar"\ + "M\u00e4rz"\ + "April"\ + "Mai"\ + "Juni"\ + "Juli"\ + "August"\ + "September"\ + "Oktober"\ + "November"\ + "Dezember"\ + ""] + ::msgcat::mcset de_AT DATE_FORMAT "%Y-%m-%d" + ::msgcat::mcset de_AT TIME_FORMAT "%T" + ::msgcat::mcset de_AT TIME_FORMAT_12 "%T" + ::msgcat::mcset de_AT DATE_TIME_FORMAT "%a %d %b %Y %T %z" +} diff --git a/llava/lib/tcl8.6/msgs/en_ie.msg b/llava/lib/tcl8.6/msgs/en_ie.msg new file mode 100644 index 0000000000000000000000000000000000000000..ba621cf2c819eafd3909e90afc0e7f240f9e4234 --- /dev/null +++ b/llava/lib/tcl8.6/msgs/en_ie.msg @@ -0,0 +1,7 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset en_IE DATE_FORMAT "%d/%m/%y" + ::msgcat::mcset en_IE TIME_FORMAT "%T" + ::msgcat::mcset en_IE TIME_FORMAT_12 "%T" + ::msgcat::mcset en_IE DATE_TIME_FORMAT "%a %d %b %Y %T %z" +} diff --git a/llava/lib/tcl8.6/msgs/es_sv.msg b/llava/lib/tcl8.6/msgs/es_sv.msg new file mode 100644 index 0000000000000000000000000000000000000000..fc7954d6a5df82779ca4d64d4b881666e1f2bfba --- /dev/null +++ b/llava/lib/tcl8.6/msgs/es_sv.msg @@ -0,0 
+1,6 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset es_SV DATE_FORMAT "%m-%d-%Y" + ::msgcat::mcset es_SV TIME_FORMAT_12 "%I:%M:%S %P" + ::msgcat::mcset es_SV DATE_TIME_FORMAT "%m-%d-%Y %I:%M:%S %P %z" +} diff --git a/llava/lib/tcl8.6/msgs/fa_in.msg b/llava/lib/tcl8.6/msgs/fa_in.msg new file mode 100644 index 0000000000000000000000000000000000000000..adc9e91d370bb85b0141bedf05e35e98b43f9ad6 --- /dev/null +++ b/llava/lib/tcl8.6/msgs/fa_in.msg @@ -0,0 +1,52 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset fa_IN DAYS_OF_WEEK_ABBREV [list \ + "\u06cc\u2214"\ + "\u062f\u2214"\ + "\u0633\u2214"\ + "\u0686\u2214"\ + "\u067e\u2214"\ + "\u062c\u2214"\ + "\u0634\u2214"] + ::msgcat::mcset fa_IN DAYS_OF_WEEK_FULL [list \ + "\u06cc\u06cc\u200c\u0634\u0646\u0628\u0647"\ + "\u062f\u0648\u0634\u0646\u0628\u0647"\ + "\u0633\u0647\u200c\u0634\u0646\u0628\u0647"\ + "\u0686\u0647\u0627\u0631\u0634\u0646\u0628\u0647"\ + "\u067e\u0646\u062c\u200c\u0634\u0646\u0628\u0647"\ + "\u062c\u0645\u0639\u0647"\ + "\u0634\u0646\u0628\u0647"] + ::msgcat::mcset fa_IN MONTHS_ABBREV [list \ + "\u0698\u0627\u0646"\ + "\u0641\u0648\u0631"\ + "\u0645\u0627\u0631"\ + "\u0622\u0648\u0631"\ + "\u0645\u0640\u0647"\ + "\u0698\u0648\u0646"\ + "\u0698\u0648\u06cc"\ + "\u0627\u0648\u062a"\ + "\u0633\u067e\u062a"\ + "\u0627\u0643\u062a"\ + "\u0646\u0648\u0627"\ + "\u062f\u0633\u0627"\ + ""] + ::msgcat::mcset fa_IN MONTHS_FULL [list \ + "\u0698\u0627\u0646\u0648\u06cc\u0647"\ + "\u0641\u0648\u0631\u0648\u06cc\u0647"\ + "\u0645\u0627\u0631\u0633"\ + "\u0622\u0648\u0631\u06cc\u0644"\ + "\u0645\u0647"\ + "\u0698\u0648\u0626\u0646"\ + "\u0698\u0648\u0626\u06cc\u0647"\ + "\u0627\u0648\u062a"\ + "\u0633\u067e\u062a\u0627\u0645\u0628\u0631"\ + "\u0627\u0643\u062a\u0628\u0631"\ + "\u0646\u0648\u0627\u0645\u0628\u0631"\ + "\u062f\u0633\u0627\u0645\u0628\u0631"\ + ""] + ::msgcat::mcset fa_IN AM "\u0635\u0628\u062d" + ::msgcat::mcset fa_IN PM "\u0639\u0635\u0631" + ::msgcat::mcset fa_IN DATE_FORMAT "%A %d %B %Y" + ::msgcat::mcset fa_IN TIME_FORMAT_12 "%I:%M:%S %z" + ::msgcat::mcset fa_IN DATE_TIME_FORMAT "%A %d %B %Y %I:%M:%S %z %z" +} diff --git a/llava/lib/tcl8.6/msgs/fi.msg b/llava/lib/tcl8.6/msgs/fi.msg new file mode 100644 index 0000000000000000000000000000000000000000..acabba09b433ea886930c37b229b6264a2d81add --- /dev/null +++ b/llava/lib/tcl8.6/msgs/fi.msg @@ -0,0 +1,50 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset fi DAYS_OF_WEEK_ABBREV [list \ + "su"\ + "ma"\ + "ti"\ + "ke"\ + "to"\ + "pe"\ + "la"] + ::msgcat::mcset fi DAYS_OF_WEEK_FULL [list \ + "sunnuntai"\ + "maanantai"\ + "tiistai"\ + "keskiviikko"\ + "torstai"\ + "perjantai"\ + "lauantai"] + ::msgcat::mcset fi MONTHS_ABBREV [list \ + "tammi"\ + "helmi"\ + "maalis"\ + "huhti"\ + "touko"\ + "kes\u00e4"\ + "hein\u00e4"\ + "elo"\ + "syys"\ + "loka"\ + "marras"\ + "joulu"\ + ""] + ::msgcat::mcset fi MONTHS_FULL [list \ + "tammikuu"\ + "helmikuu"\ + "maaliskuu"\ + "huhtikuu"\ + "toukokuu"\ + "kes\u00e4kuu"\ + "hein\u00e4kuu"\ + "elokuu"\ + "syyskuu"\ + "lokakuu"\ + "marraskuu"\ + "joulukuu"\ + ""] + ::msgcat::mcset fi DATE_FORMAT "%e.%m.%Y" + ::msgcat::mcset fi TIME_FORMAT "%k:%M:%S" + ::msgcat::mcset fi DATE_TIME_FORMAT "%e.%m.%Y %k:%M:%S %z" +} diff --git a/llava/lib/tcl8.6/msgs/fr.msg b/llava/lib/tcl8.6/msgs/fr.msg new file mode 100644 index 0000000000000000000000000000000000000000..55b19bf949e4e0b07d28d69f23a5c44aad764062 --- /dev/null 
+++ b/llava/lib/tcl8.6/msgs/fr.msg @@ -0,0 +1,52 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset fr DAYS_OF_WEEK_ABBREV [list \ + "dim."\ + "lun."\ + "mar."\ + "mer."\ + "jeu."\ + "ven."\ + "sam."] + ::msgcat::mcset fr DAYS_OF_WEEK_FULL [list \ + "dimanche"\ + "lundi"\ + "mardi"\ + "mercredi"\ + "jeudi"\ + "vendredi"\ + "samedi"] + ::msgcat::mcset fr MONTHS_ABBREV [list \ + "janv."\ + "f\u00e9vr."\ + "mars"\ + "avr."\ + "mai"\ + "juin"\ + "juil."\ + "ao\u00fbt"\ + "sept."\ + "oct."\ + "nov."\ + "d\u00e9c."\ + ""] + ::msgcat::mcset fr MONTHS_FULL [list \ + "janvier"\ + "f\u00e9vrier"\ + "mars"\ + "avril"\ + "mai"\ + "juin"\ + "juillet"\ + "ao\u00fbt"\ + "septembre"\ + "octobre"\ + "novembre"\ + "d\u00e9cembre"\ + ""] + ::msgcat::mcset fr BCE "av. J.-C." + ::msgcat::mcset fr CE "ap. J.-C." + ::msgcat::mcset fr DATE_FORMAT "%e %B %Y" + ::msgcat::mcset fr TIME_FORMAT "%H:%M:%S" + ::msgcat::mcset fr DATE_TIME_FORMAT "%e %B %Y %H:%M:%S %z" +} diff --git a/llava/lib/tcl8.6/msgs/ja.msg b/llava/lib/tcl8.6/msgs/ja.msg new file mode 100644 index 0000000000000000000000000000000000000000..cf70c2f326043e83efc1792226331764d271be6f --- /dev/null +++ b/llava/lib/tcl8.6/msgs/ja.msg @@ -0,0 +1,44 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset ja DAYS_OF_WEEK_ABBREV [list \ + "\u65e5"\ + "\u6708"\ + "\u706b"\ + "\u6c34"\ + "\u6728"\ + "\u91d1"\ + "\u571f"] + ::msgcat::mcset ja DAYS_OF_WEEK_FULL [list \ + "\u65e5\u66dc\u65e5"\ + "\u6708\u66dc\u65e5"\ + "\u706b\u66dc\u65e5"\ + "\u6c34\u66dc\u65e5"\ + "\u6728\u66dc\u65e5"\ + "\u91d1\u66dc\u65e5"\ + "\u571f\u66dc\u65e5"] + ::msgcat::mcset ja MONTHS_FULL [list \ + "1\u6708"\ + "2\u6708"\ + "3\u6708"\ + "4\u6708"\ + "5\u6708"\ + "6\u6708"\ + "7\u6708"\ + "8\u6708"\ + "9\u6708"\ + "10\u6708"\ + "11\u6708"\ + "12\u6708"] + ::msgcat::mcset ja BCE "\u7d00\u5143\u524d" + ::msgcat::mcset ja CE "\u897f\u66a6" + ::msgcat::mcset ja AM "\u5348\u524d" + ::msgcat::mcset ja PM "\u5348\u5f8c" + ::msgcat::mcset ja DATE_FORMAT "%Y/%m/%d" + ::msgcat::mcset ja TIME_FORMAT "%k:%M:%S" + ::msgcat::mcset ja TIME_FORMAT_12 "%P %I:%M:%S" + ::msgcat::mcset ja DATE_TIME_FORMAT "%Y/%m/%d %k:%M:%S %z" + ::msgcat::mcset ja LOCALE_DATE_FORMAT "%EY\u5e74%m\u6708%d\u65e5" + ::msgcat::mcset ja LOCALE_TIME_FORMAT "%H\u6642%M\u5206%S\u79d2" + ::msgcat::mcset ja LOCALE_DATE_TIME_FORMAT "%EY\u5e74%m\u6708%d\u65e5 (%a) %H\u6642%M\u5206%S\u79d2 %z" + ::msgcat::mcset ja LOCALE_ERAS "{-9223372036854775808 \u897f\u66a6 0} {-3061011600 \u660e\u6cbb 1867} {-1812186000 \u5927\u6b63 1911} {-1357635600 \u662d\u548c 1925} {600220800 \u5e73\u6210 1988} {1556668800 \u4ee4\u548c 2018}" +} diff --git a/llava/lib/tcl8.6/msgs/mr_in.msg b/llava/lib/tcl8.6/msgs/mr_in.msg new file mode 100644 index 0000000000000000000000000000000000000000..1889da5c4525a8c1c5fe1be62bb5bd29746c5d5c --- /dev/null +++ b/llava/lib/tcl8.6/msgs/mr_in.msg @@ -0,0 +1,6 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset mr_IN DATE_FORMAT "%d %M %Y" + ::msgcat::mcset mr_IN TIME_FORMAT_12 "%I:%M:%S %P" + ::msgcat::mcset mr_IN DATE_TIME_FORMAT "%d %M %Y %I:%M:%S %P %z" +} diff --git a/llava/lib/tcl8.6/msgs/sk.msg b/llava/lib/tcl8.6/msgs/sk.msg new file mode 100644 index 0000000000000000000000000000000000000000..9b2f0aadd2de5b7bd3aedf9e1fdc47caf6704a07 --- /dev/null +++ b/llava/lib/tcl8.6/msgs/sk.msg @@ -0,0 +1,52 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + 
::msgcat::mcset sk DAYS_OF_WEEK_ABBREV [list \ + "Ne"\ + "Po"\ + "Ut"\ + "St"\ + "\u0160t"\ + "Pa"\ + "So"] + ::msgcat::mcset sk DAYS_OF_WEEK_FULL [list \ + "Nede\u013ee"\ + "Pondelok"\ + "Utorok"\ + "Streda"\ + "\u0160tvrtok"\ + "Piatok"\ + "Sobota"] + ::msgcat::mcset sk MONTHS_ABBREV [list \ + "jan"\ + "feb"\ + "mar"\ + "apr"\ + "m\u00e1j"\ + "j\u00fan"\ + "j\u00fal"\ + "aug"\ + "sep"\ + "okt"\ + "nov"\ + "dec"\ + ""] + ::msgcat::mcset sk MONTHS_FULL [list \ + "janu\u00e1r"\ + "febru\u00e1r"\ + "marec"\ + "apr\u00edl"\ + "m\u00e1j"\ + "j\u00fan"\ + "j\u00fal"\ + "august"\ + "september"\ + "okt\u00f3ber"\ + "november"\ + "december"\ + ""] + ::msgcat::mcset sk BCE "pred n.l." + ::msgcat::mcset sk CE "n.l." + ::msgcat::mcset sk DATE_FORMAT "%e.%m.%Y" + ::msgcat::mcset sk TIME_FORMAT "%k:%M:%S" + ::msgcat::mcset sk DATE_TIME_FORMAT "%e.%m.%Y %k:%M:%S %z" +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_add_relu_meta_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_add_relu_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f9f294d25dd5370bda77bbe28781f2e2057fcace --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_add_relu_meta_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
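// [Editor's sketch] The meta-dispatch declarations just below expose the
// fused in-place add+ReLU op. A minimal usage sketch, assuming a recent
// libtorch build (the leading underscore marks this as an internal op, so
// the signature may change between releases):
#include <ATen/ATen.h>

void add_relu_demo() {
  at::Tensor self  = at::randn({4, 4});
  at::Tensor other = at::randn({4, 4});
  // Computes self = relu(self + alpha * other) in a single fused kernel.
  at::_add_relu_(self, other, /*alpha=*/1);
}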
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & _add_relu_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1); +TORCH_API at::Tensor & _add_relu_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1); + +} // namespace meta +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_flatten_weight.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_flatten_weight.h new file mode 100644 index 0000000000000000000000000000000000000000..5dc6e802701a5d9e4b5ab9cc315ede95805d3a80 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_flatten_weight.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor +inline at::Tensor _cudnn_rnn_flatten_weight(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) { + return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional); +} +namespace symint { + template ::value>> + at::Tensor _cudnn_rnn_flatten_weight(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) { + return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional); + } +} + +// aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor +inline at::Tensor _cudnn_rnn_flatten_weight_symint(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) { + return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional); +} +namespace symint { + template ::value>> + at::Tensor _cudnn_rnn_flatten_weight(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) { + return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional); + } +} + +// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!) 
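// [Editor's sketch] A hedged usage sketch for the functional form declared
// above. The op packs per-layer RNN weights into cuDNN's flat buffer, so it
// needs a CUDA/cuDNN build; mode here follows cuDNN's RNN enum (2 for LSTM)
// and weight_stride0 of 4 assumes an LSTM with biases -- both values are
// assumptions, not documented in this header.
#include <ATen/ATen.h>

at::Tensor flatten_lstm_weights(at::TensorList weight_arr) {
  return at::_cudnn_rnn_flatten_weight(
      weight_arr, /*weight_stride0=*/4, /*input_size=*/128, /*mode=*/2,
      /*hidden_size=*/256, /*proj_size=*/0, /*num_layers=*/1,
      /*batch_first=*/true, /*bidirectional=*/false);
}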
+inline at::Tensor & _cudnn_rnn_flatten_weight_out(at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) { + return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out); +} +namespace symint { + template ::value>> + at::Tensor & _cudnn_rnn_flatten_weight_out(at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) { + return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out); + } +} + +// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _cudnn_rnn_flatten_weight_outf(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) { + return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out); +} +namespace symint { + template ::value>> + at::Tensor & _cudnn_rnn_flatten_weight_outf(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) { + return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out); + } +} + +// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _cudnn_rnn_flatten_weight_symint_out(at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) { + return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out); +} +namespace symint { + template ::value>> + at::Tensor & _cudnn_rnn_flatten_weight_out(at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) { + return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out); + } +} + +// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!) 
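// [Editor's note] As the declarations around this point show, torchgen emits
// every out-variant twice: `_out` takes the result tensor first, `_outf`
// takes it last, matching the schema string. Both forward to the same _ops
// entry, so the two calls below are equivalent:
#include <ATen/ATen.h>

void out_convention_demo(at::Tensor& out, at::TensorList w) {
  at::_cudnn_rnn_flatten_weight_out(out, w, 4, 128, 2, 256, 0, 1, true, false);
  at::_cudnn_rnn_flatten_weight_outf(w, 4, 128, 2, 256, 0, 1, true, false, out);
}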
+inline at::Tensor & _cudnn_rnn_flatten_weight_symint_outf(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) { + return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out); +} +namespace symint { + template ::value>> + at::Tensor & _cudnn_rnn_flatten_weight_outf(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) { + return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out); + } +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2690e00ec786c48d587e5e26527acf3684cf24aa --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
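// [Editor's sketch] _efficient_attention_forward (declared below) is the raw
// entry point of the CUDA memory-efficient attention kernel. Application code
// normally reaches it through the public scaled_dot_product_attention API
// instead; a minimal sketch, assuming at::scaled_dot_product_attention is
// available in this build:
#include <ATen/ATen.h>

at::Tensor sdpa_demo() {
  auto opts = at::TensorOptions().device(at::kCUDA).dtype(at::kHalf);
  at::Tensor q = at::randn({2, 8, 128, 64}, opts);  // (batch, heads, seq, head_dim)
  at::Tensor k = at::randn({2, 8, 128, 64}, opts);
  at::Tensor v = at::randn({2, 8, 128, 64}, opts);
  return at::scaled_dot_product_attention(q, k, v);
}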
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple _efficient_attention_forward(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional & bias, const ::std::optional & cu_seqlens_q, const ::std::optional & cu_seqlens_k, ::std::optional max_seqlen_q, ::std::optional max_seqlen_k, double dropout_p, int64_t custom_mask_type, bool compute_log_sumexp=false, ::std::optional scale=::std::nullopt, const ::std::optional & seqlen_k={}, ::std::optional window_size=::std::nullopt); +TORCH_API ::std::tuple _efficient_attention_forward_symint(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional & bias, const ::std::optional & cu_seqlens_q, const ::std::optional & cu_seqlens_k, ::std::optional max_seqlen_q, ::std::optional max_seqlen_k, double dropout_p, int64_t custom_mask_type, bool compute_log_sumexp=false, ::std::optional scale=::std::nullopt, const ::std::optional & seqlen_k={}, ::std::optional window_size=::std::nullopt); + +} // namespace cuda +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_addcmul.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_addcmul.h new file mode 100644 index 0000000000000000000000000000000000000000..234bf10f3954bee176a675486d429ca6a2d137bf --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_addcmul.h @@ -0,0 +1,82 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[] +inline ::std::vector _foreach_addcmul(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) { + return at::_ops::_foreach_addcmul_Scalar::call(self, tensor1, tensor2, value); +} + +// aten::_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[] +inline ::std::vector _foreach_addcmul(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars) { + return at::_ops::_foreach_addcmul_ScalarList::call(self, tensor1, tensor2, scalars); +} + +// aten::_foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[] +inline ::std::vector _foreach_addcmul(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) { + return at::_ops::_foreach_addcmul_Tensor::call(self, tensor1, tensor2, scalars); +} + +// aten::_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> () +inline void _foreach_addcmul_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) { + return at::_ops::_foreach_addcmul__Scalar::call(self, tensor1, tensor2, value); +} + +// aten::_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> () +inline void _foreach_addcmul_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars) { + return at::_ops::_foreach_addcmul__ScalarList::call(self, tensor1, tensor2, scalars); +} + +// aten::_foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> () +inline void _foreach_addcmul_(at::TensorList self, 
at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) { + return at::_ops::_foreach_addcmul__Tensor::call(self, tensor1, tensor2, scalars); +} + +// aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> () +inline void _foreach_addcmul_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) { + return at::_ops::_foreach_addcmul_Scalar_out::call(self, tensor1, tensor2, value, out); +} +// aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> () +inline void _foreach_addcmul_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) { + return at::_ops::_foreach_addcmul_Scalar_out::call(self, tensor1, tensor2, value, out); +} + +// aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> () +inline void _foreach_addcmul_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars) { + return at::_ops::_foreach_addcmul_ScalarList_out::call(self, tensor1, tensor2, scalars, out); +} +// aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> () +inline void _foreach_addcmul_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars, at::TensorList out) { + return at::_ops::_foreach_addcmul_ScalarList_out::call(self, tensor1, tensor2, scalars, out); +} + +// aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> () +inline void _foreach_addcmul_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) { + return at::_ops::_foreach_addcmul_Tensor_out::call(self, tensor1, tensor2, scalars, out); +} +// aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> () +inline void _foreach_addcmul_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) { + return at::_ops::_foreach_addcmul_Tensor_out::call(self, tensor1, tensor2, scalars, out); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..42a92e3c5acc201534f63d111640cdebc43a61b4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
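// [Editor's sketch] The _foreach_* family applies one op across a whole list
// of tensors with a single dispatch, which is how the fused optimizers batch
// parameter updates. A sketch combining the addcmul overload declared above
// (self[i] += value * tensor1[i] * tensor2[i]) with the clamp_max overload
// declared in the dispatch header below:
#include <ATen/ATen.h>
#include <vector>

void foreach_demo() {
  std::vector<at::Tensor> p  = {at::zeros({3}), at::zeros({5})};
  std::vector<at::Tensor> g  = {at::ones({3}),  at::ones({5})};
  std::vector<at::Tensor> g2 = {at::ones({3}),  at::ones({5})};
  at::_foreach_addcmul_(p, g, g2, /*value=*/0.5);  // one fused pass over the list
  at::_foreach_clamp_max_(p, at::Scalar(1.0));     // clamp every entry to <= 1.0
}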
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::vector _foreach_clamp_max(at::TensorList self, const at::Scalar & scalar); +TORCH_API void _foreach_clamp_max_(at::TensorList self, const at::Scalar & scalar); +TORCH_API ::std::vector _foreach_clamp_max(at::TensorList self, at::TensorList other); +TORCH_API void _foreach_clamp_max_(at::TensorList self, at::TensorList other); +TORCH_API ::std::vector _foreach_clamp_max(at::TensorList self, at::ArrayRef scalars); +TORCH_API void _foreach_clamp_max_(at::TensorList self, at::ArrayRef scalars); + +} // namespace cuda +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..de31a883889cfc20c99365a52d60e2850c2ef73c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _foreach_log { + using schema = ::std::vector (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_log") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_log(Tensor[] self) -> Tensor[]") + static ::std::vector call(at::TensorList self); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self); +}; + +struct TORCH_API _foreach_log_ { + using schema = void (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_log_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_log_(Tensor(a!)[] self) -> ()") + static void call(at::TensorList self); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self); +}; + +struct TORCH_API _foreach_log_out { + using schema = void (at::TensorList, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_log") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()") + static void call(at::TensorList self, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_cpu_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a445539e587c8fdb618afa159222015bb50eb9f1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_cpu_dispatch.h 
@@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor _log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype); +TORCH_API at::Tensor & _log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype); +TORCH_API at::Tensor & _log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_mps_convolution_transpose_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_mps_convolution_transpose_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1c981a72d87e66778d19138ae9cda88798e3e71a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_mps_convolution_transpose_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
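// [Editor's sketch] Each *_dispatch.h header pins an op to one dispatch key;
// at::cpu::_log_softmax_backward_data above reaches the CPU kernel directly,
// bypassing the dispatcher. The sketch below goes through the ordinary
// dispatched entry point instead, so it only needs ATen/ATen.h:
#include <ATen/ATen.h>

at::Tensor log_softmax_grad_demo() {
  at::Tensor x    = at::randn({2, 5});
  at::Tensor out  = at::_log_softmax(x, /*dim=*/1, /*half_to_float=*/false);
  at::Tensor gout = at::ones_like(out);
  return at::_log_softmax_backward_data(gout, out, /*dim=*/1, x.scalar_type());
}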
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & _mps_convolution_transpose_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups); +TORCH_API at::Tensor & _mps_convolution_transpose_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out); +TORCH_API at::Tensor & _mps_convolution_transpose_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups); +TORCH_API at::Tensor & _mps_convolution_transpose_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_no_training_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_no_training_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3577a91b4d219440698254ec848a178a795cabb9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_no_training_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
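// [Editor's sketch] The no-training batch-norm variant declared below
// normalizes with fixed running statistics and returns a three-tensor tuple;
// at inference only the first element (the output) is meaningful, and the
// other two appear to exist for schema compatibility with the training
// variant. A minimal sketch:
#include <ATen/ATen.h>
#include <tuple>

at::Tensor bn_inference_demo(const at::Tensor& input) {
  int64_t c = input.size(1);  // channel dimension
  auto res = at::_native_batch_norm_legit_no_training(
      input, /*weight=*/at::ones({c}), /*bias=*/at::zeros({c}),
      /*running_mean=*/at::zeros({c}), /*running_var=*/at::ones({c}),
      /*momentum=*/0.1, /*eps=*/1e-5);
  return std::get<0>(res);
}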
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple _native_batch_norm_legit_no_training(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps); +TORCH_API ::std::tuple _native_batch_norm_legit_no_training_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps); +TORCH_API ::std::tuple _native_batch_norm_legit_no_training_outf(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_no_training_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_no_training_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..bc412e1e49261606e687d34410c9cb6e5a72196f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_no_training_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _native_batch_norm_legit_no_training { + using schema = ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, double, double); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_native_batch_norm_legit_no_training") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_native_batch_norm_legit_no_training(Tensor input, Tensor? weight, Tensor? 
bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps); +}; + +struct TORCH_API _native_batch_norm_legit_no_training_out { + using schema = ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, double, double, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_native_batch_norm_legit_no_training") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))") + static ::std::tuple call(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_offsets_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_offsets_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5127ff1f0312f675412976861009a598591df696 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_offsets_native.h @@ -0,0 +1,20 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bb938ee12486737200c5e3a432638a50805db3bd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
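// [Editor's note] The Operator.h structs above give each schema a typed entry
// point: ::call runs full dispatch, while ::redispatch resumes from a given
// DispatchKeySet (used inside kernels and fallbacks). A sketch, assuming the
// generated _ops header is reachable via ATen/ATen.h as in a full build:
#include <ATen/ATen.h>
#include <tuple>

at::Tensor ops_call_demo(const at::Tensor& x, const at::Tensor& rm,
                         const at::Tensor& rv) {
  auto res = at::_ops::_native_batch_norm_legit_no_training::call(
      x, /*weight=*/{}, /*bias=*/{}, rm, rv, /*momentum=*/0.1, /*eps=*/1e-5);
  return std::get<0>(res);
}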
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple _scaled_dot_product_efficient_attention_backward(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, double dropout_p, ::std::array grad_input_mask, bool is_causal=false, ::std::optional scale=::std::nullopt); + +} // namespace cuda +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe.h new file mode 100644 index 0000000000000000000000000000000000000000..1fa81bb54fb9f7ad5bf3dbe94747e419d79b0696 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe.h @@ -0,0 +1,69 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } +} + +// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor _sparse_compressed_tensor_unsafe_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor _sparse_compressed_tensor_unsafe_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory); + } +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_copy_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..24c15f45ca642380bef3f2b9848485c8ba64c9be --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
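// [Editor's sketch] _to_copy (declared below) is the functional op behind
// Tensor::to() when a real copy is required: it always materializes a new
// tensor with the requested options. A sketch of the TensorOptions overload:
#include <ATen/ATen.h>

at::Tensor to_copy_demo(const at::Tensor& t) {
  return at::_to_copy(t, at::TensorOptions().dtype(at::kDouble),
                      /*non_blocking=*/false);
}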
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor _to_copy(const at::Tensor & self, at::TensorOptions options={}, bool non_blocking=false, ::std::optional memory_format=::std::nullopt); +TORCH_API at::Tensor _to_copy(const at::Tensor & self, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, bool non_blocking, ::std::optional memory_format); +TORCH_API at::Tensor & _to_copy_out(at::Tensor & out, const at::Tensor & self, bool non_blocking=false, ::std::optional memory_format=::std::nullopt); +TORCH_API at::Tensor & _to_copy_outf(const at::Tensor & self, bool non_blocking, ::std::optional memory_format, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csc_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csc_native.h new file mode 100644 index 0000000000000000000000000000000000000000..aee6893aee77c64462199f493d39d63535f61a1c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csc_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & _to_sparse_csc_out(const at::Tensor & self, ::std::optional dense_dim, at::Tensor & out); +TORCH_API at::Tensor dense_to_sparse_csc(const at::Tensor & self, ::std::optional dense_dim=::std::nullopt); +TORCH_API at::Tensor coo_to_sparse_csc(const at::Tensor & self, ::std::optional dense_dim=::std::nullopt); +TORCH_API at::Tensor sparse_compressed_to_sparse_csc(const at::Tensor & self, ::std::optional dense_dim=::std::nullopt); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_semi_structured.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_semi_structured.h new file mode 100644 index 0000000000000000000000000000000000000000..23608aad68dee43866afd07808991a12d0f5e2f6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_semi_structured.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_to_sparse_semi_structured(Tensor dense) -> (Tensor, Tensor) +inline ::std::tuple _to_sparse_semi_structured(const at::Tensor & dense) { + return at::_ops::_to_sparse_semi_structured::call(dense); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..86e691af45088836e54babde6d08e2215cf8c789 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_adaptive_max_pool2d_out_cpu : public at::meta::structured_adaptive_max_pool2d { +void impl(const at::Tensor 
& self, at::IntArrayRef output_size, const at::Tensor & out, const at::Tensor & indices); +}; +struct TORCH_API structured_adaptive_max_pool2d_out_cuda : public at::meta::structured_adaptive_max_pool2d { +void impl(const at::Tensor & self, at::IntArrayRef output_size, const at::Tensor & out, const at::Tensor & indices); +}; +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/align_to_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/align_to_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b23d8c51d56453da0ea0380001ac4cc8f937dcd4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/align_to_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor align_to(const at::Tensor & self, at::DimnameList names); +TORCH_API at::Tensor align_to(const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_elemt_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_elemt_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..db1e3dc55ab998fee577b6d9f8fe214ad7f2a467 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_elemt_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
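// [Editor's sketch] batch_norm_elemt (declared below) applies only the
// element-wise normalization step, taking precomputed per-channel mean and
// inverse standard deviation; SyncBatchNorm uses it after reducing the
// statistics across processes. A sketch -- this header binds the CUDA kernel,
// so the inputs are assumed to live on GPU:
#include <ATen/ATen.h>

at::Tensor bn_elemt_demo(const at::Tensor& input, const at::Tensor& mean,
                         const at::Tensor& invstd) {
  return at::batch_norm_elemt(input, /*weight=*/{}, /*bias=*/{},
                              mean, invstd, /*eps=*/1e-5);
}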
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor batch_norm_elemt(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps); +TORCH_API at::Tensor & batch_norm_elemt_out(at::Tensor & out, const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps); +TORCH_API at::Tensor & batch_norm_elemt_outf(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cdist_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cdist_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6437d5dc5751025692b0b4be8a1b5c27738f5055 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cdist_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor cdist(const at::Tensor & x1, const at::Tensor & x2, double p=2, ::std::optional compute_mode=::std::nullopt); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/choose_qparams_optimized_compositeimplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/choose_qparams_optimized_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8f175dda35f6f5e5b698632e691d955249d6f23a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/choose_qparams_optimized_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple choose_qparams_optimized(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3cb5bfe0756e69239e2a17149915a7aba0dd105a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
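// [Editor's note] clamp_ops.h below carries the full overload family for one
// schema name -- Scalar vs Tensor bounds, in-place (clamp_), and out
// variants -- each as its own struct keyed by overload_name. At the public
// API level those resolve as:
#include <ATen/ATen.h>

void clamp_demo() {
  at::Tensor t  = at::randn({4});
  at::Tensor a  = at::clamp(t, 0.0, 1.0);   // "clamp"        (Scalar? bounds)
  at::Tensor lo = at::zeros({4});
  at::Tensor hi = at::ones({4});
  at::Tensor b  = at::clamp(t, lo, hi);     // "clamp.Tensor" (Tensor? bounds)
  t.clamp_(/*min=*/-1.0, /*max=*/1.0);      // "clamp_"       (in place)
}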
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API clamp {
+  using schema = at::Tensor (const at::Tensor &, const ::std::optional<at::Scalar> &, const ::std::optional<at::Scalar> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max);
+};
+
+struct TORCH_API clamp_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max);
+};
+
+struct TORCH_API clamp_ {
+  using schema = at::Tensor & (at::Tensor &, const ::std::optional<at::Scalar> &, const ::std::optional<at::Scalar> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max);
+};
+
+struct TORCH_API clamp__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max);
+};
+
+struct TORCH_API clamp_out {
+  using schema = at::Tensor & (const at::Tensor &, const ::std::optional<at::Scalar> &, const ::std::optional<at::Scalar> &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max, at::Tensor & out);
+};
+
+struct TORCH_API clamp_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..24d08726256c26db2704846c31e874d91886c8b2
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor col_indices_copy(const at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..946edb16883ffccb46a7ecf3811f29ffa77664de
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor cudnn_grid_sampler(const at::Tensor & self, const at::Tensor & grid);
+
+} // namespace cuda
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/deg2rad_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/deg2rad_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..3f4690d7ca6998ac3bb3e09fdfddc0e665c1b98b
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/deg2rad_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API deg2rad {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::deg2rad")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "deg2rad(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API deg2rad_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::deg2rad_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "deg2rad_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API deg2rad_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::deg2rad")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/feature_alpha_dropout_compositeimplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/feature_alpha_dropout_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..29cdd6392e00467bc08f0fac60061e91d980664b
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/feature_alpha_dropout_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor feature_alpha_dropout(const at::Tensor & input, double p, bool train);
+TORCH_API at::Tensor & feature_alpha_dropout_(at::Tensor & self, double p, bool train);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/feature_dropout.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/feature_dropout.h
new file mode 100644
index 0000000000000000000000000000000000000000..d19cade9aa98321e927921dcdfe796a9d8633b77
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/feature_dropout.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/feature_dropout_ops.h>
+
+namespace at {
+
+
+// aten::feature_dropout(Tensor input, float p, bool train) -> Tensor
+inline at::Tensor feature_dropout(const at::Tensor & input, double p, bool train) {
+    return at::_ops::feature_dropout::call(input, p, train);
+}
+
+// aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
+inline at::Tensor & feature_dropout_(at::Tensor & self, double p, bool train) {
+    return at::_ops::feature_dropout_::call(self, p, train);
+}
+
+}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fft_compositeimplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fft_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..845e5ad621989dbf896ce048631b0089d91ab3ee
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fft_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor fft_fft(const at::Tensor & self, ::std::optional<int64_t> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt);
+TORCH_API at::Tensor fft_fft_symint(const at::Tensor & self, ::std::optional<c10::SymInt> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt);
+TORCH_API at::Tensor & fft_fft_out(at::Tensor & out, const at::Tensor & self, ::std::optional<int64_t> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt);
+TORCH_API at::Tensor & fft_fft_outf(const at::Tensor & self, ::std::optional<int64_t> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out);
+TORCH_API at::Tensor & fft_fft_symint_out(at::Tensor & out, const at::Tensor & self, ::std::optional<c10::SymInt> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt);
+TORCH_API at::Tensor & fft_fft_symint_outf(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc6c945c0854928cfd497af8a67b8477111aa9c8
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/flatten_dense_tensors_ops.h>
+
+namespace at {
+
+
+// aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor
+inline at::Tensor flatten_dense_tensors(at::TensorList tensors) {
+    return at::_ops::flatten_dense_tensors::call(tensors);
+}
+
+}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..714e2139cebc7f008273ffb499e6cc0aa12439d7
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> geqrf(const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> geqrf_out(const at::Tensor & self, at::Tensor & a, at::Tensor & tau);
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gt_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gt_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ef43564ff7cfd42ff7b112355af8539fa26c1946
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gt_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor gt(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor gt(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f29b0293645726c227b2264e4eaf866a1e179a4
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor hardswish(const at::Tensor & self);
+TORCH_API at::Tensor & hardswish_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & hardswish_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & hardswish_(at::Tensor & self);
+
+} // namespace cuda
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..232d6bc84949ceca5a52cfb46454227362799d65
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API histogramdd {
+  using schema = ::std::tuple<at::Tensor,::std::vector<at::Tensor>> (const at::Tensor &, at::IntArrayRef, ::std::optional<at::ArrayRef<double>>, const ::std::optional<at::Tensor> &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::histogramdd")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)")
+  static ::std::tuple<at::Tensor,::std::vector<at::Tensor>> call(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density);
+  static ::std::tuple<at::Tensor,::std::vector<at::Tensor>> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density);
+};
+
+struct TORCH_API histogramdd_int_bins {
+  using schema = ::std::tuple<at::Tensor,::std::vector<at::Tensor>> (const at::Tensor &, int64_t, ::std::optional<at::ArrayRef<double>>, const ::std::optional<at::Tensor> &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::histogramdd")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int_bins")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)")
+  static ::std::tuple<at::Tensor,::std::vector<at::Tensor>> call(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density);
+  static ::std::tuple<at::Tensor,::std::vector<at::Tensor>> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density);
+};
+
+struct TORCH_API histogramdd_TensorList_bins {
+  using schema = ::std::tuple<at::Tensor,::std::vector<at::Tensor>> (const at::Tensor &, at::TensorList, ::std::optional<at::ArrayRef<double>>, const ::std::optional<at::Tensor> &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::histogramdd")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "TensorList_bins")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)")
+  static ::std::tuple<at::Tensor,::std::vector<at::Tensor>> call(const at::Tensor & self, at::TensorList bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density);
+  static ::std::tuple<at::Tensor,::std::vector<at::Tensor>> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density);
+};
+
+}} // namespace at::_ops
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm.h
new file mode 100644
index 0000000000000000000000000000000000000000..e6f6cdb7fff1ddf8ab8e2e56e5b4fd8ccb6a8350
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/instance_norm_ops.h>
+
+namespace at {
+
+
+// aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor
+inline at::Tensor instance_norm(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
+    return at::_ops::instance_norm::call(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
+}
+
+}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..164a59b980a6be1ca6c948d20c78ad6dcfd5a5c5
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor instance_norm(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled);
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b40d121d4e8baff929717269d4d145fab1f748cf
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor isposinf(const at::Tensor & self);
+TORCH_API at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..99b0f6716cedbd169834f261cafd4382d460d38b
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor le(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & le_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor le(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & le_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_meta_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a320092df1fdf8c7bf3eef483d5feddf1b83063c
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_meta_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+TORCH_API at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+TORCH_API at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out);
+TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+TORCH_API at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+TORCH_API at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out);
+TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+
+} // namespace meta
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..e6f4f3423d58435bef913eb243e5618540dab8de
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_eigh(const at::Tensor & self, c10::string_view UPLO="L");
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_out(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs);
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor.h
new file mode 100644
index 0000000000000000000000000000000000000000..9487988ddaf2fd27412a2d0932c96ff27e1df053
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/linalg_lu_factor_ops.h>
+
+namespace at {
+
+
+// aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)
+inline ::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor(const at::Tensor & A, bool pivot=true) {
+    return at::_ops::linalg_lu_factor::call(A, pivot);
+}
+
+// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
+inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_out(at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A, bool pivot=true) {
+    return at::_ops::linalg_lu_factor_out::call(A, pivot, LU, pivots);
+}
+// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
+inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_outf(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots) {
+    return at::_ops::linalg_lu_factor_out::call(A, pivot, LU, pivots);
+}
+
+}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linear.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linear.h
new file mode 100644
index 0000000000000000000000000000000000000000..fa104be020a768d7a0350a153e65912d9a387366
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linear.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/linear_ops.h>
+
+namespace at {
+
+
+// aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
+inline at::Tensor linear(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias={}) {
+    return at::_ops::linear::call(input, weight, bias);
+}
+
+// aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linear_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias={}) {
+    return at::_ops::linear_out::call(input, weight, bias, out);
+}
+// aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linear_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & out) {
+    return at::_ops::linear_out::call(input, weight, bias, out);
+}
+
+}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..94334c873a1b5c8132c4492f9ecc7c13436bc584
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor log_normal(const at::Tensor & self, double mean=1, double std=2, ::std::optional<at::Generator> generator=::std::nullopt);
+TORCH_API at::Tensor & log_normal_out(const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator, at::Tensor & out);
+TORCH_API at::Tensor & log_normal_(at::Tensor & self, double mean=1, double std=2, ::std::optional<at::Generator> generator=::std::nullopt);
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..641306ff680c0e24511b642a01ef3aaf9cadb6aa
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/logaddexp_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_logaddexp_out : public at::meta::structured_logaddexp {
+void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..64549776af7a884ff0817c5620da990de2554740
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_native.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base=10.0, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+TORCH_API at::Tensor & logspace_out(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out);
+TORCH_API at::Tensor & logspace_cuda_out(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out);
+TORCH_API at::Tensor logspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base=10.0, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+TORCH_API at::Tensor & logspace_out(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out);
+TORCH_API at::Tensor logspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base=10.0, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+TORCH_API at::Tensor & logspace_out(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out);
+TORCH_API at::Tensor logspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base=10.0, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+TORCH_API at::Tensor & logspace_out(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_relu_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_relu_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b184b8b9ad9aefb60eee4aa5b972f735f0f74ab1
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_relu_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API miopen_convolution_relu {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::miopen_convolution_relu")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups);
+};
+
+}} // namespace at::_ops
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/mode_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/mode_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e550c1c3c3572fd392da925942aa5f999a763975
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/mode_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> mode(const at::Tensor & self, int64_t dim=-1, bool keepdim=false);
+
+} // namespace cuda
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b5f2651a8dca9ab7f39b1e03b7bb224290e13f0d
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor nan_to_num(const at::Tensor & self, ::std::optional<double> nan=::std::nullopt, ::std::optional<double> posinf=::std::nullopt, ::std::optional<double> neginf=::std::nullopt);
+TORCH_API at::Tensor & nan_to_num_(at::Tensor & self, ::std::optional<double> nan=::std::nullopt, ::std::optional<double> posinf=::std::nullopt, ::std::optional<double> neginf=::std::nullopt);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/new_zeros_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/new_zeros_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..353a47ceba4757acf6c47fd9495bdff260404bcc
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/new_zeros_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API new_zeros {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::new_zeros")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+};
+
+struct TORCH_API new_zeros_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::new_zeros")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..a6bddc66913221bcc74702e16c7d48e4d6febc52
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API nll_loss_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, int64_t, c10::SymInt, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nll_loss_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input);
+};
+
+struct TORCH_API nll_loss_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, int64_t, c10::SymInt, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nll_loss_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight);
+};
+
+}} // namespace at::_ops
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/random.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/random.h
new file mode 100644
index 0000000000000000000000000000000000000000..651e546f4b76c769774530aa7e83a3a37d8cd593
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/random.h
@@ -0,0 +1,67 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/random_ops.h>
+
+namespace at {
+
+
+// aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator=::std::nullopt) {
+    return at::_ops::random_from_out::call(self, from, to, generator, out);
+}
+// aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & random_outf(const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator, at::Tensor & out) {
+    return at::_ops::random_from_out::call(self, from, to, generator, out);
+}
+
+// aten::random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor
+inline at::Tensor random(const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator=::std::nullopt) {
+    return at::_ops::random_from::call(self, from, to, generator);
+}
+
+// aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator=::std::nullopt) {
+    return at::_ops::random_to_out::call(self, to, generator, out);
+}
+// aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & random_outf(const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator, at::Tensor & out) {
+    return at::_ops::random_to_out::call(self, to, generator, out);
+}
+
+// aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor
+inline at::Tensor random(const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator=::std::nullopt) {
+    return at::_ops::random_to::call(self, to, generator);
+}
+
+// aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, ::std::optional<at::Generator> generator=::std::nullopt) {
+    return at::_ops::random_out::call(self, generator, out);
+}
+// aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & random_outf(const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
+    return at::_ops::random_out::call(self, generator, out);
+}
+
+// aten::random(Tensor self, *, Generator? generator=None) -> Tensor
+inline at::Tensor random(const at::Tensor & self, ::std::optional<at::Generator> generator=::std::nullopt) {
+    return at::_ops::random::call(self, generator);
+}
+
+}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..4e8ffdf9ad4b4043be13a3497e36fc6519460c0b
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/reflection_pad1d_backward_ops.h>
+
+namespace at {
+
+
+// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & reflection_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
+    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & reflection_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
+    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
+  }
+}
+
+// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & reflection_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
+    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & reflection_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
+    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
+  }
+}
+
+// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & reflection_pad1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
+    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, padding, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & reflection_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
+    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, padding, grad_input);
+  }
+}
+
+// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & reflection_pad1d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
+    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, padding, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & reflection_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
+    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, padding, grad_input);
+  }
+}
+
+// aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
+inline at::Tensor reflection_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
+    return at::_ops::reflection_pad1d_backward::call(grad_output, self, c10::fromIntArrayRefSlow(padding));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor reflection_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
+    return at::_ops::reflection_pad1d_backward::call(grad_output, self, c10::fromIntArrayRefSlow(padding));
+  }
+}
+
+// aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
+inline at::Tensor reflection_pad1d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
+    return at::_ops::reflection_pad1d_backward::call(grad_output, self, padding);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor reflection_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
+    return at::_ops::reflection_pad1d_backward::call(grad_output, self, padding);
+  }
+}
+
+}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3c5c73db797a5dd12b858cd14153d1dd5d78797b
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor reflection_pad3d(const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor reflection_pad3d_symint(const at::Tensor & self, c10::SymIntArrayRef padding);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_meta_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c7edad53c3859f9ff33898fb85dea53dbdd0d47b
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_meta_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor replication_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor replication_pad1d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding);
+TORCH_API at::Tensor & replication_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor & replication_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input);
+TORCH_API at::Tensor & replication_pad1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding);
+TORCH_API at::Tensor & replication_pad1d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input);
+
+} // namespace meta
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/round_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/round_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd43946208ce0e60a09444ea4bb78c4b3ccb21dc
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/round_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor round(const at::Tensor & self);
+TORCH_API at::Tensor & round_(at::Tensor & self);
+TORCH_API at::Tensor round(const at::Tensor & self, int64_t decimals);
+TORCH_API at::Tensor & round_(at::Tensor & self, int64_t decimals);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sinc.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sinc.h
new file mode 100644
index 0000000000000000000000000000000000000000..dc4117a43b34d21a0e678dc875f99f89771f6ffb
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sinc.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/sinc_ops.h>
+
+namespace at {
+
+
+// aten::sinc(Tensor self) -> Tensor
+inline at::Tensor sinc(const at::Tensor & self) {
+    return at::_ops::sinc::call(self);
+}
+
+// aten::sinc_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & sinc_(at::Tensor & self) {
+    return at::_ops::sinc_::call(self);
+}
+
+// aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & sinc_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::sinc_out::call(self, out);
+}
+// aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & sinc_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::sinc_out::call(self, out);
+}
+
+}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated2d_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated2d_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b2e1efbd472439f7fc630d8af102ee5af0663666
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated2d_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & slow_conv_dilated2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1);
+TORCH_API at::Tensor & slow_conv_dilated2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out);
+TORCH_API at::Tensor & slow_conv_dilated2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1));
+TORCH_API at::Tensor & slow_conv_dilated2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss_backward_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss_backward_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d77cc0b1cf4783b5ff658b0c96f383f0c6e7f9d8
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss_backward_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor soft_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction);
+TORCH_API at::Tensor & soft_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction);
+TORCH_API at::Tensor & soft_margin_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sort_meta_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sort_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a5e0d064da114a564e22fc39c99ce4a37262d214
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sort_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, ::std::optional<bool> stable, int64_t dim=-1, bool descending=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, ::std::optional<bool> stable, int64_t dim=-1, bool descending=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> sort_outf(const at::Tensor & self, ::std::optional<bool> stable, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices);
+
+} // namespace meta
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_he.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_he.h
new file mode 100644
index 0000000000000000000000000000000000000000..4012cc3e20fa9406a775c14865bb93d33e249cdd
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_he.h
@@ -0,0 +1,67 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/special_hermite_polynomial_he_ops.h>
+
+namespace at {
+
+
+// aten::special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor
+inline at::Tensor special_hermite_polynomial_he(const at::Tensor & x, const at::Tensor & n) {
+    return at::_ops::special_hermite_polynomial_he::call(x, n);
+}
+
+// aten::special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor
+inline at::Tensor special_hermite_polynomial_he(const at::Scalar & x, const at::Tensor & n) {
+    return at::_ops::special_hermite_polynomial_he_x_scalar::call(x, n);
+}
+
+// aten::special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor
+inline at::Tensor special_hermite_polynomial_he(const at::Tensor & x, const at::Scalar & n) {
+    return at::_ops::special_hermite_polynomial_he_n_scalar::call(x, n);
+}
+
+// aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_hermite_polynomial_he_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
+    return at::_ops::special_hermite_polynomial_he_out::call(x, n, out);
+}
+// aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_hermite_polynomial_he_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
+    return at::_ops::special_hermite_polynomial_he_out::call(x, n, out);
+}
+
+// aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_hermite_polynomial_he_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
+    return at::_ops::special_hermite_polynomial_he_x_scalar_out::call(x, n, out);
+}
+// aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_hermite_polynomial_he_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
+    return at::_ops::special_hermite_polynomial_he_x_scalar_out::call(x, n, out);
+}
+
+// aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_hermite_polynomial_he_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
+    return at::_ops::special_hermite_polynomial_he_n_scalar_out::call(x, n, out);
+}
+// aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_hermite_polynomial_he_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
+    return at::_ops::special_hermite_polynomial_he_n_scalar_out::call(x, n, out);
+}
+
+}
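The `_out`/`_outf` pairs above (and throughout these headers) route to the same underlying op; `_out` takes the output tensor first, while `_outf` keeps the schema's argument order with `out` last. A hedged sketch of both spellings, assuming CPU float tensors (the values are illustrative):

#include <ATen/ATen.h>

void hermite_out_sketch() {
  at::Tensor x = at::linspace(-1.0, 1.0, 5);
  at::Tensor n = at::full_like(x, 3.0);              // evaluate He_3 at each x
  at::Tensor out = at::empty_like(x);
  at::special_hermite_polynomial_he_out(out, x, n);  // output-first spelling
  at::special_hermite_polynomial_he_outf(x, n, out); // schema-order spelling, same result
}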
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/t_copy_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/t_copy_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0b4c63fe18fbe439b7be67c05ff099dc96fd0846
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/t_copy_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor t_copy(const at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/trace_cpu_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/trace_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d87e5101da56c8087d383f7ba76981d8c2284f6
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/trace_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor trace(const at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
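Dispatch-key headers such as `trace_cpu_dispatch.h` declare per-backend entry points (`at::cpu::trace` here); calling one directly bypasses the dispatcher, so the caller must guarantee the tensor really lives on that backend. A sketch under the assumption that `ATen/CPUFunctions.h` aggregates the `at::cpu` declarations, as in stock libtorch builds:

#include <ATen/ATen.h>
#include <ATen/CPUFunctions.h>  // assumed aggregate header for at::cpu::*

void trace_sketch() {
  at::Tensor m = at::eye(3);          // a CPU tensor
  at::Tensor t1 = at::trace(m);       // ordinary dispatched call
  at::Tensor t2 = at::cpu::trace(m);  // direct CPU-kernel call, no dispatch
}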
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_split_with_sizes_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_split_with_sizes_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6f7c9db51ef65c0c349c512fa6c77e2c74d68ed1
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_split_with_sizes_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::vector<at::Tensor> unsafe_split_with_sizes(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0);
+TORCH_API ::std::vector<at::Tensor> unsafe_split_with_sizes_symint(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0);
+TORCH_API void unsafe_split_with_sizes_out(at::TensorList out, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0);
+TORCH_API void unsafe_split_with_sizes_outf(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out);
+TORCH_API void unsafe_split_with_sizes_symint_out(at::TensorList out, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0);
+TORCH_API void unsafe_split_with_sizes_symint_outf(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..45beb4758b58091bcbe766b3aacc8c448ef56eea
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_native.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/upsample_nearest2d_meta.h>
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor upsample_nearest2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors);
+struct TORCH_API structured_upsample_nearest2d_out_cpu : public at::meta::structured_upsample_nearest2d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, const at::Tensor & out);
+};
+struct TORCH_API structured_upsample_nearest2d_out_cuda : public at::meta::structured_upsample_nearest2d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, const at::Tensor & out);
+};
+TORCH_API at::Tensor upsample_nearest2d_quantized_cpu(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..23566776761bd87ebec46c71d1272089eff31898
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_native.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/upsample_trilinear3d_meta.h>
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor upsample_trilinear3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors);
+struct TORCH_API structured_upsample_trilinear3d_out_cpu : public at::meta::structured_upsample_trilinear3d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, const at::Tensor & out);
+};
+struct TORCH_API structured_upsample_trilinear3d_out_cuda : public at::meta::structured_upsample_trilinear3d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/view_meta_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/view_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a03452aa9fd2a975250c202e431fe0f21012f286
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/view_meta_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor view(const at::Tensor & self, at::IntArrayRef size);
+TORCH_API at::Tensor view_symint(const at::Tensor & self, c10::SymIntArrayRef size);
+
+} // namespace meta
+} // namespace at
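Taken together, a functional header such as `sinc.h` above exposes three spellings of one op: functional, in-place, and out=. A minimal end-to-end sketch, assuming a CPU build of libtorch (values and shapes are illustrative):

#include <iostream>
#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::linspace(-2.0, 2.0, 9);
  at::Tensor a = at::sinc(t);    // functional: allocates the result
  at::Tensor out = at::empty_like(t);
  at::sinc_out(out, t);          // out= variant: writes into `out`
  at::sinc_(t);                  // in-place variant: mutates `t`
  std::cout << (a.equal(out) && out.equal(t)) << std::endl;  // prints 1
  return 0;
}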