diff --git a/.gitattributes b/.gitattributes index cd7a9df34ec20ec7b232940d0627230fc40ab7de..a87951f9c098fd038f29d563f42f0834132760c5 100644 --- a/.gitattributes +++ b/.gitattributes @@ -403,3 +403,9 @@ llava/lib/libquadmath.so filter=lfs diff=lfs merge=lfs -text llava/lib/liblzma.so.5 filter=lfs diff=lfs merge=lfs -text llava/lib/libncurses.so.6 filter=lfs diff=lfs merge=lfs -text llava/lib/libtsan.so.0.0.0 filter=lfs diff=lfs merge=lfs -text +llava/lib/libncurses.so filter=lfs diff=lfs merge=lfs -text +llava/lib/libtinfo.so.6 filter=lfs diff=lfs merge=lfs -text +llava/lib/libbz2.so filter=lfs diff=lfs merge=lfs -text +llava/lib/libasan.so.6.0.0 filter=lfs diff=lfs merge=lfs -text +llava/lib/libbz2.so.1.0.8 filter=lfs diff=lfs merge=lfs -text +llava/lib/libssl.so.3 filter=lfs diff=lfs merge=lfs -text diff --git a/llava/lib/libasan.so.6.0.0 b/llava/lib/libasan.so.6.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..42c89955cc0ed8b63e06a4d9af4f79d28580aa8c --- /dev/null +++ b/llava/lib/libasan.so.6.0.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a8a7995a4d84a8817af8d1604bef621e99d0622df4eda14f6fe5245735a952e +size 7575272 diff --git a/llava/lib/libbz2.so b/llava/lib/libbz2.so new file mode 100644 index 0000000000000000000000000000000000000000..7e057fb75b1b533e33984742a4a02254948e177f --- /dev/null +++ b/llava/lib/libbz2.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4979469ae49ac144f62202f75bbdd69b17197aedb879d633337c8cf7e4aba301 +size 229016 diff --git a/llava/lib/libbz2.so.1.0.8 b/llava/lib/libbz2.so.1.0.8 new file mode 100644 index 0000000000000000000000000000000000000000..7e057fb75b1b533e33984742a4a02254948e177f --- /dev/null +++ b/llava/lib/libbz2.so.1.0.8 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4979469ae49ac144f62202f75bbdd69b17197aedb879d633337c8cf7e4aba301 +size 229016 diff --git a/llava/lib/libncurses.so b/llava/lib/libncurses.so new file mode 100644 index 0000000000000000000000000000000000000000..0745e17b73f2d5570bc5f8868bab0381bd7da794 --- /dev/null +++ b/llava/lib/libncurses.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fa4e5e93804d78660b0eef727cdb4211209e1742e4ad3669348022668d90962 +size 271304 diff --git a/llava/lib/libssl.so.3 b/llava/lib/libssl.so.3 new file mode 100644 index 0000000000000000000000000000000000000000..53b6e92c255c102b29b845c3cc906034d8461529 --- /dev/null +++ b/llava/lib/libssl.so.3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3480c91df4e0c1a33514955568641405e37924f680e8ba42f494a209640516c6 +size 775712 diff --git a/llava/lib/libtinfo.so.6 b/llava/lib/libtinfo.so.6 new file mode 100644 index 0000000000000000000000000000000000000000..a645f87a3ef9feaf2f53a1911825fe742fb52521 --- /dev/null +++ b/llava/lib/libtinfo.so.6 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5679c9d7cc0ec2d6b08c6058198667efe71f657e89dcc0bd7adcf5d6cbc80c5 +size 287080 diff --git a/llava/lib/tcl8.6/msgs/af_za.msg b/llava/lib/tcl8.6/msgs/af_za.msg new file mode 100644 index 0000000000000000000000000000000000000000..fef48ad48fe899ab4bbc86f28edcf11edb66c5d8 --- /dev/null +++ b/llava/lib/tcl8.6/msgs/af_za.msg @@ -0,0 +1,6 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset af_ZA DATE_FORMAT "%d %B %Y" + ::msgcat::mcset af_ZA TIME_FORMAT_12 "%l:%M:%S %P" + ::msgcat::mcset af_ZA DATE_TIME_FORMAT "%d %B %Y %l:%M:%S %P %z" +} diff --git 
a/llava/lib/tcl8.6/msgs/ar_sy.msg b/llava/lib/tcl8.6/msgs/ar_sy.msg new file mode 100644 index 0000000000000000000000000000000000000000..d5e1c873a1c2b7790d7d539cbb59a3d2e5ae02dd --- /dev/null +++ b/llava/lib/tcl8.6/msgs/ar_sy.msg @@ -0,0 +1,39 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset ar_SY DAYS_OF_WEEK_ABBREV [list \ + "\u0627\u0644\u0623\u062d\u062f"\ + "\u0627\u0644\u0627\u062b\u0646\u064a\u0646"\ + "\u0627\u0644\u062b\u0644\u0627\u062b\u0627\u0621"\ + "\u0627\u0644\u0623\u0631\u0628\u0639\u0627\u0621"\ + "\u0627\u0644\u062e\u0645\u064a\u0633"\ + "\u0627\u0644\u062c\u0645\u0639\u0629"\ + "\u0627\u0644\u0633\u0628\u062a"] + ::msgcat::mcset ar_SY MONTHS_ABBREV [list \ + "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\ + "\u0634\u0628\u0627\u0637"\ + "\u0622\u0630\u0627\u0631"\ + "\u0646\u064a\u0633\u0627\u0646"\ + "\u0646\u0648\u0627\u0631"\ + "\u062d\u0632\u064a\u0631\u0627\u0646"\ + "\u062a\u0645\u0648\u0632"\ + "\u0622\u0628"\ + "\u0623\u064a\u0644\u0648\u0644"\ + "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u0623\u0648\u0644"\ + "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\ + "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u0623\u0648\u0644"\ + ""] + ::msgcat::mcset ar_SY MONTHS_FULL [list \ + "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\ + "\u0634\u0628\u0627\u0637"\ + "\u0622\u0630\u0627\u0631"\ + "\u0646\u064a\u0633\u0627\u0646"\ + "\u0646\u0648\u0627\u0631\u0627\u0646"\ + "\u062d\u0632\u064a\u0631"\ + "\u062a\u0645\u0648\u0632"\ + "\u0622\u0628"\ + "\u0623\u064a\u0644\u0648\u0644"\ + "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u0623\u0648\u0644"\ + "\u062a\u0634\u0631\u064a\u0646 \u0627\u0644\u062b\u0627\u0646\u064a"\ + "\u0643\u0627\u0646\u0648\u0646 \u0627\u0644\u0623\u0648\u0644"\ + ""] +} diff --git a/llava/lib/tcl8.6/msgs/en_nz.msg b/llava/lib/tcl8.6/msgs/en_nz.msg new file mode 100644 index 0000000000000000000000000000000000000000..b419017a91d9e3db7435c34a68bd54a224454bf9 --- /dev/null +++ b/llava/lib/tcl8.6/msgs/en_nz.msg @@ -0,0 +1,7 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset en_NZ DATE_FORMAT "%e/%m/%Y" + ::msgcat::mcset en_NZ TIME_FORMAT "%H:%M:%S" + ::msgcat::mcset en_NZ TIME_FORMAT_12 "%I:%M:%S %P %z" + ::msgcat::mcset en_NZ DATE_TIME_FORMAT "%e/%m/%Y %H:%M:%S %z" +} diff --git a/llava/lib/tcl8.6/msgs/eo.msg b/llava/lib/tcl8.6/msgs/eo.msg new file mode 100644 index 0000000000000000000000000000000000000000..1d2a24fece6b010f0020fb9b814ac9ab1ae07445 --- /dev/null +++ b/llava/lib/tcl8.6/msgs/eo.msg @@ -0,0 +1,54 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset eo DAYS_OF_WEEK_ABBREV [list \ + "di"\ + "lu"\ + "ma"\ + "me"\ + "\u0135a"\ + "ve"\ + "sa"] + ::msgcat::mcset eo DAYS_OF_WEEK_FULL [list \ + "diman\u0109o"\ + "lundo"\ + "mardo"\ + "merkredo"\ + "\u0135a\u016ddo"\ + "vendredo"\ + "sabato"] + ::msgcat::mcset eo MONTHS_ABBREV [list \ + "jan"\ + "feb"\ + "mar"\ + "apr"\ + "maj"\ + "jun"\ + "jul"\ + "a\u016dg"\ + "sep"\ + "okt"\ + "nov"\ + "dec"\ + ""] + ::msgcat::mcset eo MONTHS_FULL [list \ + "januaro"\ + "februaro"\ + "marto"\ + "aprilo"\ + "majo"\ + "junio"\ + "julio"\ + "a\u016dgusto"\ + "septembro"\ + "oktobro"\ + "novembro"\ + "decembro"\ + ""] + ::msgcat::mcset eo BCE "aK" + ::msgcat::mcset eo CE "pK" + ::msgcat::mcset eo AM "atm" + ::msgcat::mcset eo PM "ptm" + ::msgcat::mcset eo DATE_FORMAT "%Y-%b-%d" + ::msgcat::mcset eo 
TIME_FORMAT "%H:%M:%S" + ::msgcat::mcset eo DATE_TIME_FORMAT "%Y-%b-%d %H:%M:%S %z" +} diff --git a/llava/lib/tcl8.6/msgs/es.msg b/llava/lib/tcl8.6/msgs/es.msg new file mode 100644 index 0000000000000000000000000000000000000000..a24f0a1bb2aa78a8f35360b5e0d61192485a8657 --- /dev/null +++ b/llava/lib/tcl8.6/msgs/es.msg @@ -0,0 +1,52 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset es DAYS_OF_WEEK_ABBREV [list \ + "dom"\ + "lun"\ + "mar"\ + "mi\u00e9"\ + "jue"\ + "vie"\ + "s\u00e1b"] + ::msgcat::mcset es DAYS_OF_WEEK_FULL [list \ + "domingo"\ + "lunes"\ + "martes"\ + "mi\u00e9rcoles"\ + "jueves"\ + "viernes"\ + "s\u00e1bado"] + ::msgcat::mcset es MONTHS_ABBREV [list \ + "ene"\ + "feb"\ + "mar"\ + "abr"\ + "may"\ + "jun"\ + "jul"\ + "ago"\ + "sep"\ + "oct"\ + "nov"\ + "dic"\ + ""] + ::msgcat::mcset es MONTHS_FULL [list \ + "enero"\ + "febrero"\ + "marzo"\ + "abril"\ + "mayo"\ + "junio"\ + "julio"\ + "agosto"\ + "septiembre"\ + "octubre"\ + "noviembre"\ + "diciembre"\ + ""] + ::msgcat::mcset es BCE "a.C." + ::msgcat::mcset es CE "d.C." + ::msgcat::mcset es DATE_FORMAT "%e de %B de %Y" + ::msgcat::mcset es TIME_FORMAT "%k:%M:%S" + ::msgcat::mcset es DATE_TIME_FORMAT "%e de %B de %Y %k:%M:%S %z" +} diff --git a/llava/lib/tcl8.6/msgs/es_gt.msg b/llava/lib/tcl8.6/msgs/es_gt.msg new file mode 100644 index 0000000000000000000000000000000000000000..ecd6faf9191c9eb348e8e620a8b264c32e1a25d8 --- /dev/null +++ b/llava/lib/tcl8.6/msgs/es_gt.msg @@ -0,0 +1,6 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset es_GT DATE_FORMAT "%e/%m/%Y" + ::msgcat::mcset es_GT TIME_FORMAT_12 "%I:%M:%S %P" + ::msgcat::mcset es_GT DATE_TIME_FORMAT "%e/%m/%Y %I:%M:%S %P %z" +} diff --git a/llava/lib/tcl8.6/msgs/es_hn.msg b/llava/lib/tcl8.6/msgs/es_hn.msg new file mode 100644 index 0000000000000000000000000000000000000000..a758ca2b6118c92384333f7beb6513939672e81e --- /dev/null +++ b/llava/lib/tcl8.6/msgs/es_hn.msg @@ -0,0 +1,6 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset es_HN DATE_FORMAT "%m-%d-%Y" + ::msgcat::mcset es_HN TIME_FORMAT_12 "%I:%M:%S %P" + ::msgcat::mcset es_HN DATE_TIME_FORMAT "%m-%d-%Y %I:%M:%S %P %z" +} diff --git a/llava/lib/tcl8.6/msgs/es_ni.msg b/llava/lib/tcl8.6/msgs/es_ni.msg new file mode 100644 index 0000000000000000000000000000000000000000..7c394953a3d069ae78355c632ae28a25fbf111d2 --- /dev/null +++ b/llava/lib/tcl8.6/msgs/es_ni.msg @@ -0,0 +1,6 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset es_NI DATE_FORMAT "%m-%d-%Y" + ::msgcat::mcset es_NI TIME_FORMAT_12 "%I:%M:%S %P" + ::msgcat::mcset es_NI DATE_TIME_FORMAT "%m-%d-%Y %I:%M:%S %P %z" +} diff --git a/llava/lib/tcl8.6/msgs/es_pr.msg b/llava/lib/tcl8.6/msgs/es_pr.msg new file mode 100644 index 0000000000000000000000000000000000000000..8511b126ed0be24c044315366f7c75b87aa70f36 --- /dev/null +++ b/llava/lib/tcl8.6/msgs/es_pr.msg @@ -0,0 +1,6 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset es_PR DATE_FORMAT "%m-%d-%Y" + ::msgcat::mcset es_PR TIME_FORMAT_12 "%I:%M:%S %P" + ::msgcat::mcset es_PR DATE_TIME_FORMAT "%m-%d-%Y %I:%M:%S %P %z" +} diff --git a/llava/lib/tcl8.6/msgs/gl.msg b/llava/lib/tcl8.6/msgs/gl.msg new file mode 100644 index 0000000000000000000000000000000000000000..4b869e8550665690bf621afdf659cd9d39e9cb2c --- /dev/null +++ b/llava/lib/tcl8.6/msgs/gl.msg @@ -0,0 +1,47 @@ +# created by 
tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset gl DAYS_OF_WEEK_ABBREV [list \ + "Dom"\ + "Lun"\ + "Mar"\ + "M\u00e9r"\ + "Xov"\ + "Ven"\ + "S\u00e1b"] + ::msgcat::mcset gl DAYS_OF_WEEK_FULL [list \ + "Domingo"\ + "Luns"\ + "Martes"\ + "M\u00e9rcores"\ + "Xoves"\ + "Venres"\ + "S\u00e1bado"] + ::msgcat::mcset gl MONTHS_ABBREV [list \ + "Xan"\ + "Feb"\ + "Mar"\ + "Abr"\ + "Mai"\ + "Xu\u00f1"\ + "Xul"\ + "Ago"\ + "Set"\ + "Out"\ + "Nov"\ + "Dec"\ + ""] + ::msgcat::mcset gl MONTHS_FULL [list \ + "Xaneiro"\ + "Febreiro"\ + "Marzo"\ + "Abril"\ + "Maio"\ + "Xu\u00f1o"\ + "Xullo"\ + "Agosto"\ + "Setembro"\ + "Outubro"\ + "Novembro"\ + "Decembro"\ + ""] +} diff --git a/llava/lib/tcl8.6/msgs/kok_in.msg b/llava/lib/tcl8.6/msgs/kok_in.msg new file mode 100644 index 0000000000000000000000000000000000000000..abcb1ff883699c995fc108baf1cb828f712b9032 --- /dev/null +++ b/llava/lib/tcl8.6/msgs/kok_in.msg @@ -0,0 +1,6 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset kok_IN DATE_FORMAT "%d %M %Y" + ::msgcat::mcset kok_IN TIME_FORMAT_12 "%I:%M:%S %P" + ::msgcat::mcset kok_IN DATE_TIME_FORMAT "%d %M %Y %I:%M:%S %P %z" +} diff --git a/llava/lib/tcl8.6/msgs/mr.msg b/llava/lib/tcl8.6/msgs/mr.msg new file mode 100644 index 0000000000000000000000000000000000000000..cea427a9065c619748b2807040a5314cc0915c1e --- /dev/null +++ b/llava/lib/tcl8.6/msgs/mr.msg @@ -0,0 +1,39 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset mr DAYS_OF_WEEK_FULL [list \ + "\u0930\u0935\u093f\u0935\u093e\u0930"\ + "\u0938\u094b\u092e\u0935\u093e\u0930"\ + "\u092e\u0902\u0917\u0933\u0935\u093e\u0930"\ + "\u092e\u0902\u0917\u0933\u0935\u093e\u0930"\ + "\u0917\u0941\u0930\u0941\u0935\u093e\u0930"\ + "\u0936\u0941\u0915\u094d\u0930\u0935\u093e\u0930"\ + "\u0936\u0928\u093f\u0935\u093e\u0930"] + ::msgcat::mcset mr MONTHS_ABBREV [list \ + "\u091c\u093e\u0928\u0947\u0935\u093e\u0930\u0940"\ + "\u092b\u0947\u092c\u0943\u0935\u093e\u0930\u0940"\ + "\u092e\u093e\u0930\u094d\u091a"\ + "\u090f\u092a\u094d\u0930\u093f\u0932"\ + "\u092e\u0947"\ + "\u091c\u0942\u0928"\ + "\u091c\u0941\u0932\u0948"\ + "\u0913\u0917\u0938\u094d\u091f"\ + "\u0938\u0947\u092a\u094d\u091f\u0947\u0902\u092c\u0930"\ + "\u0913\u0915\u094d\u091f\u094b\u092c\u0930"\ + "\u0928\u094b\u0935\u094d\u0939\u0947\u0902\u092c\u0930"\ + "\u0921\u093f\u0938\u0947\u0902\u092c\u0930"] + ::msgcat::mcset mr MONTHS_FULL [list \ + "\u091c\u093e\u0928\u0947\u0935\u093e\u0930\u0940"\ + "\u092b\u0947\u092c\u0943\u0935\u093e\u0930\u0940"\ + "\u092e\u093e\u0930\u094d\u091a"\ + "\u090f\u092a\u094d\u0930\u093f\u0932"\ + "\u092e\u0947"\ + "\u091c\u0942\u0928"\ + "\u091c\u0941\u0932\u0948"\ + "\u0913\u0917\u0938\u094d\u091f"\ + "\u0938\u0947\u092a\u094d\u091f\u0947\u0902\u092c\u0930"\ + "\u0913\u0915\u094d\u091f\u094b\u092c\u0930"\ + "\u0928\u094b\u0935\u094d\u0939\u0947\u0902\u092c\u0930"\ + "\u0921\u093f\u0938\u0947\u0902\u092c\u0930"] + ::msgcat::mcset mr AM "BC" + ::msgcat::mcset mr PM "AD" +} diff --git a/llava/lib/tcl8.6/msgs/nl.msg b/llava/lib/tcl8.6/msgs/nl.msg new file mode 100644 index 0000000000000000000000000000000000000000..4c5c675518623c91e6883bcbe78fade21d5482ab --- /dev/null +++ b/llava/lib/tcl8.6/msgs/nl.msg @@ -0,0 +1,50 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset nl DAYS_OF_WEEK_ABBREV [list \ + "zo"\ + "ma"\ + "di"\ + "wo"\ + "do"\ + "vr"\ + "za"] + ::msgcat::mcset nl DAYS_OF_WEEK_FULL [list \ + 
"zondag"\ + "maandag"\ + "dinsdag"\ + "woensdag"\ + "donderdag"\ + "vrijdag"\ + "zaterdag"] + ::msgcat::mcset nl MONTHS_ABBREV [list \ + "jan"\ + "feb"\ + "mrt"\ + "apr"\ + "mei"\ + "jun"\ + "jul"\ + "aug"\ + "sep"\ + "okt"\ + "nov"\ + "dec"\ + ""] + ::msgcat::mcset nl MONTHS_FULL [list \ + "januari"\ + "februari"\ + "maart"\ + "april"\ + "mei"\ + "juni"\ + "juli"\ + "augustus"\ + "september"\ + "oktober"\ + "november"\ + "december"\ + ""] + ::msgcat::mcset nl DATE_FORMAT "%e %B %Y" + ::msgcat::mcset nl TIME_FORMAT "%k:%M:%S" + ::msgcat::mcset nl DATE_TIME_FORMAT "%e %B %Y %k:%M:%S %z" +} diff --git a/llava/lib/tcl8.6/msgs/nl_be.msg b/llava/lib/tcl8.6/msgs/nl_be.msg new file mode 100644 index 0000000000000000000000000000000000000000..4b19670fc6dc2c3a4412fa8f49b2de77347b5cc3 --- /dev/null +++ b/llava/lib/tcl8.6/msgs/nl_be.msg @@ -0,0 +1,7 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset nl_BE DATE_FORMAT "%d-%m-%y" + ::msgcat::mcset nl_BE TIME_FORMAT "%T" + ::msgcat::mcset nl_BE TIME_FORMAT_12 "%T" + ::msgcat::mcset nl_BE DATE_TIME_FORMAT "%a %d %b %Y %T %z" +} diff --git a/llava/lib/tcl8.6/msgs/nn.msg b/llava/lib/tcl8.6/msgs/nn.msg new file mode 100644 index 0000000000000000000000000000000000000000..bd61ac9493955b92806bfa55c43c769d5c522cb4 --- /dev/null +++ b/llava/lib/tcl8.6/msgs/nn.msg @@ -0,0 +1,52 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset nn DAYS_OF_WEEK_ABBREV [list \ + "su"\ + "m\u00e5"\ + "ty"\ + "on"\ + "to"\ + "fr"\ + "lau"] + ::msgcat::mcset nn DAYS_OF_WEEK_FULL [list \ + "sundag"\ + "m\u00e5ndag"\ + "tysdag"\ + "onsdag"\ + "torsdag"\ + "fredag"\ + "laurdag"] + ::msgcat::mcset nn MONTHS_ABBREV [list \ + "jan"\ + "feb"\ + "mar"\ + "apr"\ + "mai"\ + "jun"\ + "jul"\ + "aug"\ + "sep"\ + "okt"\ + "nov"\ + "des"\ + ""] + ::msgcat::mcset nn MONTHS_FULL [list \ + "januar"\ + "februar"\ + "mars"\ + "april"\ + "mai"\ + "juni"\ + "juli"\ + "august"\ + "september"\ + "oktober"\ + "november"\ + "desember"\ + ""] + ::msgcat::mcset nn BCE "f.Kr." + ::msgcat::mcset nn CE "e.Kr." + ::msgcat::mcset nn DATE_FORMAT "%e. %B %Y" + ::msgcat::mcset nn TIME_FORMAT "%H:%M:%S" + ::msgcat::mcset nn DATE_TIME_FORMAT "%e. 
%B %Y %H:%M:%S %z" +} diff --git a/llava/lib/tcl8.6/msgs/pt.msg b/llava/lib/tcl8.6/msgs/pt.msg new file mode 100644 index 0000000000000000000000000000000000000000..96fdb35a682eae0a5344a20f31a5d3ed6e1f9207 --- /dev/null +++ b/llava/lib/tcl8.6/msgs/pt.msg @@ -0,0 +1,50 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset pt DAYS_OF_WEEK_ABBREV [list \ + "Dom"\ + "Seg"\ + "Ter"\ + "Qua"\ + "Qui"\ + "Sex"\ + "S\u00e1b"] + ::msgcat::mcset pt DAYS_OF_WEEK_FULL [list \ + "Domingo"\ + "Segunda-feira"\ + "Ter\u00e7a-feira"\ + "Quarta-feira"\ + "Quinta-feira"\ + "Sexta-feira"\ + "S\u00e1bado"] + ::msgcat::mcset pt MONTHS_ABBREV [list \ + "Jan"\ + "Fev"\ + "Mar"\ + "Abr"\ + "Mai"\ + "Jun"\ + "Jul"\ + "Ago"\ + "Set"\ + "Out"\ + "Nov"\ + "Dez"\ + ""] + ::msgcat::mcset pt MONTHS_FULL [list \ + "Janeiro"\ + "Fevereiro"\ + "Mar\u00e7o"\ + "Abril"\ + "Maio"\ + "Junho"\ + "Julho"\ + "Agosto"\ + "Setembro"\ + "Outubro"\ + "Novembro"\ + "Dezembro"\ + ""] + ::msgcat::mcset pt DATE_FORMAT "%d-%m-%Y" + ::msgcat::mcset pt TIME_FORMAT "%k:%M:%S" + ::msgcat::mcset pt DATE_TIME_FORMAT "%d-%m-%Y %k:%M:%S %z" +} diff --git a/llava/lib/tcl8.6/msgs/ro.msg b/llava/lib/tcl8.6/msgs/ro.msg new file mode 100644 index 0000000000000000000000000000000000000000..bdd7c6197222ebe646efabcaf0ec00ca50348aa7 --- /dev/null +++ b/llava/lib/tcl8.6/msgs/ro.msg @@ -0,0 +1,52 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset ro DAYS_OF_WEEK_ABBREV [list \ + "D"\ + "L"\ + "Ma"\ + "Mi"\ + "J"\ + "V"\ + "S"] + ::msgcat::mcset ro DAYS_OF_WEEK_FULL [list \ + "duminic\u0103"\ + "luni"\ + "mar\u0163i"\ + "miercuri"\ + "joi"\ + "vineri"\ + "s\u00eemb\u0103t\u0103"] + ::msgcat::mcset ro MONTHS_ABBREV [list \ + "Ian"\ + "Feb"\ + "Mar"\ + "Apr"\ + "Mai"\ + "Iun"\ + "Iul"\ + "Aug"\ + "Sep"\ + "Oct"\ + "Nov"\ + "Dec"\ + ""] + ::msgcat::mcset ro MONTHS_FULL [list \ + "ianuarie"\ + "februarie"\ + "martie"\ + "aprilie"\ + "mai"\ + "iunie"\ + "iulie"\ + "august"\ + "septembrie"\ + "octombrie"\ + "noiembrie"\ + "decembrie"\ + ""] + ::msgcat::mcset ro BCE "d.C." + ::msgcat::mcset ro CE "\u00ee.d.C." 
+ ::msgcat::mcset ro DATE_FORMAT "%d.%m.%Y" + ::msgcat::mcset ro TIME_FORMAT "%H:%M:%S" + ::msgcat::mcset ro DATE_TIME_FORMAT "%d.%m.%Y %H:%M:%S %z" +} diff --git a/llava/lib/tcl8.6/msgs/ru_ua.msg b/llava/lib/tcl8.6/msgs/ru_ua.msg new file mode 100644 index 0000000000000000000000000000000000000000..6e1f8a86e8dea968b603d101395847edb7bb62ff --- /dev/null +++ b/llava/lib/tcl8.6/msgs/ru_ua.msg @@ -0,0 +1,6 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset ru_UA DATE_FORMAT "%d.%m.%Y" + ::msgcat::mcset ru_UA TIME_FORMAT "%k:%M:%S" + ::msgcat::mcset ru_UA DATE_TIME_FORMAT "%d.%m.%Y %k:%M:%S %z" +} diff --git a/llava/lib/tcl8.6/msgs/sw.msg b/llava/lib/tcl8.6/msgs/sw.msg new file mode 100644 index 0000000000000000000000000000000000000000..b888b43df7009891dd34872cdce6002b961287a9 --- /dev/null +++ b/llava/lib/tcl8.6/msgs/sw.msg @@ -0,0 +1,49 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset sw DAYS_OF_WEEK_ABBREV [list \ + "Jpi"\ + "Jtt"\ + "Jnn"\ + "Jtn"\ + "Alh"\ + "Iju"\ + "Jmo"] + ::msgcat::mcset sw DAYS_OF_WEEK_FULL [list \ + "Jumapili"\ + "Jumatatu"\ + "Jumanne"\ + "Jumatano"\ + "Alhamisi"\ + "Ijumaa"\ + "Jumamosi"] + ::msgcat::mcset sw MONTHS_ABBREV [list \ + "Jan"\ + "Feb"\ + "Mar"\ + "Apr"\ + "Mei"\ + "Jun"\ + "Jul"\ + "Ago"\ + "Sep"\ + "Okt"\ + "Nov"\ + "Des"\ + ""] + ::msgcat::mcset sw MONTHS_FULL [list \ + "Januari"\ + "Februari"\ + "Machi"\ + "Aprili"\ + "Mei"\ + "Juni"\ + "Julai"\ + "Agosti"\ + "Septemba"\ + "Oktoba"\ + "Novemba"\ + "Desemba"\ + ""] + ::msgcat::mcset sw BCE "KK" + ::msgcat::mcset sw CE "BK" +} diff --git a/llava/lib/tcl8.6/msgs/th.msg b/llava/lib/tcl8.6/msgs/th.msg new file mode 100644 index 0000000000000000000000000000000000000000..7486c35a68f7a5abbb2e085cbf4a5285dc3c612b --- /dev/null +++ b/llava/lib/tcl8.6/msgs/th.msg @@ -0,0 +1,54 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset th DAYS_OF_WEEK_ABBREV [list \ + "\u0e2d\u0e32."\ + "\u0e08."\ + "\u0e2d."\ + "\u0e1e."\ + "\u0e1e\u0e24."\ + "\u0e28."\ + "\u0e2a."] + ::msgcat::mcset th DAYS_OF_WEEK_FULL [list \ + "\u0e27\u0e31\u0e19\u0e2d\u0e32\u0e17\u0e34\u0e15\u0e22\u0e4c"\ + "\u0e27\u0e31\u0e19\u0e08\u0e31\u0e19\u0e17\u0e23\u0e4c"\ + "\u0e27\u0e31\u0e19\u0e2d\u0e31\u0e07\u0e04\u0e32\u0e23"\ + "\u0e27\u0e31\u0e19\u0e1e\u0e38\u0e18"\ + "\u0e27\u0e31\u0e19\u0e1e\u0e24\u0e2b\u0e31\u0e2a\u0e1a\u0e14\u0e35"\ + "\u0e27\u0e31\u0e19\u0e28\u0e38\u0e01\u0e23\u0e4c"\ + "\u0e27\u0e31\u0e19\u0e40\u0e2a\u0e32\u0e23\u0e4c"] + ::msgcat::mcset th MONTHS_ABBREV [list \ + "\u0e21.\u0e04."\ + "\u0e01.\u0e1e."\ + "\u0e21\u0e35.\u0e04."\ + "\u0e40\u0e21.\u0e22."\ + "\u0e1e.\u0e04."\ + "\u0e21\u0e34.\u0e22."\ + "\u0e01.\u0e04."\ + "\u0e2a.\u0e04."\ + "\u0e01.\u0e22."\ + "\u0e15.\u0e04."\ + "\u0e1e.\u0e22."\ + "\u0e18.\u0e04."\ + ""] + ::msgcat::mcset th MONTHS_FULL [list \ + "\u0e21\u0e01\u0e23\u0e32\u0e04\u0e21"\ + "\u0e01\u0e38\u0e21\u0e20\u0e32\u0e1e\u0e31\u0e19\u0e18\u0e4c"\ + "\u0e21\u0e35\u0e19\u0e32\u0e04\u0e21"\ + "\u0e40\u0e21\u0e29\u0e32\u0e22\u0e19"\ + "\u0e1e\u0e24\u0e29\u0e20\u0e32\u0e04\u0e21"\ + "\u0e21\u0e34\u0e16\u0e38\u0e19\u0e32\u0e22\u0e19"\ + "\u0e01\u0e23\u0e01\u0e0e\u0e32\u0e04\u0e21"\ + "\u0e2a\u0e34\u0e07\u0e2b\u0e32\u0e04\u0e21"\ + "\u0e01\u0e31\u0e19\u0e22\u0e32\u0e22\u0e19"\ + "\u0e15\u0e38\u0e25\u0e32\u0e04\u0e21"\ + "\u0e1e\u0e24\u0e28\u0e08\u0e34\u0e01\u0e32\u0e22\u0e19"\ + "\u0e18\u0e31\u0e19\u0e27\u0e32\u0e04\u0e21"\ + ""] + ::msgcat::mcset th BCE 
"\u0e25\u0e17\u0e35\u0e48" + ::msgcat::mcset th CE "\u0e04.\u0e28." + ::msgcat::mcset th AM "\u0e01\u0e48\u0e2d\u0e19\u0e40\u0e17\u0e35\u0e48\u0e22\u0e07" + ::msgcat::mcset th PM "\u0e2b\u0e25\u0e31\u0e07\u0e40\u0e17\u0e35\u0e48\u0e22\u0e07" + ::msgcat::mcset th DATE_FORMAT "%e/%m/%Y" + ::msgcat::mcset th TIME_FORMAT "%k:%M:%S" + ::msgcat::mcset th DATE_TIME_FORMAT "%e/%m/%Y %k:%M:%S %z" +} diff --git a/llava/lib/tcl8.6/msgs/zh_sg.msg b/llava/lib/tcl8.6/msgs/zh_sg.msg new file mode 100644 index 0000000000000000000000000000000000000000..a2f3e39d8d21ea13661da244af688157bd46773b --- /dev/null +++ b/llava/lib/tcl8.6/msgs/zh_sg.msg @@ -0,0 +1,8 @@ +# created by tools/loadICU.tcl -- do not edit +namespace eval ::tcl::clock { + ::msgcat::mcset zh_SG AM "\u4e0a\u5348" + ::msgcat::mcset zh_SG PM "\u4e2d\u5348" + ::msgcat::mcset zh_SG DATE_FORMAT "%d %B %Y" + ::msgcat::mcset zh_SG TIME_FORMAT_12 "%P %I:%M:%S" + ::msgcat::mcset zh_SG DATE_TIME_FORMAT "%d %B %Y %P %I:%M:%S %z" +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_floor_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_floor_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..384dae22eb53078ed373c233ab43020bbe0c45ba --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_floor_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::vector _foreach_floor(at::TensorList self); +TORCH_API void _foreach_floor_(at::TensorList self); + +} // namespace cuda +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log2.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log2.h new file mode 100644 index 0000000000000000000000000000000000000000..113685a9a88a01206dae1adbc0ffbb3669fb2466 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log2.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_foreach_log2(Tensor[] self) -> Tensor[] +inline ::std::vector _foreach_log2(at::TensorList self) { + return at::_ops::_foreach_log2::call(self); +} + +// aten::_foreach_log2_(Tensor(a!)[] self) -> () +inline void _foreach_log2_(at::TensorList self) { + return at::_ops::_foreach_log2_::call(self); +} + +// aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> () +inline void _foreach_log2_out(at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_log2_out::call(self, out); +} +// aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> () +inline void _foreach_log2_outf(at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_log2_out::call(self, out); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_neg_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_neg_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2712134847d4fea3c58c91f02a0251ba627f2eb5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_neg_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::vector _foreach_neg(at::TensorList self); +TORCH_API void _foreach_neg_(at::TensorList self); + +} // namespace cuda +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_lazy_clone_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_lazy_clone_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..14155c8bdbb936089ed9586e3e0522c391aa7742 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_lazy_clone_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _lazy_clone {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_lazy_clone")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_lazy_clone(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nnpack_spatial_convolution.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nnpack_spatial_convolution.h
new file mode 100644
index 0000000000000000000000000000000000000000..2fe7899e9c3eda6364445910ee83c82e9b65678c
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nnpack_spatial_convolution.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/_nnpack_spatial_convolution_ops.h>
+
+namespace at {
+
+
+// aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1) -> Tensor
+inline at::Tensor _nnpack_spatial_convolution(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) {
+    return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _nnpack_spatial_convolution(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) {
+    return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride));
+  }
+}
+
+// aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1) -> Tensor
+inline at::Tensor _nnpack_spatial_convolution_symint(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride=c10::SymInt(1)) {
+    return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, padding, stride);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _nnpack_spatial_convolution(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride=c10::SymInt(1)) {
+    return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, padding, stride);
+  }
+}
+
+// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _nnpack_spatial_convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) {
+    return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _nnpack_spatial_convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) {
+    return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), out);
+  }
+}
+
+// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _nnpack_spatial_convolution_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
+    return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _nnpack_spatial_convolution_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
+    return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), out);
+  }
+}
+
+// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _nnpack_spatial_convolution_symint_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride=c10::SymInt(1)) {
+    return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, padding, stride, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _nnpack_spatial_convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride=c10::SymInt(1)) {
+    return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, padding, stride, out);
+  }
+}
+
+// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _nnpack_spatial_convolution_symint_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, at::Tensor & out) {
+    return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, padding, stride, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _nnpack_spatial_convolution_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, at::Tensor & out) {
+    return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, padding, stride, out);
+  }
+}
+
+}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_backward_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..15d52270ab5230bf98bdcb40fd6b455c2123d0b7
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_backward_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _prelu_kernel_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> mkldnn_prelu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight);
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_apply_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_apply_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1214f002356feb5fcb70bd28695233f101c61a9a
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_apply_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _sparse_semi_structured_apply(const at::Tensor & input, const at::Tensor & thread_masks);
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_put_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_put_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..05b1d3e81464e4b31846e5694dcd1fee210069ee
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_put_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _unsafe_index_put(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward_meta_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a8cd55769ff1614954dfaa626faee834f196ae9d
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward_meta_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor _upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor _upsample_bicubic2d_aa_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input);
+
+} // namespace meta
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_meta.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d1edd91e651598192ea55b2eac0ff071f0f4664
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured__upsample_bilinear2d_aa : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & self, at::ArrayRef<c10::SymInt> output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w);
+};
+
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d.h
new file mode 100644
index 0000000000000000000000000000000000000000..623d81a9c4b678943c071f52545cd263740a62d1
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/adaptive_avg_pool3d_ops.h>
+
+namespace at {
+
+
+// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) {
+    return at::_ops::adaptive_avg_pool3d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) {
+    return at::_ops::adaptive_avg_pool3d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
+  }
+}
+
+// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & adaptive_avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
+    return at::_ops::adaptive_avg_pool3d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & adaptive_avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
+    return at::_ops::adaptive_avg_pool3d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
+  }
+}
+
+// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & adaptive_avg_pool3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) {
+    return at::_ops::adaptive_avg_pool3d_out::call(self, output_size, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) {
+    return at::_ops::adaptive_avg_pool3d_out::call(self, output_size, out);
+  }
+}
+
+// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & adaptive_avg_pool3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
+    return at::_ops::adaptive_avg_pool3d_out::call(self, output_size, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & adaptive_avg_pool3d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
+    return at::_ops::adaptive_avg_pool3d_out::call(self, output_size, out);
+  }
+}
+
+// aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
+inline at::Tensor adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size) {
+    return at::_ops::adaptive_avg_pool3d::call(self, c10::fromIntArrayRefSlow(output_size));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size) {
+    return at::_ops::adaptive_avg_pool3d::call(self, c10::fromIntArrayRefSlow(output_size));
+  }
+}
+
+// aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
+inline at::Tensor adaptive_avg_pool3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size) {
+    return at::_ops::adaptive_avg_pool3d::call(self, output_size);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor adaptive_avg_pool3d(const at::Tensor & self, c10::SymIntArrayRef output_size) {
+    return at::_ops::adaptive_avg_pool3d::call(self, output_size);
+  }
+}
+
+}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_meta.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..f573dc40819b67370a65837dc77b3eb9bb60327f
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_adaptive_max_pool2d_backward : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
+};
+
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/atan2_meta_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/atan2_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d2a1fb871bdae62272ac22c2fd7e9b4e0216a1ce
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/atan2_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor atan2(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & atan2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & atan2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & atan2_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace meta
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_gather_stats_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_gather_stats_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fd0cc8a2480b0ad6e9bbeab6b17080081b8568ad
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_gather_stats_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_outf(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count, at::Tensor & out0, at::Tensor & out1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_gather_stats_with_counts_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_gather_stats_with_counts_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..31255084d393c2f3cc62f507ab3beaa976b11823
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_gather_stats_with_counts_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_with_counts_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_with_counts_outf(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts, at::Tensor & out0, at::Tensor & out1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/complex_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/complex_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3dcf615a42e94eefcc2e849806fbaa9f6a092674
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/complex_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & complex_out(at::Tensor & out, const at::Tensor & real, const at::Tensor & imag);
+TORCH_API at::Tensor & complex_outf(const at::Tensor & real, const at::Tensor & imag, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/conv1d_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/conv1d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..ff81e006bed7971f2f149672db19fefa3a3351cd
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/conv1d_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API conv1d {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::conv1d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "conv1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] dilation=1, SymInt groups=1) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups);
+};
+
+struct TORCH_API conv1d_padding {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, c10::SymIntArrayRef, c10::string_view, c10::SymIntArrayRef, c10::SymInt);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::conv1d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "padding")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, str padding=\"valid\", SymInt[1] dilation=1, SymInt groups=1) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups);
+};
+
+}} // namespace at::_ops
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b3f7144b52f5240c70a0041c2de941099c7e31bf
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API cudnn_grid_sampler { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_grid_sampler") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output") + static at::Tensor call(const at::Tensor & self, const at::Tensor & grid); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid); +}; + +struct TORCH_API cudnn_grid_sampler_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_grid_sampler") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & grid, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cummax.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cummax.h new file mode 100644 index 0000000000000000000000000000000000000000..f9bd56b6cd9d4523f32b0cdb8687a66f1718f925 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cummax.h @@ -0,0 +1,53 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices) +inline ::std::tuple cummax(const at::Tensor & self, int64_t dim) { + return at::_ops::cummax::call(self, dim); +} + +// aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple cummax_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim) { + return at::_ops::cummax_out::call(self, dim, values, indices); +} +// aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple cummax_outf(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::cummax_out::call(self, dim, values, indices); +} + +// aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) +inline ::std::tuple cummax(const at::Tensor & self, at::Dimname dim) { + return at::_ops::cummax_dimname::call(self, dim); +} + +// aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple cummax_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim) { + return at::_ops::cummax_dimname_out::call(self, dim, values, indices); +} +// aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) 
values, Tensor(b!) indices) +inline ::std::tuple cummax_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::cummax_dimname_out::call(self, dim, values, indices); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e6d00046cb5dbbeb2863e1157838158c927be8a2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor diagonal(const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1); +TORCH_API at::Tensor diagonal(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset=0); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/diff_compositeimplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/diff_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..220aee166a3e5ad80732db4e1c2f5c81d029f1f3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/diff_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
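For reference, the cummax.h wrappers above all forward to at::_ops::cummax*::call. A minimal usage sketch, assuming a linked libtorch build (the tensor contents are illustrative only):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor t = at::tensor({1, 3, 2, 5, 4});
      // Functional form: returns (values, indices).
      auto [values, indices] = at::cummax(t, /*dim=*/0);
      // out= form: writes into preallocated tensors instead of allocating.
      at::Tensor v = at::empty_like(t);
      at::Tensor i = at::empty_like(t, at::kLong);
      at::cummax_out(v, i, t, /*dim=*/0);
      return 0;
    }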
+#include <ATen/core/Tensor.h> + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor diff(const at::Tensor & self, int64_t n=1, int64_t dim=-1, const ::std::optional<at::Tensor> & prepend={}, const ::std::optional<at::Tensor> & append={}); +TORCH_API at::Tensor & diff_out(at::Tensor & out, const at::Tensor & self, int64_t n=1, int64_t dim=-1, const ::std::optional<at::Tensor> & prepend={}, const ::std::optional<at::Tensor> & append={}); +TORCH_API at::Tensor & diff_outf(const at::Tensor & self, int64_t n, int64_t dim, const ::std::optional<at::Tensor> & prepend, const ::std::optional<at::Tensor> & append, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_dense_backward.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_dense_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..74c32d54f7191b346ecc466c2df3bcc4ffa24fbd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_dense_backward.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/embedding_dense_backward_ops.h> + +namespace at { + + +// aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor +inline at::Tensor embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { + return at::_ops::embedding_dense_backward::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { + return at::_ops::embedding_dense_backward::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq); + } +} + +// aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor +inline at::Tensor embedding_dense_backward_symint(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) { + return at::_ops::embedding_dense_backward::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) { + return at::_ops::embedding_dense_backward::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq); + } +} + +// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & embedding_dense_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { + return at::_ops::embedding_dense_backward_out::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & embedding_dense_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { + return at::_ops::embedding_dense_backward_out::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out); + } +} + +// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & embedding_dense_backward_outf(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, at::Tensor & out) { + return at::_ops::embedding_dense_backward_out::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & embedding_dense_backward_outf(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, at::Tensor & out) { + return at::_ops::embedding_dense_backward_out::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out); + } +} + +// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & embedding_dense_backward_symint_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) { + return at::_ops::embedding_dense_backward_out::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & embedding_dense_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) { + return at::_ops::embedding_dense_backward_out::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out); + } +} + +// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & embedding_dense_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out) { + return at::_ops::embedding_dense_backward_out::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & embedding_dense_backward_outf(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out) { + return at::_ops::embedding_dense_backward_out::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out); + } +} + +}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2cfff5b2114125e4e03c36fa00e1ee9c2fa8fb92 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor fft_hfft_symint(const at::Tensor & self, ::std::optional<c10::SymInt> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt); +TORCH_API at::Tensor & fft_hfft_symint_out(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out); +} // namespace native +} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gradient_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gradient_native.h new file mode 100644 index 0000000000000000000000000000000000000000..03a9aba86ab37370d8c14281d4b8f2228725a723 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gradient_native.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, const ::std::optional<at::Scalar> & spacing=::std::nullopt, ::std::optional<int64_t> dim=::std::nullopt, int64_t edge_order=1); +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order=1); +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order=1); +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, ::std::optional<int64_t> dim=::std::nullopt, int64_t edge_order=1); +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order=1); +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::TensorList spacing, ::std::optional<int64_t> dim=::std::nullopt, int64_t edge_order=1); +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order=1); +} // namespace native +} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gru_compositeimplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gru_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a07f99cb9739013998d3b32bd32c8a6429375006 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gru_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/Tensor.h> + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); + +} // namespace compositeimplicitautograd +} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gt_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gt_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b61d0dabb303f36984bf3d908be456045bbc58e8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gt_native.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> +#include <ATen/ops/gt_meta.h> + +namespace at { +namespace native { +struct TORCH_API structured_gt_Scalar_out : public at::meta::structured_gt_Scalar { +void impl(const at::Tensor & self, const at::Scalar & other, const at::Tensor & out); +}; +TORCH_API at::Tensor gt_scalar_nested(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor gt_quantized_cpu(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & gt_out_quantized_cpu(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +struct TORCH_API structured_gt_Tensor_out : public at::meta::structured_gt_Tensor { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +TORCH_API at::Tensor gt_quantized_cpu(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gt_out_quantized_cpu(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss.h new file mode 100644 index 0000000000000000000000000000000000000000..5c04c55c5bacd39990913282d0802d5fbb74b7a1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/huber_loss_ops.h> + +namespace at { + + +// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & huber_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0) { + return at::_ops::huber_loss_out::call(self, target, reduction, delta, out); +} +// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & huber_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out) { + return at::_ops::huber_loss_out::call(self, target, reduction, delta, out); +} + +// aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor +inline at::Tensor huber_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0) { + return at::_ops::huber_loss::call(self, target, reduction, delta); +} + +}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/i0_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/i0_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7f9929032fe967c064202a4144ac48c3a0183a82 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/i0_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> +#include <ATen/ops/i0_meta.h> + +namespace at { +namespace native { +struct TORCH_API structured_i0_out : public at::meta::structured_i0 { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b20953abe6a665d9f40e59ac413e263397901d89 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API is_conj { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::is_conj") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "is_conj(Tensor self) -> bool") + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/item_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/item_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..cc303f80abf93db76ceb54650a3df3c5ca35d36f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/item_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API item { + using schema = at::Scalar (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::item") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "item(Tensor self) -> Scalar") + static at::Scalar call(const at::Tensor & self); + static at::Scalar redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9ea400739c8c2719d460d68ff6b6cf89b8903d38 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor kaiser_window(int64_t window_length, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}); +TORCH_API at::Tensor & kaiser_window_out(int64_t window_length, at::Tensor & out); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}); +TORCH_API at::Tensor & kaiser_window_periodic_out(int64_t window_length, bool periodic, at::Tensor & out); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}); +TORCH_API at::Tensor & kaiser_window_beta_out(int64_t window_length, bool periodic, double beta, at::Tensor & out); +} // namespace native +} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5ed1e3b1af506d4148892e6751fc77b8b422e07e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
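The three kaiser_window overloads declared in kaiser_window_native.h above differ only in how much of (periodic, beta) is explicit. A short sketch of the corresponding public calls, assuming a linked libtorch build (the TensorOptions override is illustrative):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor w1 = at::kaiser_window(128);                     // periodic=true, beta=12.0
      at::Tensor w2 = at::kaiser_window(128, /*periodic=*/false); // symmetric window
      at::Tensor w3 = at::kaiser_window(128, /*periodic=*/true, /*beta=*/8.0,
                                        at::TensorOptions().dtype(at::kDouble));
      return 0;
    }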
+#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API kaiser_window { + using schema = at::Tensor (int64_t, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::kaiser_window") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor") + static at::Tensor call(int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +}; + +struct TORCH_API kaiser_window_periodic { + using schema = at::Tensor (int64_t, bool, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::kaiser_window") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "periodic") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor") + static at::Tensor call(int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +}; + +struct TORCH_API kaiser_window_beta { + using schema = at::Tensor (int64_t, bool, double, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::kaiser_window") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "beta") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor") + static at::Tensor call(int64_t window_length, bool periodic, double beta, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +}; + +struct TORCH_API kaiser_window_out { + using schema = at::Tensor & (int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::kaiser_window") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(int64_t window_length, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out); +}; + +struct TORCH_API kaiser_window_periodic_out { + using schema = at::Tensor & (int64_t, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::kaiser_window") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "periodic_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(int64_t window_length, bool periodic, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out); +}; + +struct TORCH_API kaiser_window_beta_out { + using schema = at::Tensor & (int64_t, bool, double, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::kaiser_window") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "beta_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(int64_t window_length, bool periodic, double beta, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, at::Tensor & out); +}; + +}} // namespace at::_ops
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e930571b7dc3a059f8834acbaa37da40558408f9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
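Each generated _ops struct above exposes call (dispatch from the top of the dispatcher) and redispatch (re-entry with a reduced DispatchKeySet); the inline wrappers in the Function.h headers are thin forwards to call. A sketch of invoking the raw entry point directly, equivalent to at::kaiser_window(64):

    #include <ATen/ATen.h>
    #include <ATen/ops/kaiser_window_ops.h>  // the generated Operator.h header above

    int main() {
      // All optional TensorOptions fields left as nullopt, i.e. the defaults.
      at::Tensor w = at::_ops::kaiser_window::call(
          64, /*dtype=*/::std::nullopt, /*layout=*/::std::nullopt,
          /*device=*/::std::nullopt, /*pin_memory=*/::std::nullopt);
      return 0;
    }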
+#include <ATen/core/Tensor.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & logical_not_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & logical_not_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cuda +} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/max.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/max.h new file mode 100644 index 0000000000000000000000000000000000000000..85377e361c3391f3a0516123b68cf9d800d2ddae --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/max.h @@ -0,0 +1,81 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/max_ops.h> + +namespace at { + + +// aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::max_dim::call(self, dim, keepdim); +} + +// aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::max_dim_max::call(self, dim, keepdim, max, max_values); +} +// aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple<at::Tensor &,at::Tensor &> max_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) { + return at::_ops::max_dim_max::call(self, dim, keepdim, max, max_values); +} + +// aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::max_names_dim::call(self, dim, keepdim); +} + +// aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::max_names_dim_max::call(self, dim, keepdim, max, max_values); +} +// aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple<at::Tensor &,at::Tensor &> max_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) { + return at::_ops::max_names_dim_max::call(self, dim, keepdim, max, max_values); +} + +// aten::max(Tensor self) -> Tensor +inline at::Tensor max(const at::Tensor & self) { + return at::_ops::max::call(self); +} + +// aten::max.other(Tensor self, Tensor other) -> Tensor +inline at::Tensor max(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::max_other::call(self, other); +} + +// aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::max_out::call(self, other, out); +} +// aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::max_out::call(self, other, out); +} + +// aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::max_unary_out::call(self, out); +} +// aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::max_unary_out::call(self, out); +} + +}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..181d07fe0b6c14146025bc53aba090d3862a4cbb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/Tensor.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor max_unpool2d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size); +TORCH_API at::Tensor max_unpool2d_symint(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size); +TORCH_API at::Tensor & max_unpool2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size); +TORCH_API at::Tensor & max_unpool2d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out); +TORCH_API at::Tensor & max_unpool2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size); +TORCH_API at::Tensor & max_unpool2d_symint_outf(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::Tensor & out); + +} // namespace cuda +} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_reorder_conv3d_weight_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_reorder_conv3d_weight_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cdede67d2858e059b13ed12fe73f189574ca8544 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_reorder_conv3d_weight_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
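The max.h header above bundles several distinct schemas under one C++ name; overload resolution picks between the global reduction, the per-dimension reduction, and the elementwise binary form. A minimal sketch, assuming a linked libtorch build:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor t = at::rand({4, 5});
      at::Tensor global_max = at::max(t);          // aten::max
      auto [vals, idxs] = at::max(t, /*dim=*/1);   // aten::max.dim
      at::Tensor other = at::rand({4, 5});
      at::Tensor elementwise = at::max(t, other);  // aten::max.other
      return 0;
    }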
+#include <ATen/core/Tensor.h> + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & mkldnn_reorder_conv3d_weight_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1, at::OptionalIntArrayRef input_size=::std::nullopt); +TORCH_API at::Tensor & mkldnn_reorder_conv3d_weight_outf(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size, at::Tensor & out); +TORCH_API at::Tensor & mkldnn_reorder_conv3d_weight_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1, at::OptionalSymIntArrayRef input_size=::std::nullopt); +TORCH_API at::Tensor & mkldnn_reorder_conv3d_weight_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num_cpu_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..56c34fcc0b682c45a6aee98e17ac9cb3241c845a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/Tensor.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & nan_to_num_out(at::Tensor & out, const at::Tensor & self, ::std::optional<double> nan=::std::nullopt, ::std::optional<double> posinf=::std::nullopt, ::std::optional<double> neginf=::std::nullopt); +TORCH_API at::Tensor & nan_to_num_outf(const at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf, at::Tensor & out); + +} // namespace cpu +} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2f20a125e515571825eed762228dcbf7f360760d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor nll_loss_symint(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100); +TORCH_API at::Tensor & nll_loss_out(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out); +} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f158d2c205a3b31eae235fdc19c79c08110fbcd2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
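The optional<double> parameters in the nan_to_num dispatch header above default to nullopt, meaning "replace NaN with 0 and ±inf with the dtype's finite extremes". A sketch of both forms (values are illustrative):

    #include <ATen/ATen.h>
    #include <cmath>
    #include <limits>

    int main() {
      const float inf = std::numeric_limits<float>::infinity();
      at::Tensor t = at::tensor({1.0f, NAN, inf, -inf});
      at::Tensor a = at::nan_to_num(t);  // defaults, as described above
      at::Tensor b = at::nan_to_num(t, /*nan=*/0.0, /*posinf=*/1e6, /*neginf=*/-1e6);
      return 0;
    }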
+#include <ATen/core/Tensor.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor poisson(const at::Tensor & self, ::std::optional<at::Generator> generator=::std::nullopt); + +} // namespace cuda +} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/prelu.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/prelu.h new file mode 100644 index 0000000000000000000000000000000000000000..80ffe1b58315ded3c6472196d849f045a9977fc2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/prelu.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/prelu_ops.h> + +namespace at { + + +// aten::prelu(Tensor self, Tensor weight) -> Tensor +inline at::Tensor prelu(const at::Tensor & self, const at::Tensor & weight) { + return at::_ops::prelu::call(self, weight); +} + +}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/promote_types_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/promote_types_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a499c5e891e9b4da722a4a75047b11846bbac773 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/promote_types_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API promote_types { + using schema = at::ScalarType (at::ScalarType, at::ScalarType); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::promote_types") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "promote_types(ScalarType type1, ScalarType type2) -> ScalarType") + static at::ScalarType call(at::ScalarType type1, at::ScalarType type2); + static at::ScalarType redispatch(c10::DispatchKeySet dispatchKeySet, at::ScalarType type1, at::ScalarType type2); +}; + +}} // namespace at::_ops
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/rand_like_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/rand_like_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..92ad0d09e7f19fdf64505c8560c475b5fd9dd469 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/rand_like_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
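promote_types is one of the few generated ops that takes and returns ScalarType rather than tensors; it exposes the dtype-promotion lattice that mixed-dtype kernels follow. For example:

    #include <ATen/ATen.h>

    int main() {
      at::ScalarType a = at::promote_types(at::kInt, at::kFloat);    // kFloat
      at::ScalarType b = at::promote_types(at::kHalf, at::kDouble);  // kDouble
      (void)a; (void)b;
      return 0;
    }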
+#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API rand_like { + using schema = at::Tensor (const at::Tensor &, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>, ::std::optional<at::MemoryFormat>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rand_like") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format); +}; + +struct TORCH_API rand_like_out { + using schema = at::Tensor & (const at::Tensor &, ::std::optional<at::MemoryFormat>, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rand_like") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out); +}; + +}} // namespace at::_ops
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1b1d2310a47ed5d3fedacc15a24e772f0c8835d3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
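rand_like mirrors the TensorOptions of its input unless individual options are overridden; a short sketch (the kDouble override is illustrative):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor ref = at::empty({2, 3}, at::kFloat);
      at::Tensor a = at::rand_like(ref);               // same shape/dtype/device as ref
      at::Tensor b = at::rand_like(ref, at::kDouble);  // dtype overridden
      return 0;
    }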
+#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API row_indices { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::row_indices") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "row_indices(Tensor(a) self) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sgn_meta_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sgn_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2e021d72923e3f2d6510dfd2c38f6bb3b66ab42f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sgn_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/Tensor.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor sgn(const at::Tensor & self); +TORCH_API at::Tensor & sgn_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & sgn_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & sgn_(at::Tensor & self); + +} // namespace meta
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_backward.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..fc5fdfa8ff84b659ac6c895274c7439a0e2da020 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/softplus_backward_ops.h> + +namespace at { + + +// aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & softplus_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) { + return at::_ops::softplus_backward_grad_input::call(grad_output, self, beta, threshold, grad_input); +} +// aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & softplus_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & grad_input) { + return at::_ops::softplus_backward_grad_input::call(grad_output, self, beta, threshold, grad_input); +} + +// aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor +inline at::Tensor softplus_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) { + return at::_ops::softplus_backward::call(grad_output, self, beta, threshold); +} + +}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..68608a4f0bb0e10d0c07df24a648eeefec35b694 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/Tensor.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor special_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n); + +} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_gammainc.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_gammainc.h new file mode 100644 index 0000000000000000000000000000000000000000..03c4847a493335b071a5a5462a1ebeb8c5656a2f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_gammainc.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/special_gammainc_ops.h> + +namespace at { + + +// aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_gammainc_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_gammainc_out::call(self, other, out); +} +// aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_gammainc_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::special_gammainc_out::call(self, other, out); +} + +// aten::special_gammainc(Tensor self, Tensor other) -> Tensor +inline at::Tensor special_gammainc(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_gammainc::call(self, other); +} + +}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_cpu_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..75c1ea9d5742492d1c1f756716c5168a1c0331bc --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/Tensor.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor special_scaled_modified_bessel_k1(const at::Tensor & x); +TORCH_API at::Tensor & special_scaled_modified_bessel_k1_out(at::Tensor & out, const at::Tensor & x); +TORCH_API at::Tensor & special_scaled_modified_bessel_k1_outf(const at::Tensor & x, at::Tensor & out); + +} // namespace cpu
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sum_compositeimplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sum_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5a449f4425c7e5aeac0079f2d89b8e0f6c34596e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sum_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
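special_gammainc computes the regularized lower incomplete gamma function P(a, x) elementwise over broadcast inputs; the _out variant writes into a caller-provided tensor. A sketch, assuming a linked libtorch build:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor a = at::tensor({1.0, 2.0, 3.0});
      at::Tensor x = at::tensor({0.5, 1.0, 4.0});
      at::Tensor p = at::special_gammainc(a, x);
      at::Tensor out = at::empty_like(p);
      at::special_gammainc_out(out, a, x);  // preallocated-output form
      return 0;
    }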
+#include <ATen/core/Tensor.h> + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor sum(const at::Tensor & self, at::DimnameList dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & sum_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & sum_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out); + +} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/tan_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/tan_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b84dd7d62e6b8f00b45c5de9dc37b1906d78da79 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/tan_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/Tensor.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor tan(const at::Tensor & self); +TORCH_API at::Tensor & tan_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/unique_dim_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/unique_dim_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..bd7d7ce7f12b7e0d88ebd15d3c00e5a58f0f4ec9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/unique_dim_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
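The sum overloads in this dispatch header are the named-dimension (Dimname) variants, which reduce over dimensions selected by name rather than index. A sketch, assuming the named-tensor C++ API (at::Dimname::fromSymbol and Tensor::refine_names) is available in the build; the names used are illustrative:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor t = at::ones({2, 3});
      at::Dimname rows = at::Dimname::fromSymbol(at::Symbol::dimname("rows"));
      at::Dimname cols = at::Dimname::fromSymbol(at::Symbol::dimname("cols"));
      at::Tensor named = t.refine_names({rows, cols});
      // Reduce over "rows", accumulating in double precision.
      at::Tensor s = at::sum(named, at::DimnameList{rows}, /*keepdim=*/false, at::kDouble);
      return 0;
    }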
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API unique_dim { + using schema = ::std::tuple (const at::Tensor &, int64_t, bool, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::unique_dim") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts); +}; + +struct TORCH_API unique_dim_out { + using schema = ::std::tuple (const at::Tensor &, int64_t, bool, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::unique_dim") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))") + static ::std::tuple call(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f16695daa4540446ce047785e8c9165092a77a65 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API upsample_trilinear3d_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, bool, ::std::optional, ::std::optional, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_trilinear3d_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) 
grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input); +}; + +struct TORCH_API upsample_trilinear3d_backward { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, bool, ::std::optional<double>, ::std::optional<double>, ::std::optional<double>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_trilinear3d_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/var.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/var.h new file mode 100644 index 0000000000000000000000000000000000000000..b7107890639afe52fa415df8c2ff692c9deae21f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/var.h @@ -0,0 +1,86 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <optional> + + + +#include <ATen/ops/var_ops.h> + +namespace at { + + +// aten::var(Tensor self, bool unbiased=True) -> Tensor +inline at::Tensor var(const at::Tensor & self, bool unbiased) { + return at::_ops::var::call(self, unbiased); +} + +// aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor +inline at::Tensor var(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) { + return at::_ops::var_dim::call(self, dim, unbiased, keepdim); +} + +// aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor +inline at::Tensor var(const at::Tensor & self, at::OptionalIntArrayRef dim=::std::nullopt, const ::std::optional<at::Scalar> & correction=::std::nullopt, bool keepdim=false) { + return at::_ops::var_correction::call(self, dim, correction, keepdim); +} + +// aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & var_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) { + return at::_ops::var_out::call(self, dim, unbiased, keepdim, out); +} +// aten::var.out(Tensor self, int[1]?
dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & var_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) { + return at::_ops::var_out::call(self, dim, unbiased, keepdim, out); +} + +// aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & var_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=::std::nullopt, const ::std::optional<at::Scalar> & correction=::std::nullopt, bool keepdim=false) { + return at::_ops::var_correction_out::call(self, dim, correction, keepdim, out); +} +// aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & var_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) { + return at::_ops::var_correction_out::call(self, dim, correction, keepdim, out); +} + +// aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor +inline at::Tensor var(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) { + return at::_ops::var_names_dim::call(self, dim, unbiased, keepdim); +} + +// aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & var_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) { + return at::_ops::var_names_out::call(self, dim, unbiased, keepdim, out); +} +// aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & var_outf(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) { + return at::_ops::var_names_out::call(self, dim, unbiased, keepdim, out); +} + +// aten::var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor +inline at::Tensor var(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction=::std::nullopt, bool keepdim=false) { + return at::_ops::var_correction_names::call(self, dim, correction, keepdim); +} + +// aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & var_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction=::std::nullopt, bool keepdim=false) { + return at::_ops::var_correction_names_out::call(self, dim, correction, keepdim, out); +} +// aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & var_outf(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) { + return at::_ops::var_correction_names_out::call(self, dim, correction, keepdim, out); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/vstack_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/vstack_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bf7b6a464aa6c2e9842baa1d1e0459f7f73ee90b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/vstack_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <optional> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor vstack(at::TensorList tensors); +TORCH_API at::Tensor & vstack_out(at::TensorList tensors, at::Tensor & out); +} // namespace native
+} // namespace at
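
For orientation: the var.h header added above declares the C++ `at::var` overload family, and each inline function simply forwards to the matching generated `at::_ops` struct, whose static `call` performs the actual operator dispatch. A minimal usage sketch follows, assuming a working libtorch/ATen build; the shapes, values, and the `main` wrapper are illustrative only and are not part of the vendored headers.

#include <ATen/ATen.h>
#include <vector>

int main() {
  // A small 2x3 float tensor to reduce over.
  at::Tensor t = at::rand({2, 3});

  // Legacy overload: aten::var(Tensor self, bool unbiased=True).
  at::Tensor v0 = at::var(t, /*unbiased=*/true);

  // Correction overload: aten::var.correction. correction=1 applies
  // Bessel's correction, matching unbiased=True in the legacy overload.
  std::vector<int64_t> dim = {0};
  at::Tensor v1 = at::var(t, dim, /*correction=*/at::Scalar(1), /*keepdim=*/false);

  // Out variant: writes the result into a preallocated tensor
  // (shape {3}, since reducing a {2, 3} tensor over dim 0).
  at::Tensor out = at::empty({3});
  at::var_out(out, t, dim, /*unbiased=*/true, /*keepdim=*/false);

  return 0;
}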