diff --git a/.gitattributes b/.gitattributes
index 9f2b0440a4d453b321eabbd14718de29138d5248..bb1f15fbdaa5badcce75a1d7de973ba2a1a174de 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -450,3 +450,6 @@ llava/lib/python3.10/lib-dynload/_xxsubinterpreters.cpython-310-x86_64-linux-gnu
 llava/lib/python3.10/lib-dynload/_socket.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 llava/lib/python3.10/lib-dynload/_lzma.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 llava/lib/python3.10/lib-dynload/_codecs_tw.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+llava/lib/python3.10/lib-dynload/_testclinic.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+llava/lib/python3.10/__pycache__/turtle.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+llava/lib/python3.10/__pycache__/_pydecimal.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
diff --git a/llava/lib/python3.10/__pycache__/__phello__.foo.cpython-310.pyc b/llava/lib/python3.10/__pycache__/__phello__.foo.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dc41547219099a03d90409555b823648d6fbf163
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/__phello__.foo.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/_aix_support.cpython-310.pyc b/llava/lib/python3.10/__pycache__/_aix_support.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5071ae37fe5b4c28a2c1522111ade23cafd897a7
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/_aix_support.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/_py_abc.cpython-310.pyc b/llava/lib/python3.10/__pycache__/_py_abc.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b7561fed40baa8dd7c22858c94e3d1fb02f45f1
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/_py_abc.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/_pydecimal.cpython-310.pyc b/llava/lib/python3.10/__pycache__/_pydecimal.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ace1192327ddad1685ba30863bb9ec272962cac6
--- /dev/null
+++ b/llava/lib/python3.10/__pycache__/_pydecimal.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:867ec504e3cc49724dd91bb949795ebec4dfbb3ea9fe5873b9907ac81789fba1
+size 157990
diff --git a/llava/lib/python3.10/__pycache__/argparse.cpython-310.pyc b/llava/lib/python3.10/__pycache__/argparse.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..90915ab7b295c3baf71c398ca7a51883d0f8c0fc
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/argparse.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/ast.cpython-310.pyc b/llava/lib/python3.10/__pycache__/ast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..519b703e73bcee08eadde76e48bed262e7a3ee0a
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/ast.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/asynchat.cpython-310.pyc b/llava/lib/python3.10/__pycache__/asynchat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bd9a39b83a95cb371904dbacadff1a15ed0525fb
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/asynchat.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/bisect.cpython-310.pyc b/llava/lib/python3.10/__pycache__/bisect.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7f00da0a8f1271be3d74740fdc53545b1f5d20cc
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/bisect.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/bz2.cpython-310.pyc b/llava/lib/python3.10/__pycache__/bz2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8cbc43a1bccf76165df961ca0ff456b75e83b18a
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/bz2.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/cgi.cpython-310.pyc b/llava/lib/python3.10/__pycache__/cgi.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7467be45d69d0191191d911cf70d70884f84c835
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/cgi.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/cmd.cpython-310.pyc b/llava/lib/python3.10/__pycache__/cmd.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f69b724cf0fc39e446514e52808e7f33b6baa98f
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/cmd.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/code.cpython-310.pyc b/llava/lib/python3.10/__pycache__/code.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e8c43091f4a3031d6c9e2a286f071ebd6eb21455
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/code.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/codecs.cpython-310.pyc b/llava/lib/python3.10/__pycache__/codecs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c63bbaf372625078ce91684c7045bef6162dc470
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/codecs.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/codeop.cpython-310.pyc b/llava/lib/python3.10/__pycache__/codeop.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c35c47c70013438369b09b5f5c4cdf68dc6db6e4
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/codeop.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/contextlib.cpython-310.pyc b/llava/lib/python3.10/__pycache__/contextlib.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b37d72580eeaf7c7ab5824e89bf8a0fc2aca2c4c
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/contextlib.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/crypt.cpython-310.pyc b/llava/lib/python3.10/__pycache__/crypt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..12072696e8a44928afc624ef163572683ab53342
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/crypt.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/csv.cpython-310.pyc b/llava/lib/python3.10/__pycache__/csv.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..093355977f142327a63294689df3339a6c1b3b60
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/csv.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/doctest.cpython-310.pyc b/llava/lib/python3.10/__pycache__/doctest.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3ddf1c3d714fad9814af725e15772fe63aa5eb3c
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/doctest.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/fileinput.cpython-310.pyc b/llava/lib/python3.10/__pycache__/fileinput.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d2e1e561439796b1fc42e3c8ba4eee0f56f2c466
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/fileinput.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/fractions.cpython-310.pyc b/llava/lib/python3.10/__pycache__/fractions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0026840cba1371c989a975f0ae508ae9581bcd5f
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/fractions.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/gettext.cpython-310.pyc b/llava/lib/python3.10/__pycache__/gettext.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c367699a6d65c7042cbd8c073fa4f4fbcd0b937
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/gettext.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/graphlib.cpython-310.pyc b/llava/lib/python3.10/__pycache__/graphlib.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ea979e5f90481f8e435d9a95a0f174df89082aad
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/graphlib.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/hashlib.cpython-310.pyc b/llava/lib/python3.10/__pycache__/hashlib.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6aac61753f05acc6b989ffb05a06d86e48495c4f
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/hashlib.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/imghdr.cpython-310.pyc b/llava/lib/python3.10/__pycache__/imghdr.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0cd50702379bd1056f491759a9e68b06c1d2806f
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/imghdr.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/ipaddress.cpython-310.pyc b/llava/lib/python3.10/__pycache__/ipaddress.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c0ee2a650b7810e29b90acd0577648699de91735
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/ipaddress.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/linecache.cpython-310.pyc b/llava/lib/python3.10/__pycache__/linecache.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..485a85890a83d7060b536f0d5b2c945075af4ba2
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/linecache.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/lzma.cpython-310.pyc b/llava/lib/python3.10/__pycache__/lzma.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..481d4b2c49d990cddbd61cd1b4c15263d36eb112
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/lzma.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/mimetypes.cpython-310.pyc b/llava/lib/python3.10/__pycache__/mimetypes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b038fce5da89ce361647d6fc33812d2d66b37eec
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/mimetypes.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/nntplib.cpython-310.pyc b/llava/lib/python3.10/__pycache__/nntplib.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1ea247c6430e9e23000255618887f2b60e906f45
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/nntplib.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/ntpath.cpython-310.pyc b/llava/lib/python3.10/__pycache__/ntpath.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d3f68650464bc52dd0318ad790d9ce65faa8e1b6
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/ntpath.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/nturl2path.cpython-310.pyc b/llava/lib/python3.10/__pycache__/nturl2path.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cb7fb387c79229a1c56a6f457b7acc0cf94fd054
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/nturl2path.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/opcode.cpython-310.pyc b/llava/lib/python3.10/__pycache__/opcode.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bc33c4de7a3126146128c2aed4fcd85dbef43918
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/opcode.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/pkgutil.cpython-310.pyc b/llava/lib/python3.10/__pycache__/pkgutil.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c5c839a322c53cfdf9c6cdf871a25f699501b9a
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/pkgutil.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/plistlib.cpython-310.pyc b/llava/lib/python3.10/__pycache__/plistlib.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c047410e6b8fb4d929ab33870938ce1fa30fce15
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/plistlib.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/poplib.cpython-310.pyc b/llava/lib/python3.10/__pycache__/poplib.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..adc5f9af72d9c4a169aa6180ccaab19d09cca345
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/poplib.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/posixpath.cpython-310.pyc b/llava/lib/python3.10/__pycache__/posixpath.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1499e5856b35e3f98afbd31ffbab28711669e269
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/posixpath.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/pprint.cpython-310.pyc b/llava/lib/python3.10/__pycache__/pprint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..33fe16a4f6ea84010714d5990758b22202a7f1a9
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/pprint.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/pstats.cpython-310.pyc b/llava/lib/python3.10/__pycache__/pstats.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0a6c8e52c0884d95bf1851414ef30377f9578632
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/pstats.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/pty.cpython-310.pyc b/llava/lib/python3.10/__pycache__/pty.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8d856920f4eab7785751737857c3f665e782e5c2
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/pty.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/random.cpython-310.pyc b/llava/lib/python3.10/__pycache__/random.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..636a3ab5625dca32b0bdf78c35adef49f55a54ac
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/random.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/re.cpython-310.pyc b/llava/lib/python3.10/__pycache__/re.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8087f1c5955dfd5e952d64aba88ea502f567b382
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/re.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/runpy.cpython-310.pyc b/llava/lib/python3.10/__pycache__/runpy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a407e0a2be1a11240ee00ae65150e12170d57bd7
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/runpy.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/secrets.cpython-310.pyc b/llava/lib/python3.10/__pycache__/secrets.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..68e640e291b200ca93f48e732b097f268604f9f6
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/secrets.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/smtplib.cpython-310.pyc b/llava/lib/python3.10/__pycache__/smtplib.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dd11f5dbf614194532e97e6254b0f82e165c00d3
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/smtplib.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/sre_compile.cpython-310.pyc b/llava/lib/python3.10/__pycache__/sre_compile.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6cd4032f64cc38dd29b4f85a9606b7be8bfc66e9
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/sre_compile.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/struct.cpython-310.pyc b/llava/lib/python3.10/__pycache__/struct.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0a48ece0414cb5a8f8a62f9c2b86b4deed224f5e
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/struct.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/symtable.cpython-310.pyc b/llava/lib/python3.10/__pycache__/symtable.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fc9572d56920381b1c92cea8243cd08d55262f70
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/symtable.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/tarfile.cpython-310.pyc b/llava/lib/python3.10/__pycache__/tarfile.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3cf573e72c8e9278e4e25a800f9ef50d74657606
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/tarfile.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/textwrap.cpython-310.pyc b/llava/lib/python3.10/__pycache__/textwrap.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fa4e087504dee9cb04db392e25fd978c1d11b35b
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/textwrap.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/threading.cpython-310.pyc b/llava/lib/python3.10/__pycache__/threading.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..325d7d80b0ea6be72c457c28f3506c5df7e9c3e6
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/threading.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/trace.cpython-310.pyc b/llava/lib/python3.10/__pycache__/trace.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6d7131e4c5f6fa4227165a508c55804e43db2654
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/trace.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/tty.cpython-310.pyc b/llava/lib/python3.10/__pycache__/tty.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5e9a3c968ce016ff661f8afb1bc76c42d18af065
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/tty.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/turtle.cpython-310.pyc b/llava/lib/python3.10/__pycache__/turtle.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f3e553722a624ee490be59524846f2bc4119621b
--- /dev/null
+++ b/llava/lib/python3.10/__pycache__/turtle.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e0a3de6e5bcb4263511830455a4bbd4d4628106d06d391cf31df663f8de6adf
+size 129111
diff --git a/llava/lib/python3.10/__pycache__/uuid.cpython-310.pyc b/llava/lib/python3.10/__pycache__/uuid.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c808d30b5b0602c514e31dddfb9c4d8ab3d1221f
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/uuid.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/warnings.cpython-310.pyc b/llava/lib/python3.10/__pycache__/warnings.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..739e820eb63b96597174c6b82a1ef36ae36ebb5a
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/warnings.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/webbrowser.cpython-310.pyc b/llava/lib/python3.10/__pycache__/webbrowser.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f99aca9c9e24815b88323362b60002ee0ac2eef
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/webbrowser.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/__pycache__/zipapp.cpython-310.pyc b/llava/lib/python3.10/__pycache__/zipapp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..55ae9525f033d783f665e147a27bff575bb60aee
Binary files /dev/null and b/llava/lib/python3.10/__pycache__/zipapp.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/lib-dynload/_testclinic.cpython-310-x86_64-linux-gnu.so b/llava/lib/python3.10/lib-dynload/_testclinic.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..33cb95091f88b746f4e9036866383fa78be59921
--- /dev/null
+++ b/llava/lib/python3.10/lib-dynload/_testclinic.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d21d17fa352f1e585afb8240dcbe49ad67b9bed71232b699c294b97820792559
+size 205672
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_with_update_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_with_update_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2b1802a1cc522a9cfd19b8564a371c820dd06de6
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_with_update_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _batch_norm_with_update_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, at::Tensor & reserve, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _batch_norm_with_update_outf(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, at::Tensor & reserve);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_with_update(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps);
+
+} // namespace cuda
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_with_update_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_with_update_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..3be99be37908f9b0cf40fe4910c688b8d92e49ee
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_with_update_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_with_update_functional(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _batch_norm_with_update_cpu_out(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, at::Tensor & reserve);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_with_update_cpu(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _batch_norm_with_update_cuda_out(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, at::Tensor & reserve);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_with_update_cuda(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_with_update_mkldnn(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps);
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..0bc63c65d84860e1863e82c2c524c2188326e366
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API block_diag {
+  using schema = at::Tensor (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::block_diag")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "block_diag(Tensor[] tensors) -> Tensor")
+  static at::Tensor call(at::TensorList tensors);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors);
+};
+
+struct TORCH_API block_diag_out {
+  using schema = at::Tensor & (at::TensorList, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::block_diag")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(at::TensorList tensors, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/eq_cpu_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/eq_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a15f08555822b803f3b3400e9b0da39ea3247f43
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/eq_cpu_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor eq(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & eq_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & eq_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & eq_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor eq(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & eq_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & eq_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & eq_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cpu
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2dee1ae58048f3e8cb66caf33904b2bc5d46e516
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu(const at::Tensor & A, bool pivot=true);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_svd_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_svd_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..67d86b79cef85ce3b225af36db8c5bec978c731f
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_svd_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_svd(const at::Tensor & A, bool full_matrices=true, ::std::optional<c10::string_view> driver=::std::nullopt);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_svd_out(const at::Tensor & A, bool full_matrices, ::std::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh);
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f73d98a9858f70f8517701303eac9776b0749857
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor logaddexp2(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & logaddexp2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & logaddexp2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1265e035b50c8b3a04114f9fd764ac17670fcec2
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_ops.h
@@ -0,0 +1,61 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API logsumexp {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logsumexp")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim);
+};
+
+struct TORCH_API logsumexp_out {
+  using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logsumexp")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
+};
+
+struct TORCH_API logsumexp_names {
+  using schema = at::Tensor (const at::Tensor &, at::DimnameList, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logsumexp")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::DimnameList dim, bool keepdim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim);
+};
+
+struct TORCH_API logsumexp_names_out {
+  using schema = at::Tensor & (const at::Tensor &, at::DimnameList, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logsumexp")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..20ad74a501feb52014995be8848b271366db65fa
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor masked_select_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask);
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..99494775bfb89973642b2a4d8934658baf35ebee
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_reorder_conv3d_weight_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_reorder_conv3d_weight_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..68cfdf24b668025f5e180825ba7419c5157ff233
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_reorder_conv3d_weight_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API mkldnn_reorder_conv3d_weight {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt, at::OptionalSymIntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_reorder_conv3d_weight")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_reorder_conv3d_weight(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size);
+};
+
+struct TORCH_API mkldnn_reorder_conv3d_weight_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt, at::OptionalSymIntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_reorder_conv3d_weight")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/multilabel_margin_loss_compositeimplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/multilabel_margin_loss_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bba7d0ecbf4c171e540a9447cfa3f7589e1d6c39
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/multilabel_margin_loss_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor multilabel_margin_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & multilabel_margin_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & multilabel_margin_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/new_zeros.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/new_zeros.h
new file mode 100644
index 0000000000000000000000000000000000000000..febec3303ef2662b25dabcefd6106cf832043e96
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/new_zeros.h
@@ -0,0 +1,97 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/new_zeros_ops.h>
+
+namespace at {
+
+
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor new_zeros(const at::Tensor & self, at::IntArrayRef size, at::TensorOptions options={}) {
+    return at::_ops::new_zeros::call(self, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+}
+
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor new_zeros(const at::Tensor & self, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+    return at::_ops::new_zeros::call(self, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
+  }
+}
+
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor new_zeros(const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options={}) {
+    return at::_ops::new_zeros::call(self, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+}
+
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor new_zeros(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+    return at::_ops::new_zeros::call(self, size, dtype, layout, device, pin_memory);
+  }
+}
+
+// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & new_zeros_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
+  return at::_ops::new_zeros_out::call(self, c10::fromIntArrayRefSlow(size), out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & new_zeros_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
+    return at::_ops::new_zeros_out::call(self, c10::fromIntArrayRefSlow(size), out);
+  }
+}
+
+// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & new_zeros_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
+  return at::_ops::new_zeros_out::call(self, c10::fromIntArrayRefSlow(size), out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & new_zeros_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
+    return at::_ops::new_zeros_out::call(self, c10::fromIntArrayRefSlow(size), out);
+  }
+}
+
+// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & new_zeros_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
+  return at::_ops::new_zeros_out::call(self, size, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & new_zeros_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
+    return at::_ops::new_zeros_out::call(self, size, out);
+  }
+}
+
+// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & new_zeros_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
+  return at::_ops::new_zeros_out::call(self, size, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & new_zeros_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
+    return at::_ops::new_zeros_out::call(self, size, out);
+  }
+}
+
+}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/set.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/set.h
new file mode 100644
index 0000000000000000000000000000000000000000..6d57a8154d15a513da085a9abc53d7f7b9ff47dd
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/set.h
@@ -0,0 +1,161 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/set_ops.h>
+
+namespace at {
+
+
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & set_(at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) {
+    return at::_ops::set__source_Storage_storage_offset::call(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
+  }
+}
+
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & set_(at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) {
+    return at::_ops::set__source_Storage_storage_offset::call(self, source, storage_offset, size, stride);
+  }
+}
+
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & set_(at::Tensor & self, const at::Tensor & source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) {
+    return at::_ops::set__source_Tensor_storage_offset::call(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
+  }
+}
+
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & set_(at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) {
+    return at::_ops::set__source_Tensor_storage_offset::call(self, source, storage_offset, size, stride);
+  }
+}
+
+// aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & set_out(at::Tensor & out, const at::Tensor & self, at::Storage source) {
+  return at::_ops::set_source_Storage_out::call(self, source, out);
+}
+// aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & set_outf(const at::Tensor & self, at::Storage source, at::Tensor & out) {
+  return at::_ops::set_source_Storage_out::call(self, source, out);
+}
+
+// aten::set.source_Storage(Tensor self, Storage source) -> Tensor
+inline at::Tensor set(const at::Tensor & self, at::Storage source) {
+  return at::_ops::set_source_Storage::call(self, source);
+}
+
+// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & set_out(at::Tensor & out, const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) {
+  return at::_ops::set_source_Storage_storage_offset_out::call(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & set_out(at::Tensor & out, const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) {
+    return at::_ops::set_source_Storage_storage_offset_out::call(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
+  }
+}
+
+// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & set_outf(const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
+  return at::_ops::set_source_Storage_storage_offset_out::call(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & set_outf(const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
+    return at::_ops::set_source_Storage_storage_offset_out::call(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
+  }
+}
+
+// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & set_symint_out(at::Tensor & out, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) {
+  return at::_ops::set_source_Storage_storage_offset_out::call(self, source, storage_offset, size, stride, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & set_out(at::Tensor & out, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) {
+    return at::_ops::set_source_Storage_storage_offset_out::call(self, source, storage_offset, size, stride, out);
+  }
+}
+
+// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & set_symint_outf(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
+  return at::_ops::set_source_Storage_storage_offset_out::call(self, source, storage_offset, size, stride, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & set_outf(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
+    return at::_ops::set_source_Storage_storage_offset_out::call(self, source, storage_offset, size, stride, out);
+  }
+}
+
+// aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor
+inline at::Tensor set(const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) {
+  return at::_ops::set_source_Storage_storage_offset::call(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor set(const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) {
+    return at::_ops::set_source_Storage_storage_offset::call(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
+  }
+}
+
+// aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor
+inline at::Tensor set_symint(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) {
+  return at::_ops::set_source_Storage_storage_offset::call(self, source, storage_offset, size, stride);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor set(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) {
+    return at::_ops::set_source_Storage_storage_offset::call(self, source, storage_offset, size, stride);
+  }
+}
+
+// aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & set_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & source) {
+  return at::_ops::set_source_Tensor_out::call(self, source, out);
+}
+// aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & set_outf(const at::Tensor & self, const at::Tensor & source, at::Tensor & out) {
+  return at::_ops::set_source_Tensor_out::call(self, source, out);
+}
+
+// aten::set.source_Tensor(Tensor self, Tensor source) -> Tensor
+inline at::Tensor set(const at::Tensor & self, const at::Tensor & source) {
+  return at::_ops::set_source_Tensor::call(self, source);
+}
+
+// aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & set_out(at::Tensor & out, const at::Tensor & self) {
+  return at::_ops::set_out::call(self, out);
+}
+// aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & set_outf(const at::Tensor & self, at::Tensor & out) {
+  return at::_ops::set_out::call(self, out);
+}
+
+// aten::set(Tensor self) -> Tensor
+inline at::Tensor set(const at::Tensor & self) {
+  return at::_ops::set::call(self);
+}
+
+}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/softmax_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/softmax_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..733d256bad5db00fbf523b51c6d33b0b8c72b61c
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/softmax_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor softmax(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & softmax_out(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+TORCH_API at::Tensor softmax(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_compressed_tensor_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_compressed_tensor_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..8995f041d8ddbc261be02fffa99b5b2547f7f9ee
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_compressed_tensor_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+TORCH_API at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_resize_meta_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_resize_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fbdaa39b990e2d4aecadda81a29656cb32032394
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_resize_meta_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API const at::Tensor & sparse_resize_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim);
+
+} // namespace meta
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j1_meta_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j1_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..300df779b2a6ce36824f30ba946fea0b638b1631
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j1_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor special_bessel_j1(const at::Tensor & self);
+TORCH_API at::Tensor & special_bessel_j1_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_bessel_j1_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/svd_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/svd_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..14de68f13909fa878efc7ef949dac533db036668
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/svd_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> svd(const at::Tensor & self, bool some=true, bool compute_uv=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_out(const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V);
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_bsc_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_bsc_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..7decdb9aea33c361117f5cc59b68451421d9a4ce
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_bsc_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim=::std::nullopt);
+} // namespace native
+} // namespace at