diff --git a/.gitattributes b/.gitattributes index 53cfd6d3e7ea122e6026c339df58b598d15d32b7..74984602606fa17ed6785725a597675865c44e45 100644 --- a/.gitattributes +++ b/.gitattributes @@ -816,3 +816,4 @@ parrot/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_mlir.so filter=lfs diff=lfs merge=lfs -text videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_parallel_device.so filter=lfs diff=lfs merge=lfs -text pllava/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv.so.9 filter=lfs diff=lfs merge=lfs -text +videochat2/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/_pywrap_tensorflow_interpreter_wrapper.so filter=lfs diff=lfs merge=lfs -text diff --git a/llava_next/share/terminfo/8/8510 b/llava_next/share/terminfo/8/8510 new file mode 100644 index 0000000000000000000000000000000000000000..2874d511c91ced0214a77389dce5eb056d1482a7 Binary files /dev/null and b/llava_next/share/terminfo/8/8510 differ diff --git a/llava_next/share/terminfo/l/linux-c-nc b/llava_next/share/terminfo/l/linux-c-nc new file mode 100644 index 0000000000000000000000000000000000000000..13473aec2b6d7a4917c753b0ab1014fd8a2de342 Binary files /dev/null and b/llava_next/share/terminfo/l/linux-c-nc differ diff --git a/llava_next/share/terminfo/l/linux-koi8 b/llava_next/share/terminfo/l/linux-koi8 new file mode 100644 index 0000000000000000000000000000000000000000..0fbad8ca3d2fe79e987130288adf1a5877348d9e Binary files /dev/null and b/llava_next/share/terminfo/l/linux-koi8 differ diff --git a/llava_next/share/terminfo/l/linux-m b/llava_next/share/terminfo/l/linux-m new file mode 100644 index 0000000000000000000000000000000000000000..02b202e4e3f2afeccd783d4b5aefd0d78014f036 Binary files /dev/null and b/llava_next/share/terminfo/l/linux-m differ diff --git a/llava_next/share/terminfo/l/linux-s b/llava_next/share/terminfo/l/linux-s new file mode 100644 index 0000000000000000000000000000000000000000..79e961b7fc1b66684d617d93aafdaaf153650616 Binary files /dev/null and b/llava_next/share/terminfo/l/linux-s differ diff --git a/llava_next/share/terminfo/l/linux2.6 b/llava_next/share/terminfo/l/linux2.6 new file mode 100644 index 0000000000000000000000000000000000000000..5e80a19543da69dca56937dde55cff441eaff6a7 Binary files /dev/null and b/llava_next/share/terminfo/l/linux2.6 differ diff --git a/llava_next/share/terminfo/l/lisa b/llava_next/share/terminfo/l/lisa new file mode 100644 index 0000000000000000000000000000000000000000..2bbfd1ec9478c1a8f50af4e8d0be98662c56a792 Binary files /dev/null and b/llava_next/share/terminfo/l/lisa differ diff --git a/llava_next/share/terminfo/l/lpr b/llava_next/share/terminfo/l/lpr new file mode 100644 index 0000000000000000000000000000000000000000..279e1e87833ce6b0c7dec73c458c4cd758b78896 Binary files /dev/null and b/llava_next/share/terminfo/l/lpr differ diff --git a/llava_next/share/terminfo/l/luna68k b/llava_next/share/terminfo/l/luna68k new file mode 100644 index 0000000000000000000000000000000000000000..c9a18a80e5849ac23dbbc22df44f922bb30835bc Binary files /dev/null and b/llava_next/share/terminfo/l/luna68k differ diff --git a/llava_next/share/terminfo/p/p12 b/llava_next/share/terminfo/p/p12 new file mode 100644 index 0000000000000000000000000000000000000000..12de25677be37fc5fee85d23eda474b8cb642755 Binary files /dev/null and b/llava_next/share/terminfo/p/p12 differ diff --git a/llava_next/share/terminfo/p/p14-m-w b/llava_next/share/terminfo/p/p14-m-w new file mode 
100644 index 0000000000000000000000000000000000000000..5b6475de6eef814393a5f5a8fc3d368625b276f7 Binary files /dev/null and b/llava_next/share/terminfo/p/p14-m-w differ diff --git a/llava_next/share/terminfo/p/p14-w b/llava_next/share/terminfo/p/p14-w new file mode 100644 index 0000000000000000000000000000000000000000..1d7df53b710aff295e42eeca63d45879279a70fe Binary files /dev/null and b/llava_next/share/terminfo/p/p14-w differ diff --git a/llava_next/share/terminfo/p/p19 b/llava_next/share/terminfo/p/p19 new file mode 100644 index 0000000000000000000000000000000000000000..4b1eda0ea4ee7d8afc1f8a16ed14eef213605712 Binary files /dev/null and b/llava_next/share/terminfo/p/p19 differ diff --git a/llava_next/share/terminfo/p/p4 b/llava_next/share/terminfo/p/p4 new file mode 100644 index 0000000000000000000000000000000000000000..1d53e488c16eac6171ad331aeba03638332c7940 Binary files /dev/null and b/llava_next/share/terminfo/p/p4 differ diff --git a/llava_next/share/terminfo/p/p8-w b/llava_next/share/terminfo/p/p8-w new file mode 100644 index 0000000000000000000000000000000000000000..8f254d3604fcd521d7b155ab6a90e2717ba6b1c8 Binary files /dev/null and b/llava_next/share/terminfo/p/p8-w differ diff --git a/llava_next/share/terminfo/p/p9-8 b/llava_next/share/terminfo/p/p9-8 new file mode 100644 index 0000000000000000000000000000000000000000..8191a9bd8dc3adefd3fecabeac8265090521e448 Binary files /dev/null and b/llava_next/share/terminfo/p/p9-8 differ diff --git a/llava_next/share/terminfo/p/pc-coherent b/llava_next/share/terminfo/p/pc-coherent new file mode 100644 index 0000000000000000000000000000000000000000..e6e3856d35b312c32688be30ac6bdda2f7816393 Binary files /dev/null and b/llava_next/share/terminfo/p/pc-coherent differ diff --git a/llava_next/share/terminfo/p/pc-minix b/llava_next/share/terminfo/p/pc-minix new file mode 100644 index 0000000000000000000000000000000000000000..b78d8a1e9434fe8ad99af043f2fb537615da46d1 Binary files /dev/null and b/llava_next/share/terminfo/p/pc-minix differ diff --git a/llava_next/share/terminfo/p/pc-venix b/llava_next/share/terminfo/p/pc-venix new file mode 100644 index 0000000000000000000000000000000000000000..32a7ec05eeaa5938c238b39519911e132250b0b1 Binary files /dev/null and b/llava_next/share/terminfo/p/pc-venix differ diff --git a/llava_next/share/terminfo/p/pc3 b/llava_next/share/terminfo/p/pc3 new file mode 100644 index 0000000000000000000000000000000000000000..08460b72fd4bf74e3189f8b3d806ee8f5d125ce8 Binary files /dev/null and b/llava_next/share/terminfo/p/pc3 differ diff --git a/llava_next/share/terminfo/p/pc3r b/llava_next/share/terminfo/p/pc3r new file mode 100644 index 0000000000000000000000000000000000000000..0cb4322ea844d6ea0ac6b122ae7bf194e4c81c68 Binary files /dev/null and b/llava_next/share/terminfo/p/pc3r differ diff --git a/llava_next/share/terminfo/p/pcansi b/llava_next/share/terminfo/p/pcansi new file mode 100644 index 0000000000000000000000000000000000000000..2e24b9ac0983d3f5b99e828eca5e86504c6926a2 Binary files /dev/null and b/llava_next/share/terminfo/p/pcansi differ diff --git a/llava_next/share/terminfo/p/pcansi-33 b/llava_next/share/terminfo/p/pcansi-33 new file mode 100644 index 0000000000000000000000000000000000000000..77ec07a919bdc8c2553d752f939c2a65198964b1 Binary files /dev/null and b/llava_next/share/terminfo/p/pcansi-33 differ diff --git a/llava_next/share/terminfo/p/pcansi-33-m b/llava_next/share/terminfo/p/pcansi-33-m new file mode 100644 index 0000000000000000000000000000000000000000..c9b0dcb6076a56225f92ed4578b9c5d164647c82 
Binary files /dev/null and b/llava_next/share/terminfo/p/pcansi-33-m differ diff --git a/llava_next/share/terminfo/p/pcansi-43-m b/llava_next/share/terminfo/p/pcansi-43-m new file mode 100644 index 0000000000000000000000000000000000000000..be5691893bf0209fcf2a2b12f960a6d341708385 Binary files /dev/null and b/llava_next/share/terminfo/p/pcansi-43-m differ diff --git a/llava_next/share/terminfo/p/pcansi-m b/llava_next/share/terminfo/p/pcansi-m new file mode 100644 index 0000000000000000000000000000000000000000..8be0ab6f54451f2304eaf37dee1f05901caf7022 Binary files /dev/null and b/llava_next/share/terminfo/p/pcansi-m differ diff --git a/llava_next/share/terminfo/p/pcansi25m b/llava_next/share/terminfo/p/pcansi25m new file mode 100644 index 0000000000000000000000000000000000000000..eea464609fa627787de94bef3b401d01f63540ae Binary files /dev/null and b/llava_next/share/terminfo/p/pcansi25m differ diff --git a/llava_next/share/terminfo/p/pccon+colors b/llava_next/share/terminfo/p/pccon+colors new file mode 100644 index 0000000000000000000000000000000000000000..8a884fa8f8df4bc873f678ce85a50027dc796f4b Binary files /dev/null and b/llava_next/share/terminfo/p/pccon+colors differ diff --git a/llava_next/share/terminfo/p/pcix b/llava_next/share/terminfo/p/pcix new file mode 100644 index 0000000000000000000000000000000000000000..fdfbe5174216ff07831532aaa865dcdd345fb93e Binary files /dev/null and b/llava_next/share/terminfo/p/pcix differ diff --git a/llava_next/share/terminfo/p/pckermit12 b/llava_next/share/terminfo/p/pckermit12 new file mode 100644 index 0000000000000000000000000000000000000000..55ac2ccbb1632c5c88bc7368505abad128df4a79 Binary files /dev/null and b/llava_next/share/terminfo/p/pckermit12 differ diff --git a/llava_next/share/terminfo/p/pcplot b/llava_next/share/terminfo/p/pcplot new file mode 100644 index 0000000000000000000000000000000000000000..2a358389845686fa5df78667c83160b853db9082 Binary files /dev/null and b/llava_next/share/terminfo/p/pcplot differ diff --git a/llava_next/share/terminfo/p/pcvt25-color b/llava_next/share/terminfo/p/pcvt25-color new file mode 100644 index 0000000000000000000000000000000000000000..1b04e2da2ce34ddf60f9195348637b673fd16251 Binary files /dev/null and b/llava_next/share/terminfo/p/pcvt25-color differ diff --git a/llava_next/share/terminfo/p/pcvt25w b/llava_next/share/terminfo/p/pcvt25w new file mode 100644 index 0000000000000000000000000000000000000000..ad03d15a35df9431c1884045b04d606e22ab0edc Binary files /dev/null and b/llava_next/share/terminfo/p/pcvt25w differ diff --git a/llava_next/share/terminfo/p/pcvt35w b/llava_next/share/terminfo/p/pcvt35w new file mode 100644 index 0000000000000000000000000000000000000000..33263c2f280a30b9a239365c6b6bb898d0044d39 Binary files /dev/null and b/llava_next/share/terminfo/p/pcvt35w differ diff --git a/llava_next/share/terminfo/p/pcvt40 b/llava_next/share/terminfo/p/pcvt40 new file mode 100644 index 0000000000000000000000000000000000000000..74ecd8c0baff0187c29f6eb71da8179f8e748d2c Binary files /dev/null and b/llava_next/share/terminfo/p/pcvt40 differ diff --git a/llava_next/share/terminfo/p/pcvt40w b/llava_next/share/terminfo/p/pcvt40w new file mode 100644 index 0000000000000000000000000000000000000000..d034a0f487d943bfd12e362d8b094bffbac92d0b Binary files /dev/null and b/llava_next/share/terminfo/p/pcvt40w differ diff --git a/llava_next/share/terminfo/p/pcvt43w b/llava_next/share/terminfo/p/pcvt43w new file mode 100644 index 0000000000000000000000000000000000000000..f850ca2c089388fe53c5fa6d140c73f1770cdb78 Binary 
files /dev/null and b/llava_next/share/terminfo/p/pcvt43w differ diff --git a/llava_next/share/terminfo/p/pe6300 b/llava_next/share/terminfo/p/pe6300 new file mode 100644 index 0000000000000000000000000000000000000000..01acc1b70c66fd6eb748506db7d001cdc8e487a1 Binary files /dev/null and b/llava_next/share/terminfo/p/pe6300 differ diff --git a/llava_next/share/terminfo/p/pe7000c b/llava_next/share/terminfo/p/pe7000c new file mode 100644 index 0000000000000000000000000000000000000000..e86846227c04c7c20c2d8e086fb824569b4ebf65 Binary files /dev/null and b/llava_next/share/terminfo/p/pe7000c differ diff --git a/llava_next/share/terminfo/p/pmconsole b/llava_next/share/terminfo/p/pmconsole new file mode 100644 index 0000000000000000000000000000000000000000..5a5b783f338fc1dddbfd715f8810a78d3f013041 Binary files /dev/null and b/llava_next/share/terminfo/p/pmconsole differ diff --git a/llava_next/share/terminfo/p/prism12 b/llava_next/share/terminfo/p/prism12 new file mode 100644 index 0000000000000000000000000000000000000000..12de25677be37fc5fee85d23eda474b8cb642755 Binary files /dev/null and b/llava_next/share/terminfo/p/prism12 differ diff --git a/llava_next/share/terminfo/p/prism12-m b/llava_next/share/terminfo/p/prism12-m new file mode 100644 index 0000000000000000000000000000000000000000..c4dd32141f0efc92e1692a94f55bacf68d0db902 Binary files /dev/null and b/llava_next/share/terminfo/p/prism12-m differ diff --git a/llava_next/share/terminfo/p/prism12-w b/llava_next/share/terminfo/p/prism12-w new file mode 100644 index 0000000000000000000000000000000000000000..2fe870dbc535c37e247b6f7213e7d89ec4e3879e Binary files /dev/null and b/llava_next/share/terminfo/p/prism12-w differ diff --git a/llava_next/share/terminfo/p/prism14-m b/llava_next/share/terminfo/p/prism14-m new file mode 100644 index 0000000000000000000000000000000000000000..39dd310720abf005cd3c3561b386b426fbb3a3a4 Binary files /dev/null and b/llava_next/share/terminfo/p/prism14-m differ diff --git a/llava_next/share/terminfo/p/prism7 b/llava_next/share/terminfo/p/prism7 new file mode 100644 index 0000000000000000000000000000000000000000..539b4ff6570496b03f33a1c661ff661137089364 Binary files /dev/null and b/llava_next/share/terminfo/p/prism7 differ diff --git a/llava_next/share/terminfo/p/prism8 b/llava_next/share/terminfo/p/prism8 new file mode 100644 index 0000000000000000000000000000000000000000..12fd9e6abc9789d25991acfcd58a0d0e89650205 Binary files /dev/null and b/llava_next/share/terminfo/p/prism8 differ diff --git a/llava_next/share/terminfo/p/prism8-w b/llava_next/share/terminfo/p/prism8-w new file mode 100644 index 0000000000000000000000000000000000000000..8f254d3604fcd521d7b155ab6a90e2717ba6b1c8 Binary files /dev/null and b/llava_next/share/terminfo/p/prism8-w differ diff --git a/llava_next/share/terminfo/p/prism9-w b/llava_next/share/terminfo/p/prism9-w new file mode 100644 index 0000000000000000000000000000000000000000..44029ba7f9d33ae39f2d11a5ddbb8d53e8d6874f Binary files /dev/null and b/llava_next/share/terminfo/p/prism9-w differ diff --git a/llava_next/share/terminfo/p/pro350 b/llava_next/share/terminfo/p/pro350 new file mode 100644 index 0000000000000000000000000000000000000000..e73dc12216fa051e6de0de0905aff1e0cd9ff4d1 Binary files /dev/null and b/llava_next/share/terminfo/p/pro350 differ diff --git a/llava_next/share/terminfo/p/psterm b/llava_next/share/terminfo/p/psterm new file mode 100644 index 0000000000000000000000000000000000000000..d7ec53e2a8b70c03e9a3cd8232df21405bbc0c58 Binary files /dev/null and 
b/llava_next/share/terminfo/p/psterm differ diff --git a/llava_next/share/terminfo/p/psterm-90x28 b/llava_next/share/terminfo/p/psterm-90x28 new file mode 100644 index 0000000000000000000000000000000000000000..6557b3727a3d9725cbf3f0af1285a99973f2d29c Binary files /dev/null and b/llava_next/share/terminfo/p/psterm-90x28 differ diff --git a/llava_next/share/terminfo/p/pt100 b/llava_next/share/terminfo/p/pt100 new file mode 100644 index 0000000000000000000000000000000000000000..cf224e6f4f4ab49dd2d1b020d58572b8905c8f54 Binary files /dev/null and b/llava_next/share/terminfo/p/pt100 differ diff --git a/llava_next/share/terminfo/p/pt200w b/llava_next/share/terminfo/p/pt200w new file mode 100644 index 0000000000000000000000000000000000000000..1f73e3468a655831aa03a6d73c88f794337019d1 Binary files /dev/null and b/llava_next/share/terminfo/p/pt200w differ diff --git a/llava_next/share/terminfo/p/pt250 b/llava_next/share/terminfo/p/pt250 new file mode 100644 index 0000000000000000000000000000000000000000..01b1660d892af3129f0530f10846f8bfb443249e Binary files /dev/null and b/llava_next/share/terminfo/p/pt250 differ diff --git a/llava_next/share/terminfo/p/pt505 b/llava_next/share/terminfo/p/pt505 new file mode 100644 index 0000000000000000000000000000000000000000..6640ef9e893070e99b99b7805bca48846a13aa95 Binary files /dev/null and b/llava_next/share/terminfo/p/pt505 differ diff --git a/llava_next/share/terminfo/p/pt505-22 b/llava_next/share/terminfo/p/pt505-22 new file mode 100644 index 0000000000000000000000000000000000000000..bbf0d9a1a0969f9627684f6a45de7e8880107c7e Binary files /dev/null and b/llava_next/share/terminfo/p/pt505-22 differ diff --git a/llava_next/share/terminfo/p/pt505-24 b/llava_next/share/terminfo/p/pt505-24 new file mode 100644 index 0000000000000000000000000000000000000000..ead33f945ac23d717bf22d18f000b965418d4631 Binary files /dev/null and b/llava_next/share/terminfo/p/pt505-24 differ diff --git a/llava_next/share/terminfo/p/putty+fnkeys b/llava_next/share/terminfo/p/putty+fnkeys new file mode 100644 index 0000000000000000000000000000000000000000..8d0c4405aac6bfa23245adc392d9477456a1b922 Binary files /dev/null and b/llava_next/share/terminfo/p/putty+fnkeys differ diff --git a/llava_next/share/terminfo/p/putty+fnkeys+esc b/llava_next/share/terminfo/p/putty+fnkeys+esc new file mode 100644 index 0000000000000000000000000000000000000000..5b307946e9d09a2beae6df241eec4f4637873e5d Binary files /dev/null and b/llava_next/share/terminfo/p/putty+fnkeys+esc differ diff --git a/llava_next/share/terminfo/p/putty+fnkeys+vt400 b/llava_next/share/terminfo/p/putty+fnkeys+vt400 new file mode 100644 index 0000000000000000000000000000000000000000..1d329d383e81793c381ff2f8ab9b5651fd7abfa7 Binary files /dev/null and b/llava_next/share/terminfo/p/putty+fnkeys+vt400 differ diff --git a/llava_next/share/terminfo/p/putty+fnkeys+xterm b/llava_next/share/terminfo/p/putty+fnkeys+xterm new file mode 100644 index 0000000000000000000000000000000000000000..994007e6b4d4c24eaf9099bdfdce13565e3d341b Binary files /dev/null and b/llava_next/share/terminfo/p/putty+fnkeys+xterm differ diff --git a/llava_next/share/terminfo/p/putty+keypad b/llava_next/share/terminfo/p/putty+keypad new file mode 100644 index 0000000000000000000000000000000000000000..2bf59a0a4aa8d8f2d31ee2d74af30e61ae1779ed Binary files /dev/null and b/llava_next/share/terminfo/p/putty+keypad differ diff --git a/llava_next/share/terminfo/p/putty-m1b b/llava_next/share/terminfo/p/putty-m1b new file mode 100644 index 
0000000000000000000000000000000000000000..148e727c4470d4519f9dd86537d0f3d3924c74e5 Binary files /dev/null and b/llava_next/share/terminfo/p/putty-m1b differ diff --git a/llava_next/share/terminfo/p/putty-m2 b/llava_next/share/terminfo/p/putty-m2 new file mode 100644 index 0000000000000000000000000000000000000000..6d07493f3ba55c91d816c6b84b48950eaefab8fd Binary files /dev/null and b/llava_next/share/terminfo/p/putty-m2 differ diff --git a/llava_next/share/terminfo/p/putty-noapp b/llava_next/share/terminfo/p/putty-noapp new file mode 100644 index 0000000000000000000000000000000000000000..c67bf882bf52d45a9f5cdf6e52fa2e21ee377030 Binary files /dev/null and b/llava_next/share/terminfo/p/putty-noapp differ diff --git a/llava_next/share/terminfo/p/putty-sco b/llava_next/share/terminfo/p/putty-sco new file mode 100644 index 0000000000000000000000000000000000000000..d615ac650ad983cc887fd5b0594a7d85673cb0eb Binary files /dev/null and b/llava_next/share/terminfo/p/putty-sco differ diff --git a/parrot/lib/python3.10/site-packages/torch/_logging/__pycache__/structured.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_logging/__pycache__/structured.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b8014c293701313b9916d4aacffcc79b195c600 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/_logging/__pycache__/structured.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/_numpy/_binary_ufuncs_impl.py b/parrot/lib/python3.10/site-packages/torch/_numpy/_binary_ufuncs_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..34268c5911d6648c452dd7025edfc3e2f5b0db78 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_numpy/_binary_ufuncs_impl.py @@ -0,0 +1,86 @@ +# mypy: ignore-errors + +"""Export torch work functions for binary ufuncs, rename/tweak to match numpy. +This listing is further exported to public symbols in the `torch._numpy/_ufuncs.py` module. +""" + +import torch + +from torch import ( # noqa: F401 + add, # noqa: F401 + arctan2, # noqa: F401 + bitwise_and, # noqa: F401 + bitwise_left_shift as left_shift, # noqa: F401 + bitwise_or, # noqa: F401 + bitwise_right_shift as right_shift, # noqa: F401 + bitwise_xor, # noqa: F401 + copysign, # noqa: F401 + divide, # noqa: F401 + eq as equal, # noqa: F401 + float_power, # noqa: F401 + floor_divide, # noqa: F401 + fmax, # noqa: F401 + fmin, # noqa: F401 + fmod, # noqa: F401 + gcd, # noqa: F401 + greater, # noqa: F401 + greater_equal, # noqa: F401 + heaviside, # noqa: F401 + hypot, # noqa: F401 + lcm, # noqa: F401 + ldexp, # noqa: F401 + less, # noqa: F401 + less_equal, # noqa: F401 + logaddexp, # noqa: F401 + logaddexp2, # noqa: F401 + logical_and, # noqa: F401 + logical_or, # noqa: F401 + logical_xor, # noqa: F401 + maximum, # noqa: F401 + minimum, # noqa: F401 + multiply, # noqa: F401 + nextafter, # noqa: F401 + not_equal, # noqa: F401 + pow as power, # noqa: F401 + remainder, # noqa: F401 + remainder as mod, # noqa: F401 + subtract, # noqa: F401 + true_divide, # noqa: F401 +) + +from . import _dtypes_impl, _util + + +# work around torch limitations w.r.t. 
numpy +def matmul(x, y): + # work around: + # - RuntimeError: expected scalar type Int but found Double + # - RuntimeError: "addmm_impl_cpu_" not implemented for 'Bool' + # - RuntimeError: "addmm_impl_cpu_" not implemented for 'Half' + dtype = _dtypes_impl.result_type_impl(x, y) + is_bool = dtype == torch.bool + is_half = (x.dtype == torch.float16 or y.dtype == torch.float16) and ( + x.is_cpu or y.is_cpu + ) + + work_dtype = dtype + if is_bool: + work_dtype = torch.uint8 + if is_half: + work_dtype = torch.float32 + + x = _util.cast_if_needed(x, work_dtype) + y = _util.cast_if_needed(y, work_dtype) + + result = torch.matmul(x, y) + + if work_dtype != dtype: + result = result.to(dtype) + + return result + + +# a stub implementation of divmod, should be improved after +# https://github.com/pytorch/pytorch/issues/90820 is fixed in pytorch +def divmod(x, y): + return x // y, x % y diff --git a/parrot/lib/python3.10/site-packages/torch/_numpy/_casting_dicts.py b/parrot/lib/python3.10/site-packages/torch/_numpy/_casting_dicts.py new file mode 100644 index 0000000000000000000000000000000000000000..b30ce7c556044879ae02d023a60153533db7a5d1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_numpy/_casting_dicts.py @@ -0,0 +1,1367 @@ +# mypy: ignore-errors + +import torch + +# These two dicts are autogenerated with autogen/gen_dtypes.py, +# using numpy version 1.24.3. + +_can_cast_dict = { + "no": { + torch.float16: { + torch.float16: True, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.float32: { + torch.float16: False, + torch.float32: True, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.float64: { + torch.float16: False, + torch.float32: False, + torch.float64: True, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.complex64: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: True, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.complex128: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.uint8: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: True, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.uint16: { + torch.float16: False, + 
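As a minimal sketch of the matmul workaround added above, called directly on the private module (this usage is not part of the diff; the public torch._numpy layer normally wraps these functions):

import torch
from torch._numpy import _binary_ufuncs_impl

a = torch.ones(2, 2, dtype=torch.bool)
# torch.matmul(a, a) would raise: "addmm_impl_cpu_" not implemented for 'Bool'.
# The wrapper computes in uint8 and casts the result back to bool.
print(_binary_ufuncs_impl.matmul(a, a))  # tensor([[True, True], [True, True]])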
torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: True, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.uint32: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: True, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.uint64: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: True, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.int8: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: True, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.int16: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: True, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.int32: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: True, + torch.int64: False, + torch.bool: False, + }, + torch.int64: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: True, + torch.bool: False, + }, + torch.bool: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: True, + }, + }, + "equiv": { + torch.float16: { + torch.float16: True, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.float32: { + torch.float16: False, + torch.float32: True, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.float64: { + torch.float16: False, + torch.float32: False, + torch.float64: True, + torch.complex64: False, + 
torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.complex64: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: True, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.complex128: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.uint8: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: True, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.uint16: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: True, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.uint32: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: True, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.uint64: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: True, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.int8: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: True, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.int16: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: True, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.int32: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: True, + torch.int64: False, + torch.bool: False, + }, + torch.int64: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, 
+ torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: True, + torch.bool: False, + }, + torch.bool: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: True, + }, + }, + "safe": { + torch.float16: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.float32: { + torch.float16: False, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.float64: { + torch.float16: False, + torch.float32: False, + torch.float64: True, + torch.complex64: False, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.complex64: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.complex128: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.uint8: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: False, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.uint16: { + torch.float16: False, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: False, + torch.int16: False, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.uint32: { + torch.float16: False, + torch.float32: False, + torch.float64: True, + torch.complex64: False, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: True, + torch.uint64: True, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: True, + torch.bool: False, + }, + torch.uint64: { + torch.float16: False, + torch.float32: False, + torch.float64: True, + torch.complex64: False, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: True, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: 
False, + torch.bool: False, + }, + torch.int8: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.int16: { + torch.float16: False, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.int32: { + torch.float16: False, + torch.float32: False, + torch.float64: True, + torch.complex64: False, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.int64: { + torch.float16: False, + torch.float32: False, + torch.float64: True, + torch.complex64: False, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: True, + torch.bool: False, + }, + torch.bool: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + }, + "same_kind": { + torch.float16: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.float32: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.float64: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.complex64: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.complex128: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.uint8: { + torch.float16: True, + torch.float32: True, + torch.float64: 
True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.uint16: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.uint32: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.uint64: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.int8: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.int16: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.int32: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.int64: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.uint16: False, + torch.uint32: False, + torch.uint64: False, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.bool: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + }, + "unsafe": { + torch.float16: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.float32: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + 
torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.float64: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.complex64: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.complex128: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.uint8: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.uint16: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.uint32: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.uint64: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.int8: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.int16: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.int32: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.int64: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: 
True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.bool: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.uint16: True, + torch.uint32: True, + torch.uint64: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + }, +} + + +_result_type_dict = { + torch.float16: { + torch.float16: torch.float16, + torch.float32: torch.float32, + torch.float64: torch.float64, + torch.complex64: torch.complex64, + torch.complex128: torch.complex128, + torch.uint8: torch.float16, + torch.uint16: torch.float32, + torch.uint32: torch.float64, + torch.uint64: torch.float64, + torch.int8: torch.float16, + torch.int16: torch.float32, + torch.int32: torch.float64, + torch.int64: torch.float64, + torch.bool: torch.float16, + }, + torch.float32: { + torch.float16: torch.float32, + torch.float32: torch.float32, + torch.float64: torch.float64, + torch.complex64: torch.complex64, + torch.complex128: torch.complex128, + torch.uint8: torch.float32, + torch.uint16: torch.float32, + torch.uint32: torch.float64, + torch.uint64: torch.float64, + torch.int8: torch.float32, + torch.int16: torch.float32, + torch.int32: torch.float64, + torch.int64: torch.float64, + torch.bool: torch.float32, + }, + torch.float64: { + torch.float16: torch.float64, + torch.float32: torch.float64, + torch.float64: torch.float64, + torch.complex64: torch.complex128, + torch.complex128: torch.complex128, + torch.uint8: torch.float64, + torch.uint16: torch.float64, + torch.uint32: torch.float64, + torch.uint64: torch.float64, + torch.int8: torch.float64, + torch.int16: torch.float64, + torch.int32: torch.float64, + torch.int64: torch.float64, + torch.bool: torch.float64, + }, + torch.complex64: { + torch.float16: torch.complex64, + torch.float32: torch.complex64, + torch.float64: torch.complex128, + torch.complex64: torch.complex64, + torch.complex128: torch.complex128, + torch.uint8: torch.complex64, + torch.uint16: torch.complex64, + torch.uint32: torch.complex128, + torch.uint64: torch.complex128, + torch.int8: torch.complex64, + torch.int16: torch.complex64, + torch.int32: torch.complex128, + torch.int64: torch.complex128, + torch.bool: torch.complex64, + }, + torch.complex128: { + torch.float16: torch.complex128, + torch.float32: torch.complex128, + torch.float64: torch.complex128, + torch.complex64: torch.complex128, + torch.complex128: torch.complex128, + torch.uint8: torch.complex128, + torch.uint16: torch.complex128, + torch.uint32: torch.complex128, + torch.uint64: torch.complex128, + torch.int8: torch.complex128, + torch.int16: torch.complex128, + torch.int32: torch.complex128, + torch.int64: torch.complex128, + torch.bool: torch.complex128, + }, + torch.uint8: { + torch.float16: torch.float16, + torch.float32: torch.float32, + torch.float64: torch.float64, + torch.complex64: torch.complex64, + torch.complex128: torch.complex128, + torch.uint8: torch.uint8, + torch.uint16: torch.uint16, + torch.uint32: torch.uint32, + torch.uint64: torch.uint64, + torch.int8: torch.int16, + torch.int16: torch.int16, + torch.int32: torch.int32, + torch.int64: torch.int64, + torch.bool: torch.uint8, + }, + torch.uint16: { + torch.float16: torch.float32, + torch.float32: torch.float32, + torch.float64: torch.float64, + torch.complex64: torch.complex64, + 
torch.complex128: torch.complex128, + torch.uint8: torch.uint16, + torch.uint16: torch.uint16, + torch.uint32: torch.uint32, + torch.uint64: torch.uint64, + torch.int8: torch.int32, + torch.int16: torch.int32, + torch.int32: torch.int32, + torch.int64: torch.int64, + torch.bool: torch.uint16, + }, + torch.uint32: { + torch.float16: torch.float64, + torch.float32: torch.float64, + torch.float64: torch.float64, + torch.complex64: torch.complex128, + torch.complex128: torch.complex128, + torch.uint8: torch.uint32, + torch.uint16: torch.uint32, + torch.uint32: torch.uint32, + torch.uint64: torch.uint64, + torch.int8: torch.int64, + torch.int16: torch.int64, + torch.int32: torch.int64, + torch.int64: torch.int64, + torch.bool: torch.uint32, + }, + torch.uint64: { + torch.float16: torch.float64, + torch.float32: torch.float64, + torch.float64: torch.float64, + torch.complex64: torch.complex128, + torch.complex128: torch.complex128, + torch.uint8: torch.uint64, + torch.uint16: torch.uint64, + torch.uint32: torch.uint64, + torch.uint64: torch.uint64, + torch.int8: torch.float64, + torch.int16: torch.float64, + torch.int32: torch.float64, + torch.int64: torch.float64, + torch.bool: torch.uint64, + }, + torch.int8: { + torch.float16: torch.float16, + torch.float32: torch.float32, + torch.float64: torch.float64, + torch.complex64: torch.complex64, + torch.complex128: torch.complex128, + torch.uint8: torch.int16, + torch.uint16: torch.int32, + torch.uint32: torch.int64, + torch.uint64: torch.float64, + torch.int8: torch.int8, + torch.int16: torch.int16, + torch.int32: torch.int32, + torch.int64: torch.int64, + torch.bool: torch.int8, + }, + torch.int16: { + torch.float16: torch.float32, + torch.float32: torch.float32, + torch.float64: torch.float64, + torch.complex64: torch.complex64, + torch.complex128: torch.complex128, + torch.uint8: torch.int16, + torch.uint16: torch.int32, + torch.uint32: torch.int64, + torch.uint64: torch.float64, + torch.int8: torch.int16, + torch.int16: torch.int16, + torch.int32: torch.int32, + torch.int64: torch.int64, + torch.bool: torch.int16, + }, + torch.int32: { + torch.float16: torch.float64, + torch.float32: torch.float64, + torch.float64: torch.float64, + torch.complex64: torch.complex128, + torch.complex128: torch.complex128, + torch.uint8: torch.int32, + torch.uint16: torch.int32, + torch.uint32: torch.int64, + torch.uint64: torch.float64, + torch.int8: torch.int32, + torch.int16: torch.int32, + torch.int32: torch.int32, + torch.int64: torch.int64, + torch.bool: torch.int32, + }, + torch.int64: { + torch.float16: torch.float64, + torch.float32: torch.float64, + torch.float64: torch.float64, + torch.complex64: torch.complex128, + torch.complex128: torch.complex128, + torch.uint8: torch.int64, + torch.uint16: torch.int64, + torch.uint32: torch.int64, + torch.uint64: torch.float64, + torch.int8: torch.int64, + torch.int16: torch.int64, + torch.int32: torch.int64, + torch.int64: torch.int64, + torch.bool: torch.int64, + }, + torch.bool: { + torch.float16: torch.float16, + torch.float32: torch.float32, + torch.float64: torch.float64, + torch.complex64: torch.complex64, + torch.complex128: torch.complex128, + torch.uint8: torch.uint8, + torch.uint16: torch.uint16, + torch.uint32: torch.uint32, + torch.uint64: torch.uint64, + torch.int8: torch.int8, + torch.int16: torch.int16, + torch.int32: torch.int32, + torch.int64: torch.int64, + torch.bool: torch.bool, + }, +} diff --git a/parrot/lib/python3.10/site-packages/torch/_numpy/_reductions_impl.py 
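As a sketch of how these autogenerated lookup tables are consumed (the access pattern below is implied by the dict structure and is not itself part of this diff; it assumes the vendored environment is importable):

import torch
from torch._numpy import _casting_dicts

# "safe" casting: uint8 -> int16 is allowed, but int16 -> uint8 is not
assert _casting_dicts._can_cast_dict["safe"][torch.uint8][torch.int16]
assert not _casting_dicts._can_cast_dict["safe"][torch.int16][torch.uint8]

# type promotion: int8 combined with uint8 promotes to int16, matching NumPy 1.24 rules
assert _casting_dicts._result_type_dict[torch.int8][torch.uint8] == torch.int16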
b/parrot/lib/python3.10/site-packages/torch/_numpy/_reductions_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..c1c46ad2a47746188b6fd5a8f167f2408a0993ee --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_numpy/_reductions_impl.py @@ -0,0 +1,458 @@ +# mypy: ignore-errors + +"""Implementation of reduction operations, to be wrapped into arrays, dtypes etc. +in the 'public' layer. + +Anything here only deals with torch objects, e.g. "dtype" is a torch.dtype instance, etc. +""" +from __future__ import annotations + +import functools +from typing import Optional, TYPE_CHECKING + +import torch + +from . import _dtypes_impl, _util + +if TYPE_CHECKING: + from ._normalizations import ( + ArrayLike, + AxisLike, + DTypeLike, + KeepDims, + NotImplementedType, + OutArray, + ) + + +def _deco_axis_expand(func): + """ + Generically handle axis arguments in reductions. + axis is *always* the 2nd arg in the function, so there is no need to inspect its signature. + """ + + @functools.wraps(func) + def wrapped(a, axis=None, *args, **kwds): + if axis is not None: + axis = _util.normalize_axis_tuple(axis, a.ndim) + + if axis == (): + # axis=() requests a reduction over no axes, which NumPy treats as a no-op. + # So we insert a length-one axis and run the reduction along it. + # We cannot return a.clone() as this would sidestep the checks inside the function + newshape = _util.expand_shape(a.shape, axis=0) + a = a.reshape(newshape) + axis = (0,) + + return func(a, axis, *args, **kwds) + + return wrapped + + +def _atleast_float(dtype, other_dtype): + """Return a dtype that is real or complex floating-point. + + For inputs that are boolean or integer dtypes, this returns the default + float dtype; real floating-point and complex dtypes are passed through unchanged. + """ + if dtype is None: + dtype = other_dtype + if not (dtype.is_floating_point or dtype.is_complex): + return _dtypes_impl.default_dtypes().float_dtype + return dtype + + +@_deco_axis_expand +def count_nonzero(a: ArrayLike, axis: AxisLike = None, *, keepdims: KeepDims = False): + return a.count_nonzero(axis) + + +@_deco_axis_expand +def argmax( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + *, + keepdims: KeepDims = False, +): + if a.is_complex(): + raise NotImplementedError(f"argmax with dtype={a.dtype}.") + + axis = _util.allow_only_single_axis(axis) + + if a.dtype == torch.bool: + # RuntimeError: "argmax_cpu" not implemented for 'Bool' + a = a.to(torch.uint8) + + return torch.argmax(a, axis) + + +@_deco_axis_expand +def argmin( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + *, + keepdims: KeepDims = False, +): + if a.is_complex(): + raise NotImplementedError(f"argmin with dtype={a.dtype}.") + + axis = _util.allow_only_single_axis(axis) + + if a.dtype == torch.bool: + # RuntimeError: "argmin_cpu" not implemented for 'Bool' + a = a.to(torch.uint8) + + return torch.argmin(a, axis) + + +@_deco_axis_expand +def any( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + *, + where: NotImplementedType = None, +): + axis = _util.allow_only_single_axis(axis) + axis_kw = {} if axis is None else {"dim": axis} + return torch.any(a, **axis_kw) + + +@_deco_axis_expand +def all( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + *, + where: NotImplementedType = None, +): + axis = _util.allow_only_single_axis(axis) + axis_kw = {} if axis is None else {"dim": axis} + return
torch.all(a, **axis_kw) + + +@_deco_axis_expand +def amax( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + initial: NotImplementedType = None, + where: NotImplementedType = None, +): + if a.is_complex(): + raise NotImplementedError(f"amax with dtype={a.dtype}") + + return a.amax(axis) + + +max = amax + + +@_deco_axis_expand +def amin( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + initial: NotImplementedType = None, + where: NotImplementedType = None, +): + if a.is_complex(): + raise NotImplementedError(f"amin with dtype={a.dtype}") + + return a.amin(axis) + + +min = amin + + +@_deco_axis_expand +def ptp( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, +): + return a.amax(axis) - a.amin(axis) + + +@_deco_axis_expand +def sum( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + initial: NotImplementedType = None, + where: NotImplementedType = None, +): + assert dtype is None or isinstance(dtype, torch.dtype) + + if dtype == torch.bool: + dtype = _dtypes_impl.default_dtypes().int_dtype + + axis_kw = {} if axis is None else {"dim": axis} + return a.sum(dtype=dtype, **axis_kw) + + +@_deco_axis_expand +def prod( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + initial: NotImplementedType = None, + where: NotImplementedType = None, +): + axis = _util.allow_only_single_axis(axis) + + if dtype == torch.bool: + dtype = _dtypes_impl.default_dtypes().int_dtype + + axis_kw = {} if axis is None else {"dim": axis} + return a.prod(dtype=dtype, **axis_kw) + + +product = prod + + +@_deco_axis_expand +def mean( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + *, + where: NotImplementedType = None, +): + dtype = _atleast_float(dtype, a.dtype) + + axis_kw = {} if axis is None else {"dim": axis} + result = a.mean(dtype=dtype, **axis_kw) + + return result + + +@_deco_axis_expand +def std( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, + ddof=0, + keepdims: KeepDims = False, + *, + where: NotImplementedType = None, +): + in_dtype = dtype + dtype = _atleast_float(dtype, a.dtype) + tensor = _util.cast_if_needed(a, dtype) + result = tensor.std(dim=axis, correction=ddof) + return _util.cast_if_needed(result, in_dtype) + + +@_deco_axis_expand +def var( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, + ddof=0, + keepdims: KeepDims = False, + *, + where: NotImplementedType = None, +): + in_dtype = dtype + dtype = _atleast_float(dtype, a.dtype) + tensor = _util.cast_if_needed(a, dtype) + result = tensor.var(dim=axis, correction=ddof) + return _util.cast_if_needed(result, in_dtype) + + +# cumsum / cumprod are almost reductions: +# 1. no keepdims +# 2. 
+
+
+# cumsum / cumprod are almost reductions:
+#   1. no keepdims
+#   2. axis=None flattens
+
+
+def cumsum(
+    a: ArrayLike,
+    axis: AxisLike = None,
+    dtype: Optional[DTypeLike] = None,
+    out: Optional[OutArray] = None,
+):
+    if dtype == torch.bool:
+        dtype = _dtypes_impl.default_dtypes().int_dtype
+    if dtype is None:
+        dtype = a.dtype
+
+    (a,), axis = _util.axis_none_flatten(a, axis=axis)
+    axis = _util.normalize_axis_index(axis, a.ndim)
+
+    return a.cumsum(axis=axis, dtype=dtype)
+
+
+def cumprod(
+    a: ArrayLike,
+    axis: AxisLike = None,
+    dtype: Optional[DTypeLike] = None,
+    out: Optional[OutArray] = None,
+):
+    if dtype == torch.bool:
+        dtype = _dtypes_impl.default_dtypes().int_dtype
+    if dtype is None:
+        dtype = a.dtype
+
+    (a,), axis = _util.axis_none_flatten(a, axis=axis)
+    axis = _util.normalize_axis_index(axis, a.ndim)
+
+    return a.cumprod(axis=axis, dtype=dtype)
+
+
+cumproduct = cumprod
+
+
+def average(
+    a: ArrayLike,
+    axis=None,
+    weights: ArrayLike = None,
+    returned=False,
+    *,
+    keepdims=False,
+):
+    if weights is None:
+        result = mean(a, axis=axis)
+        wsum = torch.as_tensor(a.numel() / result.numel(), dtype=result.dtype)
+    else:
+        if not a.dtype.is_floating_point:
+            a = a.double()
+
+        # axis & weights
+        if a.shape != weights.shape:
+            if axis is None:
+                raise TypeError(
+                    "Axis must be specified when shapes of a and weights differ."
+                )
+            if weights.ndim != 1:
+                raise TypeError(
+                    "1D weights expected when shapes of a and weights differ."
+                )
+            if weights.shape[0] != a.shape[axis]:
+                raise ValueError(
+                    "Length of weights not compatible with specified axis."
+                )
+
+            # set up the weights to broadcast along axis
+            weights = torch.broadcast_to(weights, (a.ndim - 1) * (1,) + weights.shape)
+            weights = weights.swapaxes(-1, axis)
+
+        # do the work
+        result_dtype = _dtypes_impl.result_type_impl(a, weights)
+        numerator = sum(a * weights, axis, dtype=result_dtype)
+        wsum = sum(weights, axis, dtype=result_dtype)
+        result = numerator / wsum
+
+    # We process keepdims manually because the decorator does not deal with variadic returns.
+    if keepdims:
+        result = _util.apply_keepdims(result, axis, a.ndim)
+
+    if returned:
+        if wsum.shape != result.shape:
+            wsum = torch.broadcast_to(wsum, result.shape).clone()
+        return result, wsum
+    else:
+        return result
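+
+
+# Usage sketch (illustrative): 1D weights are broadcast against the reduction
+# axis, as in np.average:
+# >>> a = torch.arange(6.0).reshape(2, 3)
+# >>> w = torch.tensor([1.0, 3.0])
+# >>> average(a, axis=0, weights=w)
+# tensor([2.2500, 3.2500, 4.2500])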
+
+
+# Not using deco_axis_expand as it assumes that axis is the second arg
+def quantile(
+    a: ArrayLike,
+    q: ArrayLike,
+    axis: AxisLike = None,
+    out: Optional[OutArray] = None,
+    overwrite_input=False,
+    method="linear",
+    keepdims: KeepDims = False,
+    *,
+    interpolation: NotImplementedType = None,
+):
+    if overwrite_input:
+        # raise NotImplementedError("overwrite_input in quantile not implemented.")
+        # NumPy documents that `overwrite_input` MAY modify inputs:
+        # https://numpy.org/doc/stable/reference/generated/numpy.percentile.html#numpy-percentile
+        # Since modifying the input is permitted but not required, we simply
+        # ignore the flag and always work out-of-place.
+        pass
+
+    if not a.dtype.is_floating_point:
+        dtype = _dtypes_impl.default_dtypes().float_dtype
+        a = a.to(dtype)
+
+    # edge case: torch.quantile only supports float32 and float64
+    if a.dtype == torch.float16:
+        a = a.to(torch.float32)
+
+    if axis is None:
+        a = a.flatten()
+        q = q.flatten()
+        axis = (0,)
+    else:
+        axis = _util.normalize_axis_tuple(axis, a.ndim)
+
+    # FIXME(Mario) Doesn't np.quantile accept a tuple?
+    # torch.quantile does accept a number. If we don't want to implement the
+    # tuple behaviour (it's definitely low priority), change
+    # `normalize_axis_tuple` into a `normalize_axis_index` above.
+    axis = _util.allow_only_single_axis(axis)
+
+    q = _util.cast_if_needed(q, a.dtype)
+
+    return torch.quantile(a, q, axis=axis, interpolation=method)
+
+
+def percentile(
+    a: ArrayLike,
+    q: ArrayLike,
+    axis: AxisLike = None,
+    out: Optional[OutArray] = None,
+    overwrite_input=False,
+    method="linear",
+    keepdims: KeepDims = False,
+    *,
+    interpolation: NotImplementedType = None,
+):
+    # np.percentile(float_tensor, 30): q.dtype is int64 => q / 100.0 is float32
+    if _dtypes_impl.python_type_for_torch(q.dtype) == int:
+        q = q.to(_dtypes_impl.default_dtypes().float_dtype)
+    qq = q / 100.0
+
+    return quantile(
+        a,
+        qq,
+        axis=axis,
+        overwrite_input=overwrite_input,
+        method=method,
+        keepdims=keepdims,
+        interpolation=interpolation,
+    )
+
+
+def median(
+    a: ArrayLike,
+    axis=None,
+    out: Optional[OutArray] = None,
+    overwrite_input=False,
+    keepdims: KeepDims = False,
+):
+    return quantile(
+        a,
+        torch.as_tensor(0.5),
+        axis=axis,
+        overwrite_input=overwrite_input,
+        out=out,
+        keepdims=keepdims,
+    )
diff --git a/parrot/lib/python3.10/site-packages/torch/_numpy/_unary_ufuncs_impl.py b/parrot/lib/python3.10/site-packages/torch/_numpy/_unary_ufuncs_impl.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce4a30cc40e7132e24812041d438c60b8f07ca8e
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/_numpy/_unary_ufuncs_impl.py
@@ -0,0 +1,73 @@
+# mypy: ignore-errors
+
+"""Export torch work functions for unary ufuncs, renamed/tweaked to match NumPy.
+This listing is further exported to public symbols in the `_numpy/_ufuncs.py` module.
+"""
+
+import torch
+
+from torch import (  # noqa: F401
+    absolute as fabs,  # noqa: F401
+    arccos,  # noqa: F401
+    arccosh,  # noqa: F401
+    arcsin,  # noqa: F401
+    arcsinh,  # noqa: F401
+    arctan,  # noqa: F401
+    arctanh,  # noqa: F401
+    bitwise_not,  # noqa: F401
+    bitwise_not as invert,  # noqa: F401
+    ceil,  # noqa: F401
+    conj_physical as conjugate,  # noqa: F401
+    cos,  # noqa: F401
+    cosh,  # noqa: F401
+    deg2rad,  # noqa: F401
+    deg2rad as radians,  # noqa: F401
+    exp,  # noqa: F401
+    exp2,  # noqa: F401
+    expm1,  # noqa: F401
+    floor,  # noqa: F401
+    isfinite,  # noqa: F401
+    isinf,  # noqa: F401
+    isnan,  # noqa: F401
+    log,  # noqa: F401
+    log10,  # noqa: F401
+    log1p,  # noqa: F401
+    log2,  # noqa: F401
+    logical_not,  # noqa: F401
+    negative,  # noqa: F401
+    rad2deg,  # noqa: F401
+    rad2deg as degrees,  # noqa: F401
+    reciprocal,  # noqa: F401
+    round as fix,  # noqa: F401
+    round as rint,  # noqa: F401
+    sign,  # noqa: F401
+    signbit,  # noqa: F401
+    sin,  # noqa: F401
+    sinh,  # noqa: F401
+    sqrt,  # noqa: F401
+    square,  # noqa: F401
+    tan,  # noqa: F401
+    tanh,  # noqa: F401
+    trunc,  # noqa: F401
+)
+
+
+# special cases: torch does not export these names
+def cbrt(x):
+    return torch.pow(x, 1 / 3)
+
+
+def positive(x):
+    return +x
+
+
+def absolute(x):
+    # work around torch.absolute not being implemented for bools
+    if x.dtype == torch.bool:
+        return x
+    return torch.absolute(x)
+
+
+# TODO: set __name__ and __qualname__
+abs = absolute
+conj = conjugate
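+
+
+# Usage sketch (illustrative): cbrt goes through torch.pow with a fractional
+# exponent, so it only agrees with np.cbrt for non-negative inputs:
+# >>> cbrt(torch.tensor(27.0))  # ~tensor(3.)
+# >>> cbrt(torch.tensor(-8.0))  # tensor(nan), whereas np.cbrt(-8.0) == -2.0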
diff --git a/parrot/lib/python3.10/site-packages/torch/_numpy/_util.py b/parrot/lib/python3.10/site-packages/torch/_numpy/_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..477d3d44671ad4dd37ebc61a01032c6d3130e655
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/_numpy/_util.py
@@ -0,0 +1,261 @@
+# mypy: ignore-errors
+
+"""Assorted utilities, which do not need anything other than torch and the stdlib."""
+
+import operator
+
+import torch
+
+from . import _dtypes_impl
+
+
+# https://github.com/numpy/numpy/blob/v1.23.0/numpy/distutils/misc_util.py#L497-L504
+def is_sequence(seq):
+    if isinstance(seq, str):
+        return False
+    try:
+        len(seq)
+    except Exception:
+        return False
+    return True
+
+
+class AxisError(ValueError, IndexError):
+    pass
+
+
+class UFuncTypeError(TypeError, RuntimeError):
+    pass
+
+
+def cast_if_needed(tensor, dtype):
+    # NB: no casting if dtype=None
+    if dtype is not None and tensor.dtype != dtype:
+        tensor = tensor.to(dtype)
+    return tensor
+
+
+def cast_int_to_float(x):
+    # cast integers and bools to the default float dtype
+    if _dtypes_impl._category(x.dtype) < 2:
+        x = x.to(_dtypes_impl.default_dtypes().float_dtype)
+    return x
+
+
+# a replica of the version in ./numpy/numpy/core/src/multiarray/common.h
+def normalize_axis_index(ax, ndim, argname=None):
+    if not (-ndim <= ax < ndim):
+        raise AxisError(f"axis {ax} is out of bounds for array of dimension {ndim}")
+    if ax < 0:
+        ax += ndim
+    return ax
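+
+
+# Usage sketch (illustrative):
+# >>> normalize_axis_index(-1, 3)  # negative axes count from the end
+# 2
+# >>> normalize_axis_index(3, 3)  # raises AxisError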
+
+
+# from https://github.com/numpy/numpy/blob/main/numpy/core/numeric.py#L1378
+def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
+    """
+    Normalizes an axis argument into a tuple of non-negative integer axes.
+
+    This handles shorthands such as ``1`` and converts them to ``(1,)``,
+    as well as performing the handling of negative indices covered by
+    `normalize_axis_index`.
+
+    By default, this forbids axes from being specified multiple times.
+    Used internally by multi-axis-checking logic.
+
+    Parameters
+    ----------
+    axis : int, iterable of int
+        The un-normalized index or indices of the axis.
+    ndim : int
+        The number of dimensions of the array that `axis` should be normalized
+        against.
+    argname : str, optional
+        A prefix to put before the error message, typically the name of the
+        argument.
+    allow_duplicate : bool, optional
+        If False, the default, disallow an axis from being specified twice.
+
+    Returns
+    -------
+    normalized_axes : tuple of int
+        The normalized axis index, such that `0 <= normalized_axis < ndim`.
+    """
+    # Optimization to speed-up the most common cases.
+    if type(axis) not in (tuple, list):
+        try:
+            axis = [operator.index(axis)]
+        except TypeError:
+            pass
+    # Going via an iterator directly is slower than via list comprehension.
+    axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis])
+    if not allow_duplicate and len(set(axis)) != len(axis):
+        if argname:
+            raise ValueError(f"repeated axis in `{argname}` argument")
+        else:
+            raise ValueError("repeated axis")
+    return axis
+
+
+def allow_only_single_axis(axis):
+    if axis is None:
+        return axis
+    if len(axis) != 1:
+        raise NotImplementedError("does not handle tuple axis")
+    return axis[0]
+
+
+def expand_shape(arr_shape, axis):
+    # taken from numpy 1.23.x, expand_dims function
+    if type(axis) not in (list, tuple):
+        axis = (axis,)
+    out_ndim = len(axis) + len(arr_shape)
+    axis = normalize_axis_tuple(axis, out_ndim)
+    shape_it = iter(arr_shape)
+    shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
+    return shape
+
+
+def apply_keepdims(tensor, axis, ndim):
+    if axis is None:
+        # tensor was a scalar
+        shape = (1,) * ndim
+        tensor = tensor.expand(shape).contiguous()
+    else:
+        shape = expand_shape(tensor.shape, axis)
+        tensor = tensor.reshape(shape)
+    return tensor
+
+
+def axis_none_flatten(*tensors, axis=None):
+    """Flatten the arrays if axis is None."""
+    if axis is None:
+        tensors = tuple(ar.flatten() for ar in tensors)
+        return tensors, 0
+    else:
+        return tensors, axis
+
+
+def typecast_tensor(t, target_dtype, casting):
+    """Dtype-cast a tensor to target_dtype.
+
+    Parameters
+    ----------
+    t : torch.Tensor
+        The tensor to cast
+    target_dtype : torch dtype object
+        The dtype to cast the tensor to
+    casting : str
+        The casting mode, see `np.can_cast`
+
+    Returns
+    -------
+    `torch.Tensor` of the `target_dtype` dtype
+
+    Raises
+    ------
+    TypeError
+        if the argument cannot be cast according to the `casting` rule
+
+    """
+    can_cast = _dtypes_impl.can_cast_impl
+
+    if not can_cast(t.dtype, target_dtype, casting=casting):
+        raise TypeError(
+            f"Cannot cast array data from {t.dtype} to"
+            f" {target_dtype} according to the rule '{casting}'"
+        )
+    return cast_if_needed(t, target_dtype)
+
+
+def typecast_tensors(tensors, target_dtype, casting):
+    return tuple(typecast_tensor(t, target_dtype, casting) for t in tensors)
+
+
+def _try_convert_to_tensor(obj):
+    try:
+        tensor = torch.as_tensor(obj)
+    except Exception as e:
+        mesg = f"failed to convert {obj} to ndarray. \nInternal error is: {str(e)}."
+        raise NotImplementedError(mesg)  # noqa: B904
+    return tensor
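+
+
+# Usage sketch (illustrative, assuming can_cast_impl mirrors np.can_cast):
+# float -> int is rejected under "same_kind" but allowed under "unsafe":
+# >>> t = torch.tensor([1.5])
+# >>> typecast_tensor(t, torch.int64, "unsafe")
+# tensor([1])
+# >>> typecast_tensor(t, torch.int64, "same_kind")  # raises TypeError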
+
+
+def _coerce_to_tensor(obj, dtype=None, copy=False, ndmin=0):
+    """The core logic of the array(...) function.
+
+    Parameters
+    ----------
+    obj : tensor_like
+        The thing to coerce
+    dtype : torch.dtype object or None
+        Coerce to this torch dtype
+    copy : bool
+        Copy or not
+    ndmin : int
+        The result has at least this many dimensions
+
+    Returns
+    -------
+    tensor : torch.Tensor
+        a tensor object with requested dtype, ndim and copy semantics.
+
+    Notes
+    -----
+    This is almost a "tensor_like" coercion function. Does not handle wrapper
+    ndarrays (those should be handled in the ndarray-aware layer prior to
+    invoking this function).
+    """
+    if isinstance(obj, torch.Tensor):
+        tensor = obj
+    else:
+        # tensor.dtype is the pytorch default, typically float32. If obj's elements
+        # are not exactly representable in float32, we've lost precision:
+        # >>> torch.as_tensor(1e12).item() - 1e12
+        # -4096.0
+        default_dtype = torch.get_default_dtype()
+        torch.set_default_dtype(_dtypes_impl.get_default_dtype_for(torch.float32))
+        try:
+            tensor = _try_convert_to_tensor(obj)
+        finally:
+            torch.set_default_dtype(default_dtype)
+
+    # type cast if requested
+    tensor = cast_if_needed(tensor, dtype)
+
+    # adjust ndim if needed
+    ndim_extra = ndmin - tensor.ndim
+    if ndim_extra > 0:
+        tensor = tensor.view((1,) * ndim_extra + tensor.shape)
+
+    # copy if requested
+    if copy:
+        tensor = tensor.clone()
+
+    return tensor
+
+
+def ndarrays_to_tensors(*inputs):
+    """Convert all ndarrays from `inputs` to tensors. (other things are intact)"""
+    from ._ndarray import ndarray
+
+    if len(inputs) == 0:
+        raise ValueError("expected at least one input")
+    elif len(inputs) == 1:
+        input_ = inputs[0]
+        if isinstance(input_, ndarray):
+            return input_.tensor
+        elif isinstance(input_, tuple):
+            result = []
+            for sub_input in input_:
+                sub_result = ndarrays_to_tensors(sub_input)
+                result.append(sub_result)
+            return tuple(result)
+        else:
+            return input_
+    else:
+        assert isinstance(inputs, tuple)  # sanity check
+        return ndarrays_to_tensors(inputs)
diff --git a/parrot/lib/python3.10/site-packages/torch/_numpy/linalg.py b/parrot/lib/python3.10/site-packages/torch/_numpy/linalg.py
new file mode 100644
index 0000000000000000000000000000000000000000..093851142dbca5ab714126a532c0618dee298c45
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/_numpy/linalg.py
@@ -0,0 +1,239 @@
+# mypy: ignore-errors
+
+from __future__ import annotations
+
+import functools
+import math
+from typing import Sequence
+
+import torch
+
+from . import _dtypes_impl, _util
+from ._normalizations import ArrayLike, KeepDims, normalizer
+
+
+class LinAlgError(Exception):
+    pass
+
+
+def _atleast_float_1(a):
+    if not (a.dtype.is_floating_point or a.dtype.is_complex):
+        a = a.to(_dtypes_impl.default_dtypes().float_dtype)
+    return a
+
+
+def _atleast_float_2(a, b):
+    dtyp = _dtypes_impl.result_type_impl(a, b)
+    if not (dtyp.is_floating_point or dtyp.is_complex):
+        dtyp = _dtypes_impl.default_dtypes().float_dtype
+
+    a = _util.cast_if_needed(a, dtyp)
+    b = _util.cast_if_needed(b, dtyp)
+    return a, b
+
+
+def linalg_errors(func):
+    @functools.wraps(func)
+    def wrapped(*args, **kwds):
+        try:
+            return func(*args, **kwds)
+        except torch._C._LinAlgError as e:
+            raise LinAlgError(*e.args)  # noqa: B904
+
+    return wrapped
+
+
+# ### Matrix and vector products ###
+
+
+@normalizer
+@linalg_errors
+def matrix_power(a: ArrayLike, n):
+    a = _atleast_float_1(a)
+    return torch.linalg.matrix_power(a, n)
+
+
+@normalizer
+@linalg_errors
+def multi_dot(inputs: Sequence[ArrayLike], *, out=None):
+    return torch.linalg.multi_dot(inputs)
+
+
+# ### Solving equations and inverting matrices ###
+
+
+@normalizer
+@linalg_errors
+def solve(a: ArrayLike, b: ArrayLike):
+    a, b = _atleast_float_2(a, b)
+    return torch.linalg.solve(a, b)
+
+
+@normalizer
+@linalg_errors
+def lstsq(a: ArrayLike, b: ArrayLike, rcond=None):
+    a, b = _atleast_float_2(a, b)
+    # NumPy uses gelsd: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/umath_linalg.cpp#L3991
+    # On CUDA, only `gels` is available though, so use it instead.
+    driver = "gels" if a.is_cuda or b.is_cuda else "gelsd"
+    return torch.linalg.lstsq(a, b, rcond=rcond, driver=driver)
+
+
+@normalizer
+@linalg_errors
+def inv(a: ArrayLike):
+    a = _atleast_float_1(a)
+    result = torch.linalg.inv(a)
+    return result
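+
+
+# Usage sketch (illustrative): torch's internal error type is translated, so
+# callers can catch the NumPy-style exception:
+# >>> try:
+# ...     solve(torch.zeros(2, 2), torch.ones(2))
+# ... except LinAlgError:
+# ...     print("singular matrix")
+# singular matrix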
+
+
+@normalizer
+@linalg_errors
+def pinv(a: ArrayLike, rcond=1e-15, hermitian=False):
+    a = _atleast_float_1(a)
+    return torch.linalg.pinv(a, rtol=rcond, hermitian=hermitian)
+
+
+@normalizer
+@linalg_errors
+def tensorsolve(a: ArrayLike, b: ArrayLike, axes=None):
+    a, b = _atleast_float_2(a, b)
+    return torch.linalg.tensorsolve(a, b, dims=axes)
+
+
+@normalizer
+@linalg_errors
+def tensorinv(a: ArrayLike, ind=2):
+    a = _atleast_float_1(a)
+    return torch.linalg.tensorinv(a, ind=ind)
+
+
+# ### Norms and other numbers ###
+
+
+@normalizer
+@linalg_errors
+def det(a: ArrayLike):
+    a = _atleast_float_1(a)
+    return torch.linalg.det(a)
+
+
+@normalizer
+@linalg_errors
+def slogdet(a: ArrayLike):
+    a = _atleast_float_1(a)
+    return torch.linalg.slogdet(a)
+
+
+@normalizer
+@linalg_errors
+def cond(x: ArrayLike, p=None):
+    x = _atleast_float_1(x)
+
+    # check if empty
+    # cf: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1744
+    if x.numel() == 0 and math.prod(x.shape[-2:]) == 0:
+        raise LinAlgError("cond is not defined on empty arrays")
+
+    result = torch.linalg.cond(x, p=p)
+
+    # Convert nans to infs (NumPy does it in a data-dependent way, depending on
+    # whether the input array has nans or not).
+    # XXX: NumPy does this: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1744
+    return torch.where(torch.isnan(result), float("inf"), result)
+
+
+@normalizer
+@linalg_errors
+def matrix_rank(a: ArrayLike, tol=None, hermitian=False):
+    a = _atleast_float_1(a)
+
+    if a.ndim < 2:
+        return int((a != 0).any())
+
+    if tol is None:
+        # follow https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1885
+        atol = 0
+        rtol = max(a.shape[-2:]) * torch.finfo(a.dtype).eps
+    else:
+        atol, rtol = tol, 0
+    return torch.linalg.matrix_rank(a, atol=atol, rtol=rtol, hermitian=hermitian)
+
+
+@normalizer
+@linalg_errors
+def norm(x: ArrayLike, ord=None, axis=None, keepdims: KeepDims = False):
+    x = _atleast_float_1(x)
+    return torch.linalg.norm(x, ord=ord, dim=axis)
+
+
+# ### Decompositions ###
+
+
+@normalizer
+@linalg_errors
+def cholesky(a: ArrayLike):
+    a = _atleast_float_1(a)
+    return torch.linalg.cholesky(a)
+
+
+@normalizer
+@linalg_errors
+def qr(a: ArrayLike, mode="reduced"):
+    a = _atleast_float_1(a)
+    result = torch.linalg.qr(a, mode=mode)
+    if mode == "r":
+        # match NumPy
+        result = result.R
+    return result
+
+
+@normalizer
+@linalg_errors
+def svd(a: ArrayLike, full_matrices=True, compute_uv=True, hermitian=False):
+    a = _atleast_float_1(a)
+    if not compute_uv:
+        return torch.linalg.svdvals(a)
+
+    # NB: ignore the hermitian= argument (no pytorch equivalent)
+    result = torch.linalg.svd(a, full_matrices=full_matrices)
+    return result
+
+
+# ### Eigenvalues and eigenvectors ###
+
+
+@normalizer
+@linalg_errors
+def eig(a: ArrayLike):
+    a = _atleast_float_1(a)
+    w, vt = torch.linalg.eig(a)
+
+    if not a.is_complex() and w.is_complex() and (w.imag == 0).all():
+        w = w.real
+        vt = vt.real
+    return w, vt
+
+
+@normalizer
+@linalg_errors
+def eigh(a: ArrayLike, UPLO="L"):
+    a = _atleast_float_1(a)
+    return torch.linalg.eigh(a, UPLO=UPLO)
+
+
+@normalizer
+@linalg_errors
+def eigvals(a: ArrayLike):
+    a = _atleast_float_1(a)
+    result = torch.linalg.eigvals(a)
+    if not a.is_complex() and result.is_complex() and (result.imag == 0).all():
+        result = result.real
+    return result
+
+
+@normalizer
+@linalg_errors
+def eigvalsh(a: ArrayLike, UPLO="L"):
+    a = _atleast_float_1(a)
+    return torch.linalg.eigvalsh(a, UPLO=UPLO)
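+
+
+# Usage sketch (illustrative): with tol=None the cutoff follows NumPy,
+# rtol = max(M, N) * eps, so near-zero singular values do not count:
+# >>> a = torch.diag(torch.tensor([1.0, 1e-9]))
+# >>> matrix_rank(a)  # -> 1; the 1e-9 singular value is below the cutoff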
diff --git a/parrot/lib/python3.10/site-packages/torch/xpu/__pycache__/random.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/xpu/__pycache__/random.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8292b8500374c1d67c98e21cb2f111741cdb3900
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/xpu/__pycache__/random.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/_pywrap_tensorflow_interpreter_wrapper.so b/videochat2/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/_pywrap_tensorflow_interpreter_wrapper.so
new file mode 100644
index 0000000000000000000000000000000000000000..04f75b8eebf2e13b8e1963d8bd3938e22466df1b
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/_pywrap_tensorflow_interpreter_wrapper.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:919e84c4acc62fc9412c6e53780036acabcb004c460ece6f65779760d6745e77
+size 5922168
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/__pycache__/preempted_hook.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/__pycache__/preempted_hook.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e52f8370210093c15da0e31f019a36f1c7938786
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/__pycache__/preempted_hook.cpython-310.pyc differ