diff --git a/mplug_owl2/share/terminfo/8/8510 b/mplug_owl2/share/terminfo/8/8510
new file mode 100644
index 0000000000000000000000000000000000000000..2874d511c91ced0214a77389dce5eb056d1482a7
Binary files /dev/null and b/mplug_owl2/share/terminfo/8/8510 differ
diff --git a/mplug_owl2/share/terminfo/l/linux-c-nc b/mplug_owl2/share/terminfo/l/linux-c-nc
new file mode 100644
index 0000000000000000000000000000000000000000..13473aec2b6d7a4917c753b0ab1014fd8a2de342
Binary files /dev/null and b/mplug_owl2/share/terminfo/l/linux-c-nc differ
diff --git a/mplug_owl2/share/terminfo/l/linux-koi8 b/mplug_owl2/share/terminfo/l/linux-koi8
new file mode 100644
index 0000000000000000000000000000000000000000..0fbad8ca3d2fe79e987130288adf1a5877348d9e
Binary files /dev/null and b/mplug_owl2/share/terminfo/l/linux-koi8 differ
diff --git a/mplug_owl2/share/terminfo/l/linux-m b/mplug_owl2/share/terminfo/l/linux-m
new file mode 100644
index 0000000000000000000000000000000000000000..02b202e4e3f2afeccd783d4b5aefd0d78014f036
Binary files /dev/null and b/mplug_owl2/share/terminfo/l/linux-m differ
diff --git a/mplug_owl2/share/terminfo/l/lisa b/mplug_owl2/share/terminfo/l/lisa
new file mode 100644
index 0000000000000000000000000000000000000000..2bbfd1ec9478c1a8f50af4e8d0be98662c56a792
Binary files /dev/null and b/mplug_owl2/share/terminfo/l/lisa differ
diff --git a/mplug_owl2/share/terminfo/l/luna68k b/mplug_owl2/share/terminfo/l/luna68k
new file mode 100644
index 0000000000000000000000000000000000000000..c9a18a80e5849ac23dbbc22df44f922bb30835bc
Binary files /dev/null and b/mplug_owl2/share/terminfo/l/luna68k differ
diff --git a/mplug_owl2/share/terminfo/p/p14-w b/mplug_owl2/share/terminfo/p/p14-w
new file mode 100644
index 0000000000000000000000000000000000000000..1d7df53b710aff295e42eeca63d45879279a70fe
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/p14-w differ
diff --git a/mplug_owl2/share/terminfo/p/p19 b/mplug_owl2/share/terminfo/p/p19
new file mode 100644
index 0000000000000000000000000000000000000000..4b1eda0ea4ee7d8afc1f8a16ed14eef213605712
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/p19 differ
diff --git a/mplug_owl2/share/terminfo/p/p4 b/mplug_owl2/share/terminfo/p/p4
new file mode 100644
index 0000000000000000000000000000000000000000..1d53e488c16eac6171ad331aeba03638332c7940
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/p4 differ
diff --git a/mplug_owl2/share/terminfo/p/p8-w b/mplug_owl2/share/terminfo/p/p8-w
new file mode 100644
index 0000000000000000000000000000000000000000..8f254d3604fcd521d7b155ab6a90e2717ba6b1c8
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/p8-w differ
diff --git a/mplug_owl2/share/terminfo/p/pc-coherent b/mplug_owl2/share/terminfo/p/pc-coherent
new file mode 100644
index 0000000000000000000000000000000000000000..e6e3856d35b312c32688be30ac6bdda2f7816393
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pc-coherent differ
diff --git a/mplug_owl2/share/terminfo/p/pc-minix b/mplug_owl2/share/terminfo/p/pc-minix
new file mode 100644
index 0000000000000000000000000000000000000000..b78d8a1e9434fe8ad99af043f2fb537615da46d1
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pc-minix differ
diff --git a/mplug_owl2/share/terminfo/p/pc-venix b/mplug_owl2/share/terminfo/p/pc-venix
new file mode 100644
index 0000000000000000000000000000000000000000..32a7ec05eeaa5938c238b39519911e132250b0b1
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pc-venix differ
diff --git a/mplug_owl2/share/terminfo/p/pc3r b/mplug_owl2/share/terminfo/p/pc3r
new file mode 100644 index 0000000000000000000000000000000000000000..0cb4322ea844d6ea0ac6b122ae7bf194e4c81c68 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pc3r differ diff --git a/mplug_owl2/share/terminfo/p/pc3r-m b/mplug_owl2/share/terminfo/p/pc3r-m new file mode 100644 index 0000000000000000000000000000000000000000..e5cab4c9851f3866c647d13c1f8af5c88260bfff Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pc3r-m differ diff --git a/mplug_owl2/share/terminfo/p/pcansi b/mplug_owl2/share/terminfo/p/pcansi new file mode 100644 index 0000000000000000000000000000000000000000..2e24b9ac0983d3f5b99e828eca5e86504c6926a2 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcansi differ diff --git a/mplug_owl2/share/terminfo/p/pcansi-33 b/mplug_owl2/share/terminfo/p/pcansi-33 new file mode 100644 index 0000000000000000000000000000000000000000..77ec07a919bdc8c2553d752f939c2a65198964b1 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcansi-33 differ diff --git a/mplug_owl2/share/terminfo/p/pcansi-43-m b/mplug_owl2/share/terminfo/p/pcansi-43-m new file mode 100644 index 0000000000000000000000000000000000000000..be5691893bf0209fcf2a2b12f960a6d341708385 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcansi-43-m differ diff --git a/mplug_owl2/share/terminfo/p/pcansi-m b/mplug_owl2/share/terminfo/p/pcansi-m new file mode 100644 index 0000000000000000000000000000000000000000..8be0ab6f54451f2304eaf37dee1f05901caf7022 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcansi-m differ diff --git a/mplug_owl2/share/terminfo/p/pcansi25m b/mplug_owl2/share/terminfo/p/pcansi25m new file mode 100644 index 0000000000000000000000000000000000000000..eea464609fa627787de94bef3b401d01f63540ae Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcansi25m differ diff --git a/mplug_owl2/share/terminfo/p/pccon+keys b/mplug_owl2/share/terminfo/p/pccon+keys new file mode 100644 index 0000000000000000000000000000000000000000..75b7a4e80d5264e565cd919eee5eee93cd49d692 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pccon+keys differ diff --git a/mplug_owl2/share/terminfo/p/pccons b/mplug_owl2/share/terminfo/p/pccons new file mode 100644 index 0000000000000000000000000000000000000000..5d7c9457abb34e75d0d0c1f5d58b68eefa697978 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pccons differ diff --git a/mplug_owl2/share/terminfo/p/pcplot b/mplug_owl2/share/terminfo/p/pcplot new file mode 100644 index 0000000000000000000000000000000000000000..2a358389845686fa5df78667c83160b853db9082 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcplot differ diff --git a/mplug_owl2/share/terminfo/p/pcvt25-color b/mplug_owl2/share/terminfo/p/pcvt25-color new file mode 100644 index 0000000000000000000000000000000000000000..e73b0491d038ce8af15feeab335c2b620929ec7b Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcvt25-color differ diff --git a/mplug_owl2/share/terminfo/p/pcvt40 b/mplug_owl2/share/terminfo/p/pcvt40 new file mode 100644 index 0000000000000000000000000000000000000000..92aa78e0d7b48380ba93ceeebf9757b532e2e9db Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcvt40 differ diff --git a/mplug_owl2/share/terminfo/p/pcvt40w b/mplug_owl2/share/terminfo/p/pcvt40w new file mode 100644 index 0000000000000000000000000000000000000000..9c157aa5bd946f32b289b3e98b63245189aa4f33 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcvt40w differ diff --git a/mplug_owl2/share/terminfo/p/pe7000c b/mplug_owl2/share/terminfo/p/pe7000c new file mode 100644 
index 0000000000000000000000000000000000000000..e86846227c04c7c20c2d8e086fb824569b4ebf65 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pe7000c differ diff --git a/mplug_owl2/share/terminfo/p/pmconsole b/mplug_owl2/share/terminfo/p/pmconsole new file mode 100644 index 0000000000000000000000000000000000000000..5a5b783f338fc1dddbfd715f8810a78d3f013041 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pmconsole differ diff --git a/mplug_owl2/share/terminfo/p/prism12 b/mplug_owl2/share/terminfo/p/prism12 new file mode 100644 index 0000000000000000000000000000000000000000..12de25677be37fc5fee85d23eda474b8cb642755 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/prism12 differ diff --git a/mplug_owl2/share/terminfo/p/prism12-w b/mplug_owl2/share/terminfo/p/prism12-w new file mode 100644 index 0000000000000000000000000000000000000000..2fe870dbc535c37e247b6f7213e7d89ec4e3879e Binary files /dev/null and b/mplug_owl2/share/terminfo/p/prism12-w differ diff --git a/mplug_owl2/share/terminfo/p/prism7 b/mplug_owl2/share/terminfo/p/prism7 new file mode 100644 index 0000000000000000000000000000000000000000..539b4ff6570496b03f33a1c661ff661137089364 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/prism7 differ diff --git a/mplug_owl2/share/terminfo/p/prism8 b/mplug_owl2/share/terminfo/p/prism8 new file mode 100644 index 0000000000000000000000000000000000000000..12fd9e6abc9789d25991acfcd58a0d0e89650205 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/prism8 differ diff --git a/mplug_owl2/share/terminfo/p/prism8-w b/mplug_owl2/share/terminfo/p/prism8-w new file mode 100644 index 0000000000000000000000000000000000000000..8f254d3604fcd521d7b155ab6a90e2717ba6b1c8 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/prism8-w differ diff --git a/mplug_owl2/share/terminfo/p/prism9-w b/mplug_owl2/share/terminfo/p/prism9-w new file mode 100644 index 0000000000000000000000000000000000000000..44029ba7f9d33ae39f2d11a5ddbb8d53e8d6874f Binary files /dev/null and b/mplug_owl2/share/terminfo/p/prism9-w differ diff --git a/mplug_owl2/share/terminfo/p/pro350 b/mplug_owl2/share/terminfo/p/pro350 new file mode 100644 index 0000000000000000000000000000000000000000..e73dc12216fa051e6de0de0905aff1e0cd9ff4d1 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pro350 differ diff --git a/mplug_owl2/share/terminfo/p/psterm b/mplug_owl2/share/terminfo/p/psterm new file mode 100644 index 0000000000000000000000000000000000000000..d7ec53e2a8b70c03e9a3cd8232df21405bbc0c58 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/psterm differ diff --git a/mplug_owl2/share/terminfo/p/pt200w b/mplug_owl2/share/terminfo/p/pt200w new file mode 100644 index 0000000000000000000000000000000000000000..1f73e3468a655831aa03a6d73c88f794337019d1 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pt200w differ diff --git a/mplug_owl2/share/terminfo/p/pt505 b/mplug_owl2/share/terminfo/p/pt505 new file mode 100644 index 0000000000000000000000000000000000000000..6640ef9e893070e99b99b7805bca48846a13aa95 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pt505 differ diff --git a/mplug_owl2/share/terminfo/p/putty+fnkeys b/mplug_owl2/share/terminfo/p/putty+fnkeys new file mode 100644 index 0000000000000000000000000000000000000000..8d0c4405aac6bfa23245adc392d9477456a1b922 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/putty+fnkeys differ diff --git a/mplug_owl2/share/terminfo/p/putty+fnkeys+esc b/mplug_owl2/share/terminfo/p/putty+fnkeys+esc new file mode 100644 index 
0000000000000000000000000000000000000000..5b307946e9d09a2beae6df241eec4f4637873e5d Binary files /dev/null and b/mplug_owl2/share/terminfo/p/putty+fnkeys+esc differ diff --git a/mplug_owl2/share/terminfo/p/putty+fnkeys+xterm b/mplug_owl2/share/terminfo/p/putty+fnkeys+xterm new file mode 100644 index 0000000000000000000000000000000000000000..994007e6b4d4c24eaf9099bdfdce13565e3d341b Binary files /dev/null and b/mplug_owl2/share/terminfo/p/putty+fnkeys+xterm differ diff --git a/mplug_owl2/share/terminfo/p/putty+keypad b/mplug_owl2/share/terminfo/p/putty+keypad new file mode 100644 index 0000000000000000000000000000000000000000..2bf59a0a4aa8d8f2d31ee2d74af30e61ae1779ed Binary files /dev/null and b/mplug_owl2/share/terminfo/p/putty+keypad differ diff --git a/mplug_owl2/share/terminfo/p/putty-m1b b/mplug_owl2/share/terminfo/p/putty-m1b new file mode 100644 index 0000000000000000000000000000000000000000..148e727c4470d4519f9dd86537d0f3d3924c74e5 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/putty-m1b differ diff --git a/mplug_owl2/share/terminfo/p/putty-noapp b/mplug_owl2/share/terminfo/p/putty-noapp new file mode 100644 index 0000000000000000000000000000000000000000..c67bf882bf52d45a9f5cdf6e52fa2e21ee377030 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/putty-noapp differ diff --git a/mplug_owl2/share/terminfo/p/putty-vt100 b/mplug_owl2/share/terminfo/p/putty-vt100 new file mode 100644 index 0000000000000000000000000000000000000000..c0e68034c30804edfe107039f1d009fe7d305bd5 Binary files /dev/null and b/mplug_owl2/share/terminfo/p/putty-vt100 differ diff --git a/openflamingo/lib/python3.10/site-packages/networkx-3.2.1.dist-info/INSTALLER b/openflamingo/lib/python3.10/site-packages/networkx-3.2.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/networkx-3.2.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/openflamingo/lib/python3.10/site-packages/networkx-3.2.1.dist-info/METADATA b/openflamingo/lib/python3.10/site-packages/networkx-3.2.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..ac51fe019585ed14b015f89a8d8908ccc7c356c2 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/networkx-3.2.1.dist-info/METADATA @@ -0,0 +1,135 @@ +Metadata-Version: 2.1 +Name: networkx +Version: 3.2.1 +Summary: Python package for creating and manipulating graphs and networks +Author-email: Aric Hagberg +Maintainer-email: NetworkX Developers +Project-URL: Homepage, https://networkx.org/ +Project-URL: Bug Tracker, https://github.com/networkx/networkx/issues +Project-URL: Documentation, https://networkx.org/documentation/stable/ +Project-URL: Source Code, https://github.com/networkx/networkx +Keywords: Networks,Graph Theory,Mathematics,network,graph,discrete mathematics,math +Platform: Linux +Platform: Mac OSX +Platform: Windows +Platform: Unix +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3 :: Only 
+Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Scientific/Engineering :: Bio-Informatics +Classifier: Topic :: Scientific/Engineering :: Information Analysis +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Physics +Requires-Python: >=3.9 +Description-Content-Type: text/x-rst +License-File: LICENSE.txt +Provides-Extra: default +Requires-Dist: numpy >=1.22 ; extra == 'default' +Requires-Dist: scipy !=1.11.0,!=1.11.1,>=1.9 ; extra == 'default' +Requires-Dist: matplotlib >=3.5 ; extra == 'default' +Requires-Dist: pandas >=1.4 ; extra == 'default' +Provides-Extra: developer +Requires-Dist: changelist ==0.4 ; extra == 'developer' +Requires-Dist: pre-commit >=3.2 ; extra == 'developer' +Requires-Dist: mypy >=1.1 ; extra == 'developer' +Requires-Dist: rtoml ; extra == 'developer' +Provides-Extra: doc +Requires-Dist: sphinx >=7 ; extra == 'doc' +Requires-Dist: pydata-sphinx-theme >=0.14 ; extra == 'doc' +Requires-Dist: sphinx-gallery >=0.14 ; extra == 'doc' +Requires-Dist: numpydoc >=1.6 ; extra == 'doc' +Requires-Dist: pillow >=9.4 ; extra == 'doc' +Requires-Dist: nb2plots >=0.7 ; extra == 'doc' +Requires-Dist: texext >=0.6.7 ; extra == 'doc' +Requires-Dist: nbconvert <7.9 ; extra == 'doc' +Provides-Extra: extra +Requires-Dist: lxml >=4.6 ; extra == 'extra' +Requires-Dist: pygraphviz >=1.11 ; extra == 'extra' +Requires-Dist: pydot >=1.4.2 ; extra == 'extra' +Requires-Dist: sympy >=1.10 ; extra == 'extra' +Provides-Extra: test +Requires-Dist: pytest >=7.2 ; extra == 'test' +Requires-Dist: pytest-cov >=4.0 ; extra == 'test' + +NetworkX +======== + + +.. image:: https://github.com/networkx/networkx/workflows/test/badge.svg?branch=main + :target: https://github.com/networkx/networkx/actions?query=workflow%3A%22test%22 + +.. image:: https://codecov.io/gh/networkx/networkx/branch/main/graph/badge.svg + :target: https://app.codecov.io/gh/networkx/networkx/branch/main + +.. image:: https://img.shields.io/github/labels/networkx/networkx/Good%20First%20Issue?color=green&label=Contribute%20&style=flat-square + :target: https://github.com/networkx/networkx/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+First+Issue%22 + + +NetworkX is a Python package for the creation, manipulation, +and study of the structure, dynamics, and functions +of complex networks. + +- **Website (including documentation):** https://networkx.org +- **Mailing list:** https://groups.google.com/forum/#!forum/networkx-discuss +- **Source:** https://github.com/networkx/networkx +- **Bug reports:** https://github.com/networkx/networkx/issues +- **Report a security vulnerability:** https://tidelift.com/security +- **Tutorial:** https://networkx.org/documentation/latest/tutorial.html +- **GitHub Discussions:** https://github.com/networkx/networkx/discussions + +Simple example +-------------- + +Find the shortest path between two nodes in an undirected graph: + +.. code:: pycon + + >>> import networkx as nx + >>> G = nx.Graph() + >>> G.add_edge("A", "B", weight=4) + >>> G.add_edge("B", "D", weight=2) + >>> G.add_edge("A", "C", weight=3) + >>> G.add_edge("C", "D", weight=4) + >>> nx.shortest_path(G, "A", "D", weight="weight") + ['A', 'B', 'D'] + +Install +------- + +Install the latest version of NetworkX:: + + $ pip install networkx + +Install with all optional dependencies:: + + $ pip install networkx[all] + +For additional details, please see `INSTALL.rst`. + +Bugs +---- + +Please report any bugs that you find `here `_. 
+Or, even better, fork the repository on `GitHub `_ +and create a pull request (PR). We welcome all changes, big or small, and we +will help you make the PR if you are new to `git` (just ask on the issue and/or +see `CONTRIBUTING.rst`). + +License +------- + +Released under the 3-Clause BSD license (see `LICENSE.txt`):: + + Copyright (C) 2004-2023 NetworkX Developers + Aric Hagberg + Dan Schult + Pieter Swart diff --git a/openflamingo/lib/python3.10/site-packages/networkx-3.2.1.dist-info/REQUESTED b/openflamingo/lib/python3.10/site-packages/networkx-3.2.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/openflamingo/lib/python3.10/site-packages/networkx-3.2.1.dist-info/WHEEL b/openflamingo/lib/python3.10/site-packages/networkx-3.2.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..7e688737d490be3643d705bc16b5a77f7bd567b7 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/networkx-3.2.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/openflamingo/lib/python3.10/site-packages/networkx-3.2.1.dist-info/entry_points.txt b/openflamingo/lib/python3.10/site-packages/networkx-3.2.1.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..2170e9f4285422f4f95b05fa682a9a479c19bf24 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/networkx-3.2.1.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[networkx.backends] +nx-loopback = networkx.classes.tests.dispatch_interface:dispatcher diff --git a/openflamingo/lib/python3.10/site-packages/psutil-6.1.1.dist-info/LICENSE b/openflamingo/lib/python3.10/site-packages/psutil-6.1.1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..cff5eb74e1badd1c5237ed2654b349530179ad1d --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/psutil-6.1.1.dist-info/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2009, Jay Loden, Dave Daeschler, Giampaolo Rodola +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of the psutil authors nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/openflamingo/lib/python3.10/site-packages/requests/_internal_utils.py b/openflamingo/lib/python3.10/site-packages/requests/_internal_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f2cf635e2937ee9b123a1498c5c5f723a6e20084 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/requests/_internal_utils.py @@ -0,0 +1,50 @@ +""" +requests._internal_utils +~~~~~~~~~~~~~~ + +Provides utility functions that are consumed internally by Requests +which depend on extremely few external helpers (such as compat) +""" +import re + +from .compat import builtin_str + +_VALID_HEADER_NAME_RE_BYTE = re.compile(rb"^[^:\s][^:\r\n]*$") +_VALID_HEADER_NAME_RE_STR = re.compile(r"^[^:\s][^:\r\n]*$") +_VALID_HEADER_VALUE_RE_BYTE = re.compile(rb"^\S[^\r\n]*$|^$") +_VALID_HEADER_VALUE_RE_STR = re.compile(r"^\S[^\r\n]*$|^$") + +_HEADER_VALIDATORS_STR = (_VALID_HEADER_NAME_RE_STR, _VALID_HEADER_VALUE_RE_STR) +_HEADER_VALIDATORS_BYTE = (_VALID_HEADER_NAME_RE_BYTE, _VALID_HEADER_VALUE_RE_BYTE) +HEADER_VALIDATORS = { + bytes: _HEADER_VALIDATORS_BYTE, + str: _HEADER_VALIDATORS_STR, +} + + +def to_native_string(string, encoding="ascii"): + """Given a string object, regardless of type, returns a representation of + that string in the native string type, encoding and decoding where + necessary. This assumes ASCII unless told otherwise. + """ + if isinstance(string, builtin_str): + out = string + else: + out = string.decode(encoding) + + return out + + +def unicode_is_ascii(u_string): + """Determine if unicode string only contains ASCII characters. + + :param str u_string: unicode string to check. Must be unicode + and not Python 2 `str`. + :rtype: bool + """ + assert isinstance(u_string, str) + try: + u_string.encode("ascii") + return True + except UnicodeEncodeError: + return False diff --git a/openflamingo/lib/python3.10/site-packages/requests/exceptions.py b/openflamingo/lib/python3.10/site-packages/requests/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..83986b489849131efeb7f286b328961205256fd8 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/requests/exceptions.py @@ -0,0 +1,151 @@ +""" +requests.exceptions +~~~~~~~~~~~~~~~~~~~ + +This module contains the set of Requests' exceptions. +""" +from urllib3.exceptions import HTTPError as BaseHTTPError + +from .compat import JSONDecodeError as CompatJSONDecodeError + + +class RequestException(IOError): + """There was an ambiguous exception that occurred while handling your + request. 
+ """ + + def __init__(self, *args, **kwargs): + """Initialize RequestException with `request` and `response` objects.""" + response = kwargs.pop("response", None) + self.response = response + self.request = kwargs.pop("request", None) + if response is not None and not self.request and hasattr(response, "request"): + self.request = self.response.request + super().__init__(*args, **kwargs) + + +class InvalidJSONError(RequestException): + """A JSON error occurred.""" + + +class JSONDecodeError(InvalidJSONError, CompatJSONDecodeError): + """Couldn't decode the text into json""" + + def __init__(self, *args, **kwargs): + """ + Construct the JSONDecodeError instance first with all + args. Then use it's args to construct the IOError so that + the json specific args aren't used as IOError specific args + and the error message from JSONDecodeError is preserved. + """ + CompatJSONDecodeError.__init__(self, *args) + InvalidJSONError.__init__(self, *self.args, **kwargs) + + def __reduce__(self): + """ + The __reduce__ method called when pickling the object must + be the one from the JSONDecodeError (be it json/simplejson) + as it expects all the arguments for instantiation, not just + one like the IOError, and the MRO would by default call the + __reduce__ method from the IOError due to the inheritance order. + """ + return CompatJSONDecodeError.__reduce__(self) + + +class HTTPError(RequestException): + """An HTTP error occurred.""" + + +class ConnectionError(RequestException): + """A Connection error occurred.""" + + +class ProxyError(ConnectionError): + """A proxy error occurred.""" + + +class SSLError(ConnectionError): + """An SSL error occurred.""" + + +class Timeout(RequestException): + """The request timed out. + + Catching this error will catch both + :exc:`~requests.exceptions.ConnectTimeout` and + :exc:`~requests.exceptions.ReadTimeout` errors. + """ + + +class ConnectTimeout(ConnectionError, Timeout): + """The request timed out while trying to connect to the remote server. + + Requests that produced this error are safe to retry. + """ + + +class ReadTimeout(Timeout): + """The server did not send any data in the allotted amount of time.""" + + +class URLRequired(RequestException): + """A valid URL is required to make a request.""" + + +class TooManyRedirects(RequestException): + """Too many redirects.""" + + +class MissingSchema(RequestException, ValueError): + """The URL scheme (e.g. 
http or https) is missing.""" + + +class InvalidSchema(RequestException, ValueError): + """The URL scheme provided is either invalid or unsupported.""" + + +class InvalidURL(RequestException, ValueError): + """The URL provided was somehow invalid.""" + + +class InvalidHeader(RequestException, ValueError): + """The header value provided was somehow invalid.""" + + +class InvalidProxyURL(InvalidURL): + """The proxy URL provided is invalid.""" + + +class ChunkedEncodingError(RequestException): + """The server declared chunked encoding but sent an invalid chunk.""" + + +class ContentDecodingError(RequestException, BaseHTTPError): + """Failed to decode response content.""" + + +class StreamConsumedError(RequestException, TypeError): + """The content for this response was already consumed.""" + + +class RetryError(RequestException): + """Custom retries logic failed""" + + +class UnrewindableBodyError(RequestException): + """Requests encountered an error when trying to rewind a body.""" + + +# Warnings + + +class RequestsWarning(Warning): + """Base warning for Requests.""" + + +class FileModeWarning(RequestsWarning, DeprecationWarning): + """A file was opened in text mode, but Requests determined its binary length.""" + + +class RequestsDependencyWarning(RequestsWarning): + """An imported dependency doesn't match the expected version range.""" diff --git a/openflamingo/lib/python3.10/site-packages/safetensors/__init__.pyi b/openflamingo/lib/python3.10/site-packages/safetensors/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..7781229fe91d0649996e257dccf9f6d0c38823cd --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/safetensors/__init__.pyi @@ -0,0 +1,149 @@ +# Generated content DO NOT EDIT +@staticmethod +def deserialize(bytes): + """ + Opens a safetensors lazily and returns tensors as asked + + Args: + data (`bytes`): + The byte content of a file + + Returns: + (`List[str, Dict[str, Dict[str, any]]]`): + The deserialized content is like: + [("tensor_name", {"shape": [2, 3], "dtype": "F32", "data": b"\0\0.." }), (...)] + """ + pass + +@staticmethod +def serialize(tensor_dict, metadata=None): + """ + Serializes raw data. + + Args: + tensor_dict (`Dict[str, Dict[Any]]`): + The tensor dict is like: + {"tensor_name": {"dtype": "F32", "shape": [2, 3], "data": b"\0\0"}} + metadata (`Dict[str, str]`, *optional*): + The optional purely text annotations + + Returns: + (`bytes`): + The serialized content. + """ + pass + +@staticmethod +def serialize_file(tensor_dict, filename, metadata=None): + """ + Serializes raw data. + + Args: + tensor_dict (`Dict[str, Dict[Any]]`): + The tensor dict is like: + {"tensor_name": {"dtype": "F32", "shape": [2, 3], "data": b"\0\0"}} + filename (`str`, or `os.PathLike`): + The name of the file to write into. + metadata (`Dict[str, str]`, *optional*): + The optional purely text annotations + + Returns: + (`bytes`): + The serialized content. + """ + pass + +class safe_open: + """ + Opens a safetensors lazily and returns tensors as asked + + Args: + filename (`str`, or `os.PathLike`): + The filename to open + + framework (`str`): + The framework you want you tensors in. Supported values: + `pt`, `tf`, `flax`, `numpy`. + + device (`str`, defaults to `"cpu"`): + The device on which you want the tensors. 
+ """ + + def __init__(self, filename, framework, device=...): + pass + def __enter__(self): + """ + Start the context manager + """ + pass + def __exit__(self, _exc_type, _exc_value, _traceback): + """ + Exits the context manager + """ + pass + def get_slice(self, name): + """ + Returns a full slice view object + + Args: + name (`str`): + The name of the tensor you want + + Returns: + (`PySafeSlice`): + A dummy object you can slice into to get a real tensor + Example: + ```python + from safetensors import safe_open + + with safe_open("model.safetensors", framework="pt", device=0) as f: + tensor_part = f.get_slice("embedding")[:, ::8] + + ``` + """ + pass + def get_tensor(self, name): + """ + Returns a full tensor + + Args: + name (`str`): + The name of the tensor you want + + Returns: + (`Tensor`): + The tensor in the framework you opened the file for. + + Example: + ```python + from safetensors import safe_open + + with safe_open("model.safetensors", framework="pt", device=0) as f: + tensor = f.get_tensor("embedding") + + ``` + """ + pass + def keys(self): + """ + Returns the names of the tensors in the file. + + Returns: + (`List[str]`): + The name of the tensors contained in that file + """ + pass + def metadata(self): + """ + Return the special non tensor information in the header + + Returns: + (`Dict[str, str]`): + The freeform metadata. + """ + pass + +class SafetensorError(Exception): + """ + Custom Python Exception for Safetensor errors. + """ diff --git a/openflamingo/lib/python3.10/site-packages/safetensors/__pycache__/numpy.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/safetensors/__pycache__/numpy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c28acc8a9a574bd12669da590fbc3a0169c447f5 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/safetensors/__pycache__/numpy.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/safetensors/__pycache__/paddle.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/safetensors/__pycache__/paddle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85b93ebe1a3ae8cb96c4175fb7cb04dc4adcee34 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/safetensors/__pycache__/paddle.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/safetensors/__pycache__/tensorflow.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/safetensors/__pycache__/tensorflow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7761ed1b8c11a2d97ebd766a0a414e2ac44466b Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/safetensors/__pycache__/tensorflow.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/safetensors/__pycache__/torch.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/safetensors/__pycache__/torch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9469d83de98226dafed2c72aa067442a02712e5a Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/safetensors/__pycache__/torch.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/safetensors/flax.py b/openflamingo/lib/python3.10/site-packages/safetensors/flax.py new file mode 100644 index 0000000000000000000000000000000000000000..d0b8375e038eff487af33fcfaa4a597aacb5743f --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/safetensors/flax.py @@ -0,0 +1,138 @@ +import 
os +from typing import Dict, Optional, Union + +import numpy as np + +import jax.numpy as jnp +from jax import Array +from safetensors import numpy, safe_open + + +def save(tensors: Dict[str, Array], metadata: Optional[Dict[str, str]] = None) -> bytes: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, Array]`): + The incoming tensors. Tensors need to be contiguous and dense. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. + + Returns: + `bytes`: The raw bytes representing the format + + Example: + + ```python + from safetensors.flax import save + from jax import numpy as jnp + + tensors = {"embedding": jnp.zeros((512, 1024)), "attention": jnp.zeros((256, 256))} + byte_data = save(tensors) + ``` + """ + np_tensors = _jnp2np(tensors) + return numpy.save(np_tensors, metadata=metadata) + + +def save_file( + tensors: Dict[str, Array], + filename: Union[str, os.PathLike], + metadata: Optional[Dict[str, str]] = None, +) -> None: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, Array]`): + The incoming tensors. Tensors need to be contiguous and dense. + filename (`str`, or `os.PathLike`)): + The filename we're saving into. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. + + Returns: + `None` + + Example: + + ```python + from safetensors.flax import save_file + from jax import numpy as jnp + + tensors = {"embedding": jnp.zeros((512, 1024)), "attention": jnp.zeros((256, 256))} + save_file(tensors, "model.safetensors") + ``` + """ + np_tensors = _jnp2np(tensors) + return numpy.save_file(np_tensors, filename, metadata=metadata) + + +def load(data: bytes) -> Dict[str, Array]: + """ + Loads a safetensors file into flax format from pure bytes. + + Args: + data (`bytes`): + The content of a safetensors file + + Returns: + `Dict[str, Array]`: dictionary that contains name as key, value as `Array` on cpu + + Example: + + ```python + from safetensors.flax import load + + file_path = "./my_folder/bert.safetensors" + with open(file_path, "rb") as f: + data = f.read() + + loaded = load(data) + ``` + """ + flat = numpy.load(data) + return _np2jnp(flat) + + +def load_file(filename: Union[str, os.PathLike]) -> Dict[str, Array]: + """ + Loads a safetensors file into flax format. 
+ + Args: + filename (`str`, or `os.PathLike`)): + The name of the file which contains the tensors + + Returns: + `Dict[str, Array]`: dictionary that contains name as key, value as `Array` + + Example: + + ```python + from safetensors.flax import load_file + + file_path = "./my_folder/bert.safetensors" + loaded = load_file(file_path) + ``` + """ + result = {} + with safe_open(filename, framework="flax") as f: + for k in f.keys(): + result[k] = f.get_tensor(k) + return result + + +def _np2jnp(numpy_dict: Dict[str, np.ndarray]) -> Dict[str, Array]: + for k, v in numpy_dict.items(): + numpy_dict[k] = jnp.array(v) + return numpy_dict + + +def _jnp2np(jnp_dict: Dict[str, Array]) -> Dict[str, np.array]: + for k, v in jnp_dict.items(): + jnp_dict[k] = np.asarray(v) + return jnp_dict diff --git a/openflamingo/lib/python3.10/site-packages/safetensors/numpy.py b/openflamingo/lib/python3.10/site-packages/safetensors/numpy.py new file mode 100644 index 0000000000000000000000000000000000000000..0b245f12c1c949456c9b2edb45a11343e6a8099a --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/safetensors/numpy.py @@ -0,0 +1,176 @@ +import os +import sys +from typing import Dict, Optional, Union + +import numpy as np + +from safetensors import deserialize, safe_open, serialize, serialize_file + + +def _tobytes(tensor: np.ndarray) -> bytes: + if not _is_little_endian(tensor): + tensor = tensor.byteswap(inplace=False) + return tensor.tobytes() + + +def save(tensor_dict: Dict[str, np.ndarray], metadata: Optional[Dict[str, str]] = None) -> bytes: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensor_dict (`Dict[str, np.ndarray]`): + The incoming tensors. Tensors need to be contiguous and dense. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. + + Returns: + `bytes`: The raw bytes representing the format + + Example: + + ```python + from safetensors.numpy import save + import numpy as np + + tensors = {"embedding": np.zeros((512, 1024)), "attention": np.zeros((256, 256))} + byte_data = save(tensors) + ``` + """ + flattened = {k: {"dtype": v.dtype.name, "shape": v.shape, "data": _tobytes(v)} for k, v in tensor_dict.items()} + serialized = serialize(flattened, metadata=metadata) + result = bytes(serialized) + return result + + +def save_file( + tensor_dict: Dict[str, np.ndarray], filename: Union[str, os.PathLike], metadata: Optional[Dict[str, str]] = None +) -> None: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensor_dict (`Dict[str, np.ndarray]`): + The incoming tensors. Tensors need to be contiguous and dense. + filename (`str`, or `os.PathLike`)): + The filename we're saving into. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. 
+ + Returns: + `None` + + Example: + + ```python + from safetensors.numpy import save_file + import numpy as np + + tensors = {"embedding": np.zeros((512, 1024)), "attention": np.zeros((256, 256))} + save_file(tensors, "model.safetensors") + ``` + """ + flattened = {k: {"dtype": v.dtype.name, "shape": v.shape, "data": _tobytes(v)} for k, v in tensor_dict.items()} + serialize_file(flattened, filename, metadata=metadata) + + +def load(data: bytes) -> Dict[str, np.ndarray]: + """ + Loads a safetensors file into numpy format from pure bytes. + + Args: + data (`bytes`): + The content of a safetensors file + + Returns: + `Dict[str, np.ndarray]`: dictionary that contains name as key, value as `np.ndarray` on cpu + + Example: + + ```python + from safetensors.numpy import load + + file_path = "./my_folder/bert.safetensors" + with open(file_path, "rb") as f: + data = f.read() + + loaded = load(data) + ``` + """ + flat = deserialize(data) + return _view2np(flat) + + +def load_file(filename: Union[str, os.PathLike]) -> Dict[str, np.ndarray]: + """ + Loads a safetensors file into numpy format. + + Args: + filename (`str`, or `os.PathLike`)): + The name of the file which contains the tensors + + Returns: + `Dict[str, np.ndarray]`: dictionary that contains name as key, value as `np.ndarray` + + Example: + + ```python + from safetensors.numpy import load_file + + file_path = "./my_folder/bert.safetensors" + loaded = load_file(file_path) + ``` + """ + result = {} + with safe_open(filename, framework="np") as f: + for k in f.keys(): + result[k] = f.get_tensor(k) + return result + + +_TYPES = { + "F64": np.float64, + "F32": np.float32, + "F16": np.float16, + "I64": np.int64, + "U64": np.uint64, + "I32": np.int32, + "U32": np.uint32, + "I16": np.int16, + "U16": np.uint16, + "I8": np.int8, + "U8": np.uint8, + "BOOL": bool, +} + + +def _getdtype(dtype_str: str) -> np.dtype: + return _TYPES[dtype_str] + + +def _view2np(safeview) -> Dict[str, np.ndarray]: + result = {} + for k, v in safeview: + dtype = _getdtype(v["dtype"]) + arr = np.frombuffer(v["data"], dtype=dtype).reshape(v["shape"]) + result[k] = arr + return result + + +def _is_little_endian(tensor: np.ndarray) -> bool: + byteorder = tensor.dtype.byteorder + if byteorder == "=": + if sys.byteorder == "little": + return True + else: + return False + elif byteorder == "|": + return True + elif byteorder == "<": + return True + elif byteorder == ">": + return False + raise ValueError(f"Unexpected byte order {byteorder}") diff --git a/openflamingo/lib/python3.10/site-packages/safetensors/paddle.py b/openflamingo/lib/python3.10/site-packages/safetensors/paddle.py new file mode 100644 index 0000000000000000000000000000000000000000..cec368665de31d17757c0c6621df5dc4926bfab1 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/safetensors/paddle.py @@ -0,0 +1,138 @@ +import os +from typing import Dict, Optional, Union + +import numpy as np + +import paddle +from safetensors import numpy + + +def save(tensors: Dict[str, paddle.Tensor], metadata: Optional[Dict[str, str]] = None) -> bytes: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, paddle.Tensor]`): + The incoming tensors. Tensors need to be contiguous and dense. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. 
+ + Returns: + `bytes`: The raw bytes representing the format + + Example: + + ```python + from safetensors.paddle import save + import paddle + + tensors = {"embedding": paddle.zeros((512, 1024)), "attention": paddle.zeros((256, 256))} + byte_data = save(tensors) + ``` + """ + np_tensors = _paddle2np(tensors) + return numpy.save(np_tensors, metadata=metadata) + + +def save_file( + tensors: Dict[str, paddle.Tensor], + filename: Union[str, os.PathLike], + metadata: Optional[Dict[str, str]] = None, +) -> None: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, paddle.Tensor]`): + The incoming tensors. Tensors need to be contiguous and dense. + filename (`str`, or `os.PathLike`)): + The filename we're saving into. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. + + Returns: + `None` + + Example: + + ```python + from safetensors.paddle import save_file + import paddle + + tensors = {"embedding": paddle.zeros((512, 1024)), "attention": paddle.zeros((256, 256))} + save_file(tensors, "model.safetensors") + ``` + """ + np_tensors = _paddle2np(tensors) + return numpy.save_file(np_tensors, filename, metadata=metadata) + + +def load(data: bytes, device: str = "cpu") -> Dict[str, paddle.Tensor]: + """ + Loads a safetensors file into paddle format from pure bytes. + + Args: + data (`bytes`): + The content of a safetensors file + + Returns: + `Dict[str, paddle.Tensor]`: dictionary that contains name as key, value as `paddle.Tensor` on cpu + + Example: + + ```python + from safetensors.paddle import load + + file_path = "./my_folder/bert.safetensors" + with open(file_path, "rb") as f: + data = f.read() + + loaded = load(data) + ``` + """ + flat = numpy.load(data) + return _np2paddle(flat, device) + + +def load_file(filename: Union[str, os.PathLike], device="cpu") -> Dict[str, paddle.Tensor]: + """ + Loads a safetensors file into paddle format. + + Args: + filename (`str`, or `os.PathLike`)): + The name of the file which contains the tensors + device (`Union[Dict[str, any], str]`, *optional*, defaults to `cpu`): + The device where the tensors need to be located after load. 
+ available options are all regular paddle device locations + + Returns: + `Dict[str, paddle.Tensor]`: dictionary that contains name as key, value as `paddle.Tensor` + + Example: + + ```python + from safetensors.paddle import load_file + + file_path = "./my_folder/bert.safetensors" + loaded = load_file(file_path) + ``` + """ + flat = numpy.load_file(filename) + output = _np2paddle(flat, device) + return output + + +def _np2paddle(numpy_dict: Dict[str, np.ndarray], device: str = "cpu") -> Dict[str, paddle.Tensor]: + for k, v in numpy_dict.items(): + numpy_dict[k] = paddle.to_tensor(v, place=device) + return numpy_dict + + +def _paddle2np(paddle_dict: Dict[str, paddle.Tensor]) -> Dict[str, np.array]: + for k, v in paddle_dict.items(): + paddle_dict[k] = v.detach().cpu().numpy() + return paddle_dict diff --git a/openflamingo/lib/python3.10/site-packages/safetensors/py.typed b/openflamingo/lib/python3.10/site-packages/safetensors/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/openflamingo/lib/python3.10/site-packages/safetensors/torch.py b/openflamingo/lib/python3.10/site-packages/safetensors/torch.py new file mode 100644 index 0000000000000000000000000000000000000000..48532ea5996cd807510b97458a0451f092ea0f35 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/safetensors/torch.py @@ -0,0 +1,503 @@ +import os +import sys +from collections import defaultdict +from typing import Any, Dict, List, Optional, Set, Tuple, Union + +import torch + +from safetensors import deserialize, safe_open, serialize, serialize_file + + +def storage_ptr(tensor: torch.Tensor) -> int: + try: + return tensor.untyped_storage().data_ptr() + except Exception: + # Fallback for torch==1.10 + try: + return tensor.storage().data_ptr() + except NotImplementedError: + # Fallback for meta storage + return 0 + + +def _end_ptr(tensor: torch.Tensor) -> int: + if tensor.nelement(): + stop = tensor.view(-1)[-1].data_ptr() + _SIZE[tensor.dtype] + else: + stop = tensor.data_ptr() + return stop + + +def storage_size(tensor: torch.Tensor) -> int: + try: + return tensor.untyped_storage().nbytes() + except AttributeError: + # Fallback for torch==1.10 + try: + return tensor.storage().size() * _SIZE[tensor.dtype] + except NotImplementedError: + # Fallback for meta storage + # On torch >=2.0 this is the tensor size + return tensor.nelement() * _SIZE[tensor.dtype] + + +def _filter_shared_not_shared(tensors: List[Set[str]], state_dict: Dict[str, torch.Tensor]) -> List[Set[str]]: + filtered_tensors = [] + for shared in tensors: + if len(shared) < 2: + filtered_tensors.append(shared) + continue + + areas = [] + for name in shared: + tensor = state_dict[name] + areas.append((tensor.data_ptr(), _end_ptr(tensor), name)) + areas.sort() + + _, last_stop, last_name = areas[0] + filtered_tensors.append({last_name}) + for start, stop, name in areas[1:]: + if start >= last_stop: + filtered_tensors.append({name}) + else: + filtered_tensors[-1].add(name) + last_stop = stop + + return filtered_tensors + + +def _find_shared_tensors(state_dict: Dict[str, torch.Tensor]) -> List[Set[str]]: + tensors = defaultdict(set) + for k, v in state_dict.items(): + if v.device != torch.device("meta") and storage_ptr(v) != 0 and storage_size(v) != 0: + # Need to add device as key because of multiple GPU. 
+ tensors[(v.device, storage_ptr(v), storage_size(v))].add(k) + tensors = list(sorted(tensors.values())) + tensors = _filter_shared_not_shared(tensors, state_dict) + return tensors + + +def _is_complete(tensor: torch.Tensor) -> bool: + return tensor.data_ptr() == storage_ptr(tensor) and tensor.nelement() * _SIZE[tensor.dtype] == storage_size(tensor) + + +def _remove_duplicate_names( + state_dict: Dict[str, torch.Tensor], + *, + preferred_names: Optional[List[str]] = None, + discard_names: Optional[List[str]] = None, +) -> Dict[str, List[str]]: + if preferred_names is None: + preferred_names = [] + preferred_names = set(preferred_names) + if discard_names is None: + discard_names = [] + discard_names = set(discard_names) + + shareds = _find_shared_tensors(state_dict) + to_remove = defaultdict(list) + for shared in shareds: + complete_names = set([name for name in shared if _is_complete(state_dict[name])]) + if not complete_names: + raise RuntimeError( + "Error while trying to find names to remove to save state dict, but found no suitable name to keep" + f" for saving amongst: {shared}. None is covering the entire storage.Refusing to save/load the model" + " since you could be storing much more memory than needed. Please refer to" + " https://huggingface.co/docs/safetensors/torch_shared_tensors for more information. Or open an" + " issue." + ) + + keep_name = sorted(list(complete_names))[0] + + # Mechanism to preferentially select keys to keep + # coming from the on-disk file to allow + # loading models saved with a different choice + # of keep_name + preferred = complete_names.difference(discard_names) + if preferred: + keep_name = sorted(list(preferred))[0] + + if preferred_names: + preferred = preferred_names.intersection(complete_names) + if preferred: + keep_name = sorted(list(preferred))[0] + for name in sorted(shared): + if name != keep_name: + to_remove[keep_name].append(name) + return to_remove + + +def save_model( + model: torch.nn.Module, filename: str, metadata: Optional[Dict[str, str]] = None, force_contiguous: bool = True +): + """ + Saves a given torch model to specified filename. + This method exists specifically to avoid tensor sharing issues which are + not allowed in `safetensors`. [More information on tensor sharing](../torch_shared_tensors) + + Args: + model (`torch.nn.Module`): + The model to save on disk. + filename (`str`): + The filename location to save the file + metadata (`Dict[str, str]`, *optional*): + Extra information to save along with the file. + Some metadata will be added for each dropped tensors. + This information will not be enough to recover the entire + shared structure but might help understanding things + force_contiguous (`boolean`, *optional*, defaults to True): + Forcing the state_dict to be saved as contiguous tensors. + This has no effect on the correctness of the model, but it + could potentially change performance if the layout of the tensor + was chosen specifically for that reason. 
+ """ + state_dict = model.state_dict() + to_removes = _remove_duplicate_names(state_dict) + + for kept_name, to_remove_group in to_removes.items(): + for to_remove in to_remove_group: + if metadata is None: + metadata = {} + + if to_remove not in metadata: + # Do not override user data + metadata[to_remove] = kept_name + del state_dict[to_remove] + if force_contiguous: + state_dict = {k: v.contiguous() for k, v in state_dict.items()} + try: + save_file(state_dict, filename, metadata=metadata) + except ValueError as e: + msg = str(e) + msg += " Or use save_model(..., force_contiguous=True), read the docs for potential caveats." + raise ValueError(msg) + + +def load_model( + model: torch.nn.Module, filename: Union[str, os.PathLike], strict: bool = True, device: Union[str, int] = "cpu" +) -> Tuple[List[str], List[str]]: + """ + Loads a given filename onto a torch model. + This method exists specifically to avoid tensor sharing issues which are + not allowed in `safetensors`. [More information on tensor sharing](../torch_shared_tensors) + + Args: + model (`torch.nn.Module`): + The model to load onto. + filename (`str`, or `os.PathLike`): + The filename location to load the file from. + strict (`bool`, *optional*, defaults to True): + Whether to fail if you're missing keys or having unexpected ones. + When false, the function simply returns missing and unexpected names. + device (`Union[str, int]`, *optional*, defaults to `cpu`): + The device where the tensors need to be located after load. + available options are all regular torch device locations. + + Returns: + `(missing, unexpected): (List[str], List[str])` + `missing` are names in the model which were not modified during loading + `unexpected` are names that are on the file, but weren't used during + the load. + """ + state_dict = load_file(filename, device=device) + model_state_dict = model.state_dict() + to_removes = _remove_duplicate_names(model_state_dict, preferred_names=state_dict.keys()) + missing, unexpected = model.load_state_dict(state_dict, strict=False) + missing = set(missing) + for to_remove_group in to_removes.values(): + for to_remove in to_remove_group: + if to_remove not in missing: + unexpected.append(to_remove) + else: + missing.remove(to_remove) + if strict and (missing or unexpected): + missing_keys = ", ".join([f'"{k}"' for k in sorted(missing)]) + unexpected_keys = ", ".join([f'"{k}"' for k in sorted(unexpected)]) + error = f"Error(s) in loading state_dict for {model.__class__.__name__}:" + if missing: + error += f"\n Missing key(s) in state_dict: {missing_keys}" + if unexpected: + error += f"\n Unexpected key(s) in state_dict: {unexpected_keys}" + raise RuntimeError(error) + return missing, unexpected + + +def save(tensors: Dict[str, torch.Tensor], metadata: Optional[Dict[str, str]] = None) -> bytes: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, torch.Tensor]`): + The incoming tensors. Tensors need to be contiguous and dense. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. 
+ + Returns: + `bytes`: The raw bytes representing the format + + Example: + + ```python + from safetensors.torch import save + import torch + + tensors = {"embedding": torch.zeros((512, 1024)), "attention": torch.zeros((256, 256))} + byte_data = save(tensors) + ``` + """ + serialized = serialize(_flatten(tensors), metadata=metadata) + result = bytes(serialized) + return result + + +def save_file( + tensors: Dict[str, torch.Tensor], + filename: Union[str, os.PathLike], + metadata: Optional[Dict[str, str]] = None, +): + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, torch.Tensor]`): + The incoming tensors. Tensors need to be contiguous and dense. + filename (`str`, or `os.PathLike`)): + The filename we're saving into. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. + + Returns: + `None` + + Example: + + ```python + from safetensors.torch import save_file + import torch + + tensors = {"embedding": torch.zeros((512, 1024)), "attention": torch.zeros((256, 256))} + save_file(tensors, "model.safetensors") + ``` + """ + serialize_file(_flatten(tensors), filename, metadata=metadata) + + +def load_file(filename: Union[str, os.PathLike], device: Union[str, int] = "cpu") -> Dict[str, torch.Tensor]: + """ + Loads a safetensors file into torch format. + + Args: + filename (`str`, or `os.PathLike`): + The name of the file which contains the tensors + device (`Union[str, int]`, *optional*, defaults to `cpu`): + The device where the tensors need to be located after load. + available options are all regular torch device locations. + + Returns: + `Dict[str, torch.Tensor]`: dictionary that contains name as key, value as `torch.Tensor` + + Example: + + ```python + from safetensors.torch import load_file + + file_path = "./my_folder/bert.safetensors" + loaded = load_file(file_path) + ``` + """ + result = {} + with safe_open(filename, framework="pt", device=device) as f: + for k in f.keys(): + result[k] = f.get_tensor(k) + return result + + +def load(data: bytes) -> Dict[str, torch.Tensor]: + """ + Loads a safetensors file into torch format from pure bytes. 
+ + Args: + data (`bytes`): + The content of a safetensors file + + Returns: + `Dict[str, torch.Tensor]`: dictionary that contains name as key, value as `torch.Tensor` on cpu + + Example: + + ```python + from safetensors.torch import load + + file_path = "./my_folder/bert.safetensors" + with open(file_path, "rb") as f: + data = f.read() + + loaded = load(data) + ``` + """ + flat = deserialize(data) + return _view2torch(flat) + + +# torch.float8 formats require 2.1; we do not support these dtypes on earlier versions +_float8_e4m3fn = getattr(torch, "float8_e4m3fn", None) +_float8_e5m2 = getattr(torch, "float8_e5m2", None) + +_SIZE = { + torch.int64: 8, + torch.float32: 4, + torch.int32: 4, + torch.bfloat16: 2, + torch.float16: 2, + torch.int16: 2, + torch.uint8: 1, + torch.int8: 1, + torch.bool: 1, + torch.float64: 8, + _float8_e4m3fn: 1, + _float8_e5m2: 1, +} + +_TYPES = { + "F64": torch.float64, + "F32": torch.float32, + "F16": torch.float16, + "BF16": torch.bfloat16, + "I64": torch.int64, + # "U64": torch.uint64, + "I32": torch.int32, + # "U32": torch.uint32, + "I16": torch.int16, + # "U16": torch.uint16, + "I8": torch.int8, + "U8": torch.uint8, + "BOOL": torch.bool, + "F8_E4M3": _float8_e4m3fn, + "F8_E5M2": _float8_e5m2, +} + + +def _getdtype(dtype_str: str) -> torch.dtype: + return _TYPES[dtype_str] + + +def _view2torch(safeview) -> Dict[str, torch.Tensor]: + result = {} + for k, v in safeview: + dtype = _getdtype(v["dtype"]) + if len(v["data"]) == 0: + # Workaround because frombuffer doesn't accept zero-size tensors + assert any(x == 0 for x in v["shape"]) + arr = torch.empty(v["shape"], dtype=dtype) + else: + arr = torch.frombuffer(v["data"], dtype=dtype).reshape(v["shape"]) + if sys.byteorder == "big": + arr = torch.from_numpy(arr.numpy().byteswap(inplace=False)) + result[k] = arr + + return result + + +def _tobytes(tensor: torch.Tensor, name: str) -> bytes: + if tensor.layout != torch.strided: + raise ValueError( + f"You are trying to save a sparse tensor: `{name}` which this library does not support." + " You can make it a dense tensor before saving with `.to_dense()` but be aware this might" + " make a much larger file than needed." + ) + + if not tensor.is_contiguous(): + raise ValueError( + f"You are trying to save a non contiguous tensor: `{name}` which is not allowed. It either means you" + " are trying to save tensors which are reference of each other in which case it's recommended to save" + " only the full tensors, and reslice at load time, or simply call `.contiguous()` on your tensor to" + " pack it before saving." 
+ ) + if tensor.device.type != "cpu": + # Moving tensor to cpu before saving + tensor = tensor.to("cpu") + + import ctypes + + import numpy as np + + # When shape is empty (scalar), np.prod returns a float + # we need a int for the following calculations + length = int(np.prod(tensor.shape).item()) + bytes_per_item = _SIZE[tensor.dtype] + + total_bytes = length * bytes_per_item + + ptr = tensor.data_ptr() + if ptr == 0: + return b"" + newptr = ctypes.cast(ptr, ctypes.POINTER(ctypes.c_ubyte)) + data = np.ctypeslib.as_array(newptr, (total_bytes,)) # no internal copy + if sys.byteorder == "big": + NPDTYPES = { + torch.int64: np.int64, + torch.float32: np.float32, + torch.int32: np.int32, + # XXX: This is ok because both have the same width + torch.bfloat16: np.float16, + torch.float16: np.float16, + torch.int16: np.int16, + torch.uint8: np.uint8, + torch.int8: np.int8, + torch.bool: bool, + torch.float64: np.float64, + # XXX: This is ok because both have the same width and byteswap is a no-op anyway + _float8_e4m3fn: np.uint8, + _float8_e5m2: np.uint8, + } + npdtype = NPDTYPES[tensor.dtype] + # Not in place as that would potentially modify a live running model + data = data.view(npdtype).byteswap(inplace=False) + return data.tobytes() + + +def _flatten(tensors: Dict[str, torch.Tensor]) -> Dict[str, Dict[str, Any]]: + if not isinstance(tensors, dict): + raise ValueError(f"Expected a dict of [str, torch.Tensor] but received {type(tensors)}") + + invalid_tensors = [] + for k, v in tensors.items(): + if not isinstance(v, torch.Tensor): + raise ValueError(f"Key `{k}` is invalid, expected torch.Tensor but received {type(v)}") + + if v.layout != torch.strided: + invalid_tensors.append(k) + if invalid_tensors: + raise ValueError( + f"You are trying to save a sparse tensors: `{invalid_tensors}` which this library does not support." + " You can make it a dense tensor before saving with `.to_dense()` but be aware this might" + " make a much larger file than needed." + ) + + shared_pointers = _find_shared_tensors(tensors) + failing = [] + for names in shared_pointers: + if len(names) > 1: + failing.append(names) + + if failing: + raise RuntimeError( + f""" + Some tensors share memory, this will lead to duplicate memory on disk and potential differences when loading them again: {failing}. + A potential way to correctly save your model is to use `save_model`. 
+ More information at https://huggingface.co/docs/safetensors/torch_shared_tensors + """ + ) + + return { + k: { + "dtype": str(v.dtype).split(".")[-1], + "shape": v.shape, + "data": _tobytes(v, k), + } + for k, v in tensors.items() + } diff --git a/openflamingo/lib/python3.10/site-packages/tokenizers-0.13.3.dist-info/RECORD b/openflamingo/lib/python3.10/site-packages/tokenizers-0.13.3.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..263c5c89e7bbff16c9a180c250b3b0752c8703ec --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/tokenizers-0.13.3.dist-info/RECORD @@ -0,0 +1,57 @@ +tokenizers-0.13.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +tokenizers-0.13.3.dist-info/METADATA,sha256=DiZYpQNzHhyWf29uAMBdxhhyLYiHpVH7j0qp1CGMGNg,6729 +tokenizers-0.13.3.dist-info/RECORD,, +tokenizers-0.13.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tokenizers-0.13.3.dist-info/WHEEL,sha256=iZaXX0Td62Nww8bojl0E84uJHjT41csHPKZmbUBbJPs,152 +tokenizers-0.13.3.dist-info/top_level.txt,sha256=JcOIHPbSqrQvqcDyFOnNNcee6wJlpDZvyk-BSCeX4QA,11 +tokenizers.libs/libcom_err-2abe824b.so.2.1,sha256=VCbctU3QHJ7t2gXiF58ORxFOi0ilNP_p6UkW55Rxslc,17497 +tokenizers.libs/libcrypto-d3570994.so.1.0.2k,sha256=0MuwMwOqNZyxoXNl8w6SXHLekru2FpDhADKt8PijvTM,2644337 +tokenizers.libs/libgssapi_krb5-497db0c6.so.2.2,sha256=KnSwMw7pcygbJvjr5KzvDr-e6ZxraEl8-RUf_2xMNOE,345209 +tokenizers.libs/libk5crypto-b1f99d5c.so.3.1,sha256=mETlAJ5wpq0vsitYcwaBD-Knsbn2uZItqhx4ujRm3ic,219953 +tokenizers.libs/libkeyutils-dfe70bd6.so.1.5,sha256=wp5BsDz0st_7-0lglG4rQvgsDKXVPSMdPw_Fl7onRIg,17913 +tokenizers.libs/libkrb5-fcafa220.so.3.3,sha256=sqq1KP9MqyFE5c4BskasCfV0oHKlP_Y-qB1rspsmuPE,1018953 +tokenizers.libs/libkrb5support-d0bcff84.so.0.1,sha256=anH1fXSP73m05zbVNIh1VF0KIk-okotdYqPPJkf8EJ8,76873 +tokenizers.libs/libpcre-9513aab5.so.1.2.0,sha256=Au2oUOBJMWVtivgfUXG_902L7BVT09hcPTLX_F7-iGQ,406817 +tokenizers.libs/libselinux-0922c95c.so.1,sha256=1PqOf7Ot2WCmgyWlnJaUJErqMhP9c5pQgVywZ8SWVlQ,178337 +tokenizers.libs/libssl-cd1d6220.so.1.0.2k,sha256=BWi1wACgo1Adpy3kyrtdXyvWZMph-paeUMTN9Dk4toU,519553 +tokenizers/__init__.py,sha256=glqA4Fxrk4gtj18eKRtDr4og4Fb8Rp77GZ4zzWQVmZM,2622 +tokenizers/__init__.pyi,sha256=gjqP26VqhenkbraWab_llJpN2P8SjCk9v-1ml8sYnc8,37561 +tokenizers/__pycache__/__init__.cpython-310.pyc,, +tokenizers/decoders/__init__.py,sha256=lGp32h8qerE0F48gyZL8wGmeQVlmjVpeIsRb1SM9kf4,335 +tokenizers/decoders/__init__.pyi,sha256=qF5dcAK7Hiqdj_eXCTaG_-AdC_mvLy-E96_HzovIzgI,7041 +tokenizers/decoders/__pycache__/__init__.cpython-310.pyc,, +tokenizers/implementations/__init__.py,sha256=VzAsplaIo7rl4AFO8Miu7ig7MfZjvonwVblZw01zR6M,310 +tokenizers/implementations/__pycache__/__init__.cpython-310.pyc,, +tokenizers/implementations/__pycache__/base_tokenizer.cpython-310.pyc,, +tokenizers/implementations/__pycache__/bert_wordpiece.cpython-310.pyc,, +tokenizers/implementations/__pycache__/byte_level_bpe.cpython-310.pyc,, +tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc,, +tokenizers/implementations/__pycache__/sentencepiece_bpe.cpython-310.pyc,, +tokenizers/implementations/__pycache__/sentencepiece_unigram.cpython-310.pyc,, +tokenizers/implementations/base_tokenizer.py,sha256=x7PcneIVmrNuDrk_UmGBQNKU34ftmGpBBEI7msKs1ZA,13943 +tokenizers/implementations/bert_wordpiece.py,sha256=sKCum0FKPYdSgJFJN8LDerVBoTDRSqyqSdrcm-lvQqI,5520 +tokenizers/implementations/byte_level_bpe.py,sha256=OA_jyy3EQmYTa6hnf-EKwLOFuyroqFYOJz25ysM2BUk,4289 
+tokenizers/implementations/char_level_bpe.py,sha256=Q2ZEAW0xMQHF7YCUtmplwaxbU-J0P2NK4PJGMxUb-_c,5466 +tokenizers/implementations/sentencepiece_bpe.py,sha256=syNXoQZX1JtI8U1A9XSDcoihlF3bIGVTVN53YJ83pV4,3679 +tokenizers/implementations/sentencepiece_unigram.py,sha256=6D4d-pf5Qc7IdtXvnA9DmuQbD_W5GYsVvEiF_18WbSE,7196 +tokenizers/models/__init__.py,sha256=eJZ4HTAQZpxnKILNylWaTFqxXy-Ba6OKswWN47feeV8,176 +tokenizers/models/__init__.pyi,sha256=sbd54iE9KIRH7qkwgcRiqQuUkSpH6ye2H39zK22DjfU,16714 +tokenizers/models/__pycache__/__init__.cpython-310.pyc,, +tokenizers/normalizers/__init__.py,sha256=hKOwnqWM-IlcVv7HDWT9SYhlczevuCNDQJY05ZFxkzk,808 +tokenizers/normalizers/__init__.pyi,sha256=pR7NwcZDyFncmyZa9Wn1gjqzopaq5HyiPkRNHYuhzZk,19585 +tokenizers/normalizers/__pycache__/__init__.cpython-310.pyc,, +tokenizers/pre_tokenizers/__init__.py,sha256=wd6KYQA_RsGSQK-HeG9opTRhv4ttSRkyno2dk6az-PM,557 +tokenizers/pre_tokenizers/__init__.pyi,sha256=MKskOBQVYlYu-U53hcsjC4ik5Js1gXD3ViuogF-3w74,23010 +tokenizers/pre_tokenizers/__pycache__/__init__.cpython-310.pyc,, +tokenizers/processors/__init__.py,sha256=xM2DEKwKtHIumHsszM8AMkq-AlaqvBZFXWgLU8SNhOY,307 +tokenizers/processors/__init__.pyi,sha256=AZXH9ZDWb0jvzp0DKC7lKjCeOwli70_ZvR3zqMKf7v4,11352 +tokenizers/processors/__pycache__/__init__.cpython-310.pyc,, +tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so,sha256=8zcbNdfJqUOIbjjxJYAERhcvcfaXmSbdm2APVVWctpY,17916457 +tokenizers/tools/__init__.py,sha256=xG8caB9OHC8cbB01S5vYV14HZxhO6eWbLehsb70ppio,55 +tokenizers/tools/__pycache__/__init__.cpython-310.pyc,, +tokenizers/tools/__pycache__/visualizer.cpython-310.pyc,, +tokenizers/tools/visualizer-styles.css,sha256=zAydq1oGWD8QEll4-eyL8Llw0B1sty_hpIE3tYxL02k,4850 +tokenizers/tools/visualizer.py,sha256=0KUrLhkBLhPvg3GAkvsiBokb517bMCMSN-vuYY1qmEo,14621 +tokenizers/trainers/__init__.py,sha256=UTu22AGcp76IvpW45xLRbJWET04NxPW6NfCb2YYz0EM,248 +tokenizers/trainers/__init__.pyi,sha256=R69lgwClNIkIEkgBzhUXvgnsRLpRlh9DlhlO-XRKEe4,5126 +tokenizers/trainers/__pycache__/__init__.cpython-310.pyc,, diff --git a/openflamingo/lib/python3.10/site-packages/tokenizers-0.13.3.dist-info/REQUESTED b/openflamingo/lib/python3.10/site-packages/tokenizers-0.13.3.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/openflamingo/lib/python3.10/site-packages/tokenizers-0.13.3.dist-info/top_level.txt b/openflamingo/lib/python3.10/site-packages/tokenizers-0.13.3.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..853ed9dc186c1f11ade035593e3968cbe5c7a41e --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/tokenizers-0.13.3.dist-info/top_level.txt @@ -0,0 +1 @@ +tokenizers diff --git a/openflamingo/lib/python3.10/site-packages/transformers/data/__init__.py b/openflamingo/lib/python3.10/site-packages/transformers/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1a8ef35ff439e48caf92dba731f7c551f6dcf285 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/transformers/data/__init__.py @@ -0,0 +1,44 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .data_collator import ( + DataCollatorForLanguageModeling, + DataCollatorForPermutationLanguageModeling, + DataCollatorForSeq2Seq, + DataCollatorForSOP, + DataCollatorForTokenClassification, + DataCollatorForWholeWordMask, + DataCollatorWithPadding, + DefaultDataCollator, + default_data_collator, +) +from .metrics import glue_compute_metrics, xnli_compute_metrics +from .processors import ( + DataProcessor, + InputExample, + InputFeatures, + SingleSentenceClassificationProcessor, + SquadExample, + SquadFeatures, + SquadV1Processor, + SquadV2Processor, + glue_convert_examples_to_features, + glue_output_modes, + glue_processors, + glue_tasks_num_labels, + squad_convert_examples_to_features, + xnli_output_modes, + xnli_processors, + xnli_tasks_num_labels, +) diff --git a/openflamingo/lib/python3.10/site-packages/transformers/data/metrics/__init__.py b/openflamingo/lib/python3.10/site-packages/transformers/data/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ebd0d17aa55bb4529820ce347f6275d38f6c0caa --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/transformers/data/metrics/__init__.py @@ -0,0 +1,98 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings + +from ...utils import is_sklearn_available, requires_backends + + +if is_sklearn_available(): + from scipy.stats import pearsonr, spearmanr + from sklearn.metrics import f1_score, matthews_corrcoef + + +DEPRECATION_WARNING = ( + "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate " + "library. 
You can have a look at this example script for pointers: " + "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" +) + + +def simple_accuracy(preds, labels): + warnings.warn(DEPRECATION_WARNING, FutureWarning) + requires_backends(simple_accuracy, "sklearn") + return (preds == labels).mean() + + +def acc_and_f1(preds, labels): + warnings.warn(DEPRECATION_WARNING, FutureWarning) + requires_backends(acc_and_f1, "sklearn") + acc = simple_accuracy(preds, labels) + f1 = f1_score(y_true=labels, y_pred=preds) + return { + "acc": acc, + "f1": f1, + "acc_and_f1": (acc + f1) / 2, + } + + +def pearson_and_spearman(preds, labels): + warnings.warn(DEPRECATION_WARNING, FutureWarning) + requires_backends(pearson_and_spearman, "sklearn") + pearson_corr = pearsonr(preds, labels)[0] + spearman_corr = spearmanr(preds, labels)[0] + return { + "pearson": pearson_corr, + "spearmanr": spearman_corr, + "corr": (pearson_corr + spearman_corr) / 2, + } + + +def glue_compute_metrics(task_name, preds, labels): + warnings.warn(DEPRECATION_WARNING, FutureWarning) + requires_backends(glue_compute_metrics, "sklearn") + assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}" + if task_name == "cola": + return {"mcc": matthews_corrcoef(labels, preds)} + elif task_name == "sst-2": + return {"acc": simple_accuracy(preds, labels)} + elif task_name == "mrpc": + return acc_and_f1(preds, labels) + elif task_name == "sts-b": + return pearson_and_spearman(preds, labels) + elif task_name == "qqp": + return acc_and_f1(preds, labels) + elif task_name == "mnli": + return {"mnli/acc": simple_accuracy(preds, labels)} + elif task_name == "mnli-mm": + return {"mnli-mm/acc": simple_accuracy(preds, labels)} + elif task_name == "qnli": + return {"acc": simple_accuracy(preds, labels)} + elif task_name == "rte": + return {"acc": simple_accuracy(preds, labels)} + elif task_name == "wnli": + return {"acc": simple_accuracy(preds, labels)} + elif task_name == "hans": + return {"acc": simple_accuracy(preds, labels)} + else: + raise KeyError(task_name) + + +def xnli_compute_metrics(task_name, preds, labels): + warnings.warn(DEPRECATION_WARNING, FutureWarning) + requires_backends(xnli_compute_metrics, "sklearn") + if len(preds) != len(labels): + raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}") + if task_name == "xnli": + return {"acc": simple_accuracy(preds, labels)} + else: + raise KeyError(task_name) diff --git a/openflamingo/lib/python3.10/site-packages/transformers/data/processors/__init__.py b/openflamingo/lib/python3.10/site-packages/transformers/data/processors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a26ab5776d74715428b10c4d9cd943e53b253785 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/transformers/data/processors/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels +from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features +from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor +from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels diff --git a/openflamingo/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..973e9e66f4e1a3d0ab1b0f1bb8966e94cbf710fa Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29b3119261fa6a0766bac9f48a47808747efa090 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/transformers/data/processors/glue.py b/openflamingo/lib/python3.10/site-packages/transformers/data/processors/glue.py new file mode 100644 index 0000000000000000000000000000000000000000..3d22968c9d06323c7c1cd4b00e5fcd2e6cf3f35d --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/transformers/data/processors/glue.py @@ -0,0 +1,643 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" GLUE processors and helpers""" + +import os +import warnings +from dataclasses import asdict +from enum import Enum +from typing import List, Optional, Union + +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import is_tf_available, logging +from .utils import DataProcessor, InputExample, InputFeatures + + +if is_tf_available(): + import tensorflow as tf + +logger = logging.get_logger(__name__) + +DEPRECATION_WARNING = ( + "This {0} will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets " + "library. 
You can have a look at this example script for pointers: " + "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" +) + + +def glue_convert_examples_to_features( + examples: Union[List[InputExample], "tf.data.Dataset"], + tokenizer: PreTrainedTokenizer, + max_length: Optional[int] = None, + task=None, + label_list=None, + output_mode=None, +): + """ + Loads a data file into a list of `InputFeatures` + + Args: + examples: List of `InputExamples` or `tf.data.Dataset` containing the examples. + tokenizer: Instance of a tokenizer that will tokenize the examples + max_length: Maximum example length. Defaults to the tokenizer's max_len + task: GLUE task + label_list: List of labels. Can be obtained from the processor using the `processor.get_labels()` method + output_mode: String indicating the output mode. Either `regression` or `classification` + + Returns: + If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the task-specific + features. If the input is a list of `InputExamples`, will return a list of task-specific `InputFeatures` which + can be fed to the model. + + """ + warnings.warn(DEPRECATION_WARNING.format("function"), FutureWarning) + if is_tf_available() and isinstance(examples, tf.data.Dataset): + if task is None: + raise ValueError("When calling glue_convert_examples_to_features from TF, the task parameter is required.") + return _tf_glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task) + return _glue_convert_examples_to_features( + examples, tokenizer, max_length=max_length, task=task, label_list=label_list, output_mode=output_mode + ) + + +if is_tf_available(): + + def _tf_glue_convert_examples_to_features( + examples: tf.data.Dataset, + tokenizer: PreTrainedTokenizer, + task=str, + max_length: Optional[int] = None, + ) -> tf.data.Dataset: + """ + Returns: + A `tf.data.Dataset` containing the task-specific features. 
+ + """ + processor = glue_processors[task]() + examples = [processor.tfds_map(processor.get_example_from_tensor_dict(example)) for example in examples] + features = glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task) + label_type = tf.float32 if task == "sts-b" else tf.int64 + + def gen(): + for ex in features: + d = {k: v for k, v in asdict(ex).items() if v is not None} + label = d.pop("label") + yield (d, label) + + input_names = tokenizer.model_input_names + + return tf.data.Dataset.from_generator( + gen, + ({k: tf.int32 for k in input_names}, label_type), + ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), + ) + + +def _glue_convert_examples_to_features( + examples: List[InputExample], + tokenizer: PreTrainedTokenizer, + max_length: Optional[int] = None, + task=None, + label_list=None, + output_mode=None, +): + if max_length is None: + max_length = tokenizer.model_max_length + + if task is not None: + processor = glue_processors[task]() + if label_list is None: + label_list = processor.get_labels() + logger.info(f"Using label list {label_list} for task {task}") + if output_mode is None: + output_mode = glue_output_modes[task] + logger.info(f"Using output mode {output_mode} for task {task}") + + label_map = {label: i for i, label in enumerate(label_list)} + + def label_from_example(example: InputExample) -> Union[int, float, None]: + if example.label is None: + return None + if output_mode == "classification": + return label_map[example.label] + elif output_mode == "regression": + return float(example.label) + raise KeyError(output_mode) + + labels = [label_from_example(example) for example in examples] + + batch_encoding = tokenizer( + [(example.text_a, example.text_b) for example in examples], + max_length=max_length, + padding="max_length", + truncation=True, + ) + + features = [] + for i in range(len(examples)): + inputs = {k: batch_encoding[k][i] for k in batch_encoding} + + feature = InputFeatures(**inputs, label=labels[i]) + features.append(feature) + + for i, example in enumerate(examples[:5]): + logger.info("*** Example ***") + logger.info(f"guid: {example.guid}") + logger.info(f"features: {features[i]}") + + return features + + +class OutputMode(Enum): + classification = "classification" + regression = "regression" + + +class MrpcProcessor(DataProcessor): + """Processor for the MRPC data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence1"].numpy().decode("utf-8"), + tensor_dict["sentence2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + logger.info(f"LOOKING AT {os.path.join(data_dir, 'train.tsv')}") + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, 
dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{i}" + text_a = line[3] + text_b = line[4] + label = None if set_type == "test" else line[0] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class MnliProcessor(DataProcessor): + """Processor for the MultiNLI data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["premise"].numpy().decode("utf-8"), + tensor_dict["hypothesis"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test_matched") + + def get_labels(self): + """See base class.""" + return ["contradiction", "entailment", "neutral"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[8] + text_b = line[9] + label = None if set_type.startswith("test") else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class MnliMismatchedProcessor(MnliProcessor): + """Processor for the MultiNLI Mismatched data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_mismatched") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_mismatched.tsv")), "test_mismatched") + + +class ColaProcessor(DataProcessor): + """Processor for the CoLA data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence"].numpy().decode("utf-8"), + None, + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def 
_create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + test_mode = set_type == "test" + if test_mode: + lines = lines[1:] + text_index = 1 if test_mode else 3 + examples = [] + for i, line in enumerate(lines): + guid = f"{set_type}-{i}" + text_a = line[text_index] + label = None if test_mode else line[1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + return examples + + +class Sst2Processor(DataProcessor): + """Processor for the SST-2 data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence"].numpy().decode("utf-8"), + None, + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + text_index = 1 if set_type == "test" else 0 + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{i}" + text_a = line[text_index] + label = None if set_type == "test" else line[1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + return examples + + +class StsbProcessor(DataProcessor): + """Processor for the STS-B data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence1"].numpy().decode("utf-8"), + tensor_dict["sentence2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return [None] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[7] + text_b = line[8] + label = None if set_type == "test" else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class QqpProcessor(DataProcessor): + """Processor for the QQP data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + 
super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["question1"].numpy().decode("utf-8"), + tensor_dict["question2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + test_mode = set_type == "test" + q1_index = 1 if test_mode else 3 + q2_index = 2 if test_mode else 4 + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + try: + text_a = line[q1_index] + text_b = line[q2_index] + label = None if test_mode else line[5] + except IndexError: + continue + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class QnliProcessor(DataProcessor): + """Processor for the QNLI data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["question"].numpy().decode("utf-8"), + tensor_dict["sentence"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["entailment", "not_entailment"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[1] + text_b = line[2] + label = None if set_type == "test" else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class RteProcessor(DataProcessor): + """Processor for the RTE data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence1"].numpy().decode("utf-8"), + tensor_dict["sentence2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): 
+ """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["entailment", "not_entailment"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[1] + text_b = line[2] + label = None if set_type == "test" else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class WnliProcessor(DataProcessor): + """Processor for the WNLI data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence1"].numpy().decode("utf-8"), + tensor_dict["sentence2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[1] + text_b = line[2] + label = None if set_type == "test" else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +glue_tasks_num_labels = { + "cola": 2, + "mnli": 3, + "mrpc": 2, + "sst-2": 2, + "sts-b": 1, + "qqp": 2, + "qnli": 2, + "rte": 2, + "wnli": 2, +} + +glue_processors = { + "cola": ColaProcessor, + "mnli": MnliProcessor, + "mnli-mm": MnliMismatchedProcessor, + "mrpc": MrpcProcessor, + "sst-2": Sst2Processor, + "sts-b": StsbProcessor, + "qqp": QqpProcessor, + "qnli": QnliProcessor, + "rte": RteProcessor, + "wnli": WnliProcessor, +} + +glue_output_modes = { + "cola": "classification", + "mnli": "classification", + "mnli-mm": "classification", + "mrpc": "classification", + "sst-2": "classification", + "sts-b": "regression", + "qqp": "classification", + "qnli": "classification", + "rte": "classification", + "wnli": "classification", +} diff --git a/openflamingo/lib/python3.10/site-packages/transformers/kernels/mra/cuda_kernel.h b/openflamingo/lib/python3.10/site-packages/transformers/kernels/mra/cuda_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..a95b46f7d159b11851143710034cf80c20aa6bf8 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/transformers/kernels/mra/cuda_kernel.h @@ 
-0,0 +1,59 @@ + +#define WARP_SIZE 32 +#define FULL_MASK 0xffffffff +#define OPTIMAL_THREADS 256 + +__global__ void index_max_cuda_kernel( + float *index_vals, // [batch_size, 32, num_block] + int *indices, // [batch_size, num_block] + float *max_vals, // [batch_size, A_num_block * 32] + float *max_vals_scatter, // [batch_size, 32, num_block] + long batch_size, + long A_num_block, + long B_num_block, + long num_block +); + +__global__ void mm_to_sparse_cuda_kernel( + float *dense_A, // [batch_size, A_num_block, dim, 32] + float *dense_B, // [batch_size, B_num_block, dim, 32] + int *indices, // [batch_size, num_block] + float *sparse_C, // [batch_size, num_block, 32, 32] + long batch_size, + long A_num_block, + long B_num_block, + long dim, + long num_block +); + +__global__ void sparse_dense_mm_cuda_kernel( + float *sparse_A, // [batch_size, num_block, 32, 32] + int *indices, // [batch_size, num_block] + float *dense_B, // [batch_size, B_num_block, dim, 32] + float *dense_C, // [batch_size, A_num_block, dim, 32] + long batch_size, + long A_num_block, + long B_num_block, + long dim, + long num_block +); + +__global__ void reduce_sum_cuda_kernel( + float *sparse_A, // [batch_size, num_block, 32, 32] + int *indices, // [batch_size, num_block] + float *dense_C, // [batch_size, A_num_block, 32] + long batch_size, + long A_num_block, + long B_num_block, + long num_block +); + +__global__ void scatter_cuda_kernel( + float *dense_A, // [batch_size, A_num_block, 32] + int *indices, // [batch_size, num_block] + float *sparse_C, // [batch_size, num_block, 32, 32] + long batch_size, + long A_num_block, + long B_num_block, + long num_block +); diff --git a/openflamingo/lib/python3.10/site-packages/transformers/kernels/mra/cuda_launch.h b/openflamingo/lib/python3.10/site-packages/transformers/kernels/mra/cuda_launch.h new file mode 100644 index 0000000000000000000000000000000000000000..0200140ee337b8c5d9583767bbad1e842e9d4677 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/transformers/kernels/mra/cuda_launch.h @@ -0,0 +1,39 @@ +#include +#include +#include + +#define min(a, b) ((a)<(b)?(a):(b)) +#define max(a, b) ((a)>(b)?(a):(b)) + +std::vector index_max_kernel( + at::Tensor index_vals, + at::Tensor indices, + int A_num_block, + int B_num_block +); + +at::Tensor mm_to_sparse_kernel( + at::Tensor dense_A, + at::Tensor dense_B, + at::Tensor indices +); + +at::Tensor sparse_dense_mm_kernel( + at::Tensor sparse_A, + at::Tensor indices, + at::Tensor dense_B, + int A_num_block +); + +at::Tensor reduce_sum_kernel( + at::Tensor sparse_A, + at::Tensor indices, + int A_num_block, + int B_num_block +); + +at::Tensor scatter_kernel( + at::Tensor dense_A, + at::Tensor indices, + int B_num_block +); diff --git a/openflamingo/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.h b/openflamingo/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.h new file mode 100644 index 0000000000000000000000000000000000000000..dd48de0ed159f49ee3afe93b12aaae719fe87688 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.h @@ -0,0 +1,71 @@ +#include +#include +#include + +std::vector fast_hash_ver1_kernel( + at::Tensor query_mask, + at::Tensor query_vector, + at::Tensor key_mask, + at::Tensor key_vector, + int num_hash_f, + int hash_code_len, + bool use_cuda +); + +at::Tensor lsh_cumulation_ver1_kernel( + at::Tensor query_mask, + at::Tensor query_hash_code, + at::Tensor key_mask, + at::Tensor key_hash_code, + 
at::Tensor value, + int hashtable_capacity, + bool use_cuda +); + +at::Tensor lsh_weighted_cumulation_ver1_kernel( + at::Tensor query_mask, + at::Tensor query_hash_code, + at::Tensor query_weight, + at::Tensor key_mask, + at::Tensor key_hash_code, + at::Tensor key_weight, + at::Tensor value, + int hashtable_capacity, + bool use_cuda +); + +at::Tensor lsh_weighted_cumulation_ver2_kernel( + at::Tensor query_mask, + at::Tensor query_hash_code, + at::Tensor query_weight, + at::Tensor key_mask, + at::Tensor key_hash_code, + at::Tensor key_weight, + at::Tensor value, + int hashtable_capacity, + bool use_cuda +); + +at::Tensor lsh_weighted_cumulation_ver3_kernel( + at::Tensor query_mask, + at::Tensor query_hash_code, + at::Tensor query_weight, + at::Tensor key_mask, + at::Tensor key_hash_code, + at::Tensor key_weight, + at::Tensor value, + int hashtable_capacity, + bool use_cuda +); + +at::Tensor lsh_weighted_cumulation_ver4_kernel( + at::Tensor query_mask, + at::Tensor query_hash_code, + at::Tensor query_weight, + at::Tensor key_mask, + at::Tensor key_hash_code, + at::Tensor key_weight, + at::Tensor value, + int hashtable_capacity, + bool use_cuda +);
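The MRA and YOSO headers above only declare kernel entry points; transformers compiles such sources on demand as a custom PyTorch extension. A minimal sketch of that general pattern using `torch.utils.cpp_extension.load`, with hypothetical source paths and binding names (the `.cu`/`.cpp` implementations and the pybind wiring are not part of this diff):

```python
from pathlib import Path

from torch.utils.cpp_extension import load

# Hypothetical layout: the headers shown above plus their .cu / .cpp
# implementations sitting next to them inside the installed package.
kernel_dir = Path("openflamingo/lib/python3.10/site-packages/transformers/kernels/mra")
mra_cuda = load(
    name="cuda_kernel",
    sources=[
        str(kernel_dir / "cuda_kernel.cu"),
        str(kernel_dir / "cuda_launch.cu"),
        str(kernel_dir / "torch_extension.cpp"),
    ],
    verbose=True,
)

# Once built, the functions declared in cuda_launch.h become callable from Python,
# e.g. something like mra_cuda.index_max(index_vals, indices, A_num_block, B_num_block);
# the exact exposed names depend on torch_extension.cpp, which this diff does not include.
```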