diff --git a/.gitattributes b/.gitattributes index 12efb4e1f4a01a6bcffecced61187b88f85ca119..d26b2f24bae67c05b83486ee764eb69cbb05854c 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1646,3 +1646,8 @@ evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/sing evalkit_internvl/lib/python3.10/site-packages/sympy/polys/benchmarks/__pycache__/bench_solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text evalkit_internvl/lib/python3.10/site-packages/tiktoken/_tiktoken.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libjpeg.ceea7512.so.62 filter=lfs diff=lfs merge=lfs -text +evalkit_internvl/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +evalkit_internvl/lib/python3.10/site-packages/sympy/logic/__pycache__/boolalg.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libz.37eba27a.so.1 filter=lfs diff=lfs merge=lfs -text +evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/binaries/ffmpeg-linux64-v4.2.2 filter=lfs diff=lfs merge=lfs -text diff --git a/evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/LICENSE b/evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..104eebf5a3002fccdaceef3a4cb936173c1c2035 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2018 Alex Grönholm + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
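The `.gitattributes` lines added above route more bundled binaries (torchvision's libjpeg/libz builds, the frozenlist extension module, a sympy bytecode cache, and a standalone ffmpeg) through Git LFS, so the repository stores small pointer stubs instead of multi-megabyte blobs. Below is a minimal sketch of how such a stub can be recognized in a checkout where the LFS content was not fetched, assuming the v1 pointer layout shown in the stanzas later in this diff; the helper name is illustrative and not part of this change.

# Hedged sketch: parse a Git LFS pointer stub (format per
# https://git-lfs.github.com/spec/v1, as seen later in this diff).
def parse_lfs_pointer(path):
    """Return {'oid': ..., 'size': ...} if `path` is an LFS pointer stub, else None."""
    try:
        with open(path, "rb") as f:
            head = f.read(512)  # pointer files are tiny
    except OSError:
        return None
    text = head.decode("utf-8", errors="replace")
    if not text.startswith("version https://git-lfs.github.com/spec/v1"):
        return None
    fields = dict(
        line.split(" ", 1) for line in text.splitlines() if " " in line
    )
    return {"oid": fields.get("oid"), "size": int(fields.get("size", 0))}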
diff --git a/evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/RECORD b/evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..038793aa3eb813e159c11b49247c185cf1901f59 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/RECORD @@ -0,0 +1,83 @@ +anyio-3.7.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +anyio-3.7.1.dist-info/LICENSE,sha256=U2GsncWPLvX9LpsJxoKXwX8ElQkJu8gCO9uC6s8iwrA,1081 +anyio-3.7.1.dist-info/METADATA,sha256=mOhfXPB7qKVQh3dUtp2NgLysa10jHWeDBNnRg-93A_c,4708 +anyio-3.7.1.dist-info/RECORD,, +anyio-3.7.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +anyio-3.7.1.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92 +anyio-3.7.1.dist-info/entry_points.txt,sha256=_d6Yu6uiaZmNe0CydowirE9Cmg7zUL2g08tQpoS3Qvc,39 +anyio-3.7.1.dist-info/top_level.txt,sha256=QglSMiWX8_5dpoVAEIHdEYzvqFMdSYWmCj6tYw2ITkQ,6 +anyio/__init__.py,sha256=Pq9lO03Zm5ynIPlhkquaOuIc1dTTeLGNUQ5HT5qwYMI,4073 +anyio/__pycache__/__init__.cpython-310.pyc,, +anyio/__pycache__/from_thread.cpython-310.pyc,, +anyio/__pycache__/lowlevel.cpython-310.pyc,, +anyio/__pycache__/pytest_plugin.cpython-310.pyc,, +anyio/__pycache__/to_process.cpython-310.pyc,, +anyio/__pycache__/to_thread.cpython-310.pyc,, +anyio/_backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +anyio/_backends/__pycache__/__init__.cpython-310.pyc,, +anyio/_backends/__pycache__/_asyncio.cpython-310.pyc,, +anyio/_backends/__pycache__/_trio.cpython-310.pyc,, +anyio/_backends/_asyncio.py,sha256=fgwZmYnGOxT_pX0OZTPPgRdFqKLjnKvQUk7tsfuNmfM,67056 +anyio/_backends/_trio.py,sha256=EJAj0tNi0JRM2y3QWP7oS4ct7wnjMSYDG8IZUWMta-E,30035 +anyio/_core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +anyio/_core/__pycache__/__init__.cpython-310.pyc,, +anyio/_core/__pycache__/_compat.cpython-310.pyc,, +anyio/_core/__pycache__/_eventloop.cpython-310.pyc,, +anyio/_core/__pycache__/_exceptions.cpython-310.pyc,, +anyio/_core/__pycache__/_fileio.cpython-310.pyc,, +anyio/_core/__pycache__/_resources.cpython-310.pyc,, +anyio/_core/__pycache__/_signals.cpython-310.pyc,, +anyio/_core/__pycache__/_sockets.cpython-310.pyc,, +anyio/_core/__pycache__/_streams.cpython-310.pyc,, +anyio/_core/__pycache__/_subprocesses.cpython-310.pyc,, +anyio/_core/__pycache__/_synchronization.cpython-310.pyc,, +anyio/_core/__pycache__/_tasks.cpython-310.pyc,, +anyio/_core/__pycache__/_testing.cpython-310.pyc,, +anyio/_core/__pycache__/_typedattr.cpython-310.pyc,, +anyio/_core/_compat.py,sha256=XZfBUInEt7jaiTBI2Qbul7EpJdngbwTtG4Qj26un1YE,5726 +anyio/_core/_eventloop.py,sha256=xJ8KflV1bJ9GAuQRr4o1ojv8wWya4nt_XARta8uLPwc,4083 +anyio/_core/_exceptions.py,sha256=uOrN5l98o6UrOU6O3kPf0VCDl_zPP-kgZs4IyaLVgwU,2916 +anyio/_core/_fileio.py,sha256=DWuIul5izCocmJpgqDDNKc_GhMUwayHKdM5R-sbT_A8,18026 +anyio/_core/_resources.py,sha256=NbmU5O5UX3xEyACnkmYX28Fmwdl-f-ny0tHym26e0w0,435 +anyio/_core/_signals.py,sha256=KKkZAYL08auydjZnK9S4FQsxx555jT4gXAMcTXdNaok,863 +anyio/_core/_sockets.py,sha256=szcPd7kKBmlHnx8g_KJWZo2k6syouRNF2614ZrtqiV0,20667 +anyio/_core/_streams.py,sha256=5gryxQiUisED8uFUAHje5O44RL9wyndNMANzzQWUn1U,1518 +anyio/_core/_subprocesses.py,sha256=OSAcLAsjfCplXlRyTjWonfS1xU8d5MaZblXYqqY-BM4,4977 +anyio/_core/_synchronization.py,sha256=Uquo_52vZ7iZzDDoaN_j-N7jeyAlefzOZ8Pxt9mU6gY,16747 +anyio/_core/_tasks.py,sha256=1wZZWlpDkr6w3kMD629vzJDkPselDvx4XVElgTCVwyM,5316 
+anyio/_core/_testing.py,sha256=7Yll-DOI0uIlIF5VHLUpGGyDPWtDEjFZ85-6ZniwIJU,2217 +anyio/_core/_typedattr.py,sha256=8o0gwQYSl04zlO9uHqcHu1T6hOw7peY9NW1mOX5DKnY,2551 +anyio/abc/__init__.py,sha256=UkC-KDbyIoKeDUDhJciwANSoyzz_qaFh4Fb7_AvwjZc,2159 +anyio/abc/__pycache__/__init__.cpython-310.pyc,, +anyio/abc/__pycache__/_resources.cpython-310.pyc,, +anyio/abc/__pycache__/_sockets.cpython-310.pyc,, +anyio/abc/__pycache__/_streams.cpython-310.pyc,, +anyio/abc/__pycache__/_subprocesses.cpython-310.pyc,, +anyio/abc/__pycache__/_tasks.cpython-310.pyc,, +anyio/abc/__pycache__/_testing.cpython-310.pyc,, +anyio/abc/_resources.py,sha256=h1rkzr3E0MFqdXLh9aLLXe-A5W7k_Jc-5XzNr6SJ4w4,763 +anyio/abc/_sockets.py,sha256=WWYJ6HndKCEuvobAPDkmX0tjwN2FOxf3eTGb1DB7wHE,5243 +anyio/abc/_streams.py,sha256=yGhOmlVI3W9whmzPuewwYQ2BrKhrUFuWZ4zpVLWOK84,6584 +anyio/abc/_subprocesses.py,sha256=r-totaRbFX6kKV-4WTeuswz8n01aap8cvkYVQCRKN0M,2067 +anyio/abc/_tasks.py,sha256=a_5DLyiCbp0K57LJPOyF-PZyXmUcv_p9VRXPFj_K03M,3413 +anyio/abc/_testing.py,sha256=Eub7gXJ0tVPo_WN5iJAw10FrvC7C1uaL3b2neGr_pfs,1924 +anyio/from_thread.py,sha256=aUVKXctPgZ5wK3p5VTyrtjDj9tSQSrH6xCjBuo-hv3A,16563 +anyio/lowlevel.py,sha256=cOTncxRW5KeswqYQQdp0pfAw6OFWXius1SPhCYwHZL4,4647 +anyio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +anyio/pytest_plugin.py,sha256=_Txgl0-I3kO1rk_KATXmIUV57C34hajcJCGcgV26CU0,5022 +anyio/streams/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +anyio/streams/__pycache__/__init__.cpython-310.pyc,, +anyio/streams/__pycache__/buffered.cpython-310.pyc,, +anyio/streams/__pycache__/file.cpython-310.pyc,, +anyio/streams/__pycache__/memory.cpython-310.pyc,, +anyio/streams/__pycache__/stapled.cpython-310.pyc,, +anyio/streams/__pycache__/text.cpython-310.pyc,, +anyio/streams/__pycache__/tls.cpython-310.pyc,, +anyio/streams/buffered.py,sha256=2ifplNLwT73d1UKBxrkFdlC9wTAze9LhPL7pt_7cYgY,4473 +anyio/streams/file.py,sha256=-NP6jMcUd2f1VJwgcxgiRHdEsNnhE0lANl0ov_i7FrE,4356 +anyio/streams/memory.py,sha256=QZhc5qdomBpGCgrUVWAaqEBxI0oklVxK_62atW6tnNk,9274 +anyio/streams/stapled.py,sha256=9u2GxpiOPsGtgO1qsj2tVoW4b8bgiwp5rSDs1BFKkLM,4275 +anyio/streams/text.py,sha256=1K4ZCLKl2b7yywrW6wKEeMu3xyQHE_T0aU5_oC9GPTE,5043 +anyio/streams/tls.py,sha256=TbdCz1KtfEnp3mxHvkROXRefhE6S1LHiwgWiJX8zYaU,12099 +anyio/to_process.py,sha256=_RSsG8UME2nGxeFEdg3OEfv9XshSQwrMU7DAbwWGx9U,9242 +anyio/to_thread.py,sha256=HVpTvBei2sSXgJJeNKdwhJwQaW76LDbb1htQ-Mc6zDs,2146 diff --git a/evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/REQUESTED b/evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/WHEEL b/evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..1f37c02f2eb2e26b306202feaccb31e522b8b169 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.40.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/top_level.txt b/evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..c77c069ecc9b7f8b1f97dbcfec905725db0253a8 --- /dev/null +++ 
b/evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/top_level.txt @@ -0,0 +1 @@ +anyio diff --git a/evalkit_internvl/lib/python3.10/site-packages/contourpy-1.3.0.dist-info/INSTALLER b/evalkit_internvl/lib/python3.10/site-packages/contourpy-1.3.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/contourpy-1.3.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/evalkit_internvl/lib/python3.10/site-packages/contourpy-1.3.0.dist-info/LICENSE b/evalkit_internvl/lib/python3.10/site-packages/contourpy-1.3.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..96837bd9ed70a5a818227f2e1eb0db263dcd10ac --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/contourpy-1.3.0.dist-info/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2021-2024, ContourPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/evalkit_internvl/lib/python3.10/site-packages/contourpy-1.3.0.dist-info/WHEEL b/evalkit_internvl/lib/python3.10/site-packages/contourpy-1.3.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..4e4c38ae320920b8f083b87f408214cdecd350d2 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/contourpy-1.3.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: meson +Root-Is-Purelib: false +Tag: cp310-cp310-manylinux_2_17_x86_64 +Tag: cp310-cp310-manylinux2014_x86_64 + diff --git a/evalkit_internvl/lib/python3.10/site-packages/dateutil/__pycache__/easter.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/dateutil/__pycache__/easter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14ca10c83d8d9af43e92aba75c464ddc2296bc82 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/dateutil/__pycache__/easter.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so b/evalkit_internvl/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c73536aa4107b965c5be8d6b418204a32760105b --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6074265dd34a9e3432470e05c02d1670d530f8201a34419216e7b6eb84757dfb +size 771776 diff --git a/evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg-0.5.1.dist-info/LICENSE b/evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg-0.5.1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..fc87aa36d5a15b67f694a2a8db5dfb9f7ec55e22 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg-0.5.1.dist-info/LICENSE @@ -0,0 +1,25 @@ +BSD 2-Clause License + +Copyright (c) 2019, imageio +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg-0.5.1.dist-info/METADATA b/evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg-0.5.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..ebb7d35931ef8c9fb39b52894a5c81f903a8aa7b --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg-0.5.1.dist-info/METADATA @@ -0,0 +1,41 @@ +Metadata-Version: 2.1 +Name: imageio-ffmpeg +Version: 0.5.1 +Summary: FFMPEG wrapper for Python +Home-page: https://github.com/imageio/imageio-ffmpeg +Download-URL: http://pypi.python.org/pypi/imageio-ffmpeg +Author: imageio contributors +Author-email: almar.klein@gmail.com +License: BSD-2-Clause +Keywords: video ffmpeg +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Provides: imageio_ffmpeg +Requires-Python: >=3.5 +License-File: LICENSE +Requires-Dist: setuptools + +FFMPEG wrapper for Python. + +Note that the platform-specific wheels contain the binary executable +of ffmpeg, which makes this package around 60 MiB in size. +I guess that's the cost for being able to read/write video files. + +For Linux users: the above is not the case when installing via your +Linux package manager (if that is possible), because this package would +simply depend on ffmpeg in that case. 
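The METADATA above explains the wheel's size: platform-specific wheels bundle a standalone ffmpeg executable (the ~70 MB LFS-tracked binary added below). A short usage sketch for locating that bundled binary through the package's public helpers; the printed path is illustrative.

# Locate the ffmpeg binary bundled with imageio-ffmpeg.
from imageio_ffmpeg import get_ffmpeg_exe, get_ffmpeg_version

exe_path = get_ffmpeg_exe()  # e.g. .../imageio_ffmpeg/binaries/ffmpeg-linux64-v4.2.2
print(exe_path, get_ffmpeg_version())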
diff --git a/evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_definitions.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_definitions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64ff6d29ebd89bd99078739c559d5da6625dc01d Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_definitions.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_parsing.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_parsing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b70737fdab192f7d91297d196b4d093d98f04cb5 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_parsing.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_utils.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14522753d085f8e041c9ddef3433a754eca6fa6a Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_utils.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/binaries/ffmpeg-linux64-v4.2.2 b/evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/binaries/ffmpeg-linux64-v4.2.2 new file mode 100644 index 0000000000000000000000000000000000000000..a62e97257c2a4a064b218097699b973822c2a7ca --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/binaries/ffmpeg-linux64-v4.2.2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:700073daef5c23bbcb18c2eae60553a454a5221ec19b4a88c8c367a664671a7c +size 73807592 diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/_utils.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0b7cc64ee51695fe18e2fc8a819696e0246b54f8 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/_utils.py @@ -0,0 +1,83 @@ +# Adapted from https://stackoverflow.com/a/9558001/2536294 + +import ast +from dataclasses import dataclass +import operator as op + + +from ._multiprocessing_helpers import mp + +if mp is not None: + from .externals.loky.process_executor import _ExceptionWithTraceback + + +# supported operators +operators = { + ast.Add: op.add, + ast.Sub: op.sub, + ast.Mult: op.mul, + ast.Div: op.truediv, + ast.FloorDiv: op.floordiv, + ast.Mod: op.mod, + ast.Pow: op.pow, + ast.USub: op.neg, +} + + +def eval_expr(expr): + """ + >>> eval_expr('2*6') + 12 + >>> eval_expr('2**6') + 64 + >>> eval_expr('1 + 2*3**(4) / (6 + -7)') + -161.0 + """ + try: + return eval_(ast.parse(expr, mode="eval").body) + except (TypeError, SyntaxError, KeyError) as e: + raise ValueError( + f"{expr!r} is not a valid or supported arithmetic expression." 
+    ) from e
+
+
+def eval_(node):
+    if isinstance(node, ast.Constant):  # <number>
+        return node.value
+    elif isinstance(node, ast.BinOp):  # <left> <operator> <right>
+        return operators[type(node.op)](eval_(node.left), eval_(node.right))
+    elif isinstance(node, ast.UnaryOp):  # <operator> <operand> e.g., -1
+        return operators[type(node.op)](eval_(node.operand))
+    else:
+        raise TypeError(node)
+
+
+@dataclass(frozen=True)
+class _Sentinel:
+    """A sentinel to mark a parameter as not explicitly set"""
+    default_value: object
+
+    def __repr__(self):
+        return f"default({self.default_value!r})"
+
+
+class _TracebackCapturingWrapper:
+    """Protect function call and return error with traceback."""
+
+    def __init__(self, func):
+        self.func = func
+
+    def __call__(self, **kwargs):
+        try:
+            return self.func(**kwargs)
+        except BaseException as e:
+            return _ExceptionWithTraceback(e)
+
+
+def _retrieve_traceback_capturing_wrapped_call(out):
+    if isinstance(out, _ExceptionWithTraceback):
+        rebuild, args = out.__reduce__()
+        out = rebuild(*args)
+    if isinstance(out, BaseException):
+        raise out
+    return out
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/compressor.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/compressor.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d9e2618a48339e1af8cf2da573fd0af8c96f0b0
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/compressor.py
@@ -0,0 +1,570 @@
+"""Classes and functions for managing compressors."""
+
+import io
+import zlib
+from joblib.backports import LooseVersion
+
+try:
+    from threading import RLock
+except ImportError:
+    from dummy_threading import RLock
+
+try:
+    import bz2
+except ImportError:
+    bz2 = None
+
+try:
+    import lz4
+    from lz4.frame import LZ4FrameFile
+except ImportError:
+    lz4 = None
+
+try:
+    import lzma
+except ImportError:
+    lzma = None
+
+
+LZ4_NOT_INSTALLED_ERROR = ('LZ4 is not installed. Install it with pip: '
+                           'https://python-lz4.readthedocs.io/')
+
+# Registered compressors
+_COMPRESSORS = {}
+
+# Magic numbers of supported compression file formats.
+_ZFILE_PREFIX = b'ZF'  # used with pickle files created before 0.9.3.
+_ZLIB_PREFIX = b'\x78'
+_GZIP_PREFIX = b'\x1f\x8b'
+_BZ2_PREFIX = b'BZ'
+_XZ_PREFIX = b'\xfd\x37\x7a\x58\x5a'
+_LZMA_PREFIX = b'\x5d\x00'
+_LZ4_PREFIX = b'\x04\x22\x4D\x18'
+
+
+def register_compressor(compressor_name, compressor,
+                        force=False):
+    """Register a new compressor.
+
+    Parameters
+    ----------
+    compressor_name: str.
+        The name of the compressor.
+    compressor: CompressorWrapper
+        An instance of a 'CompressorWrapper'.
+    """
+    global _COMPRESSORS
+    if not isinstance(compressor_name, str):
+        raise ValueError("Compressor name should be a string, "
+                         "'{}' given.".format(compressor_name))
+
+    if not isinstance(compressor, CompressorWrapper):
+        raise ValueError("Compressor should implement the CompressorWrapper "
+                         "interface, '{}' given.".format(compressor))
+
+    if (compressor.fileobj_factory is not None and
+            (not hasattr(compressor.fileobj_factory, 'read') or
+             not hasattr(compressor.fileobj_factory, 'write') or
+             not hasattr(compressor.fileobj_factory, 'seek') or
+             not hasattr(compressor.fileobj_factory, 'tell'))):
+        raise ValueError("Compressor 'fileobj_factory' attribute should "
+                         "implement the file object interface, '{}' given."
+                         .format(compressor.fileobj_factory))
+
+    if compressor_name in _COMPRESSORS and not force:
+        raise ValueError("Compressor '{}' already registered."
+ .format(compressor_name)) + + _COMPRESSORS[compressor_name] = compressor + + +class CompressorWrapper(): + """A wrapper around a compressor file object. + + Attributes + ---------- + obj: a file-like object + The object must implement the buffer interface and will be used + internally to compress/decompress the data. + prefix: bytestring + A bytestring corresponding to the magic number that identifies the + file format associated to the compressor. + extension: str + The file extension used to automatically select this compressor during + a dump to a file. + """ + + def __init__(self, obj, prefix=b'', extension=''): + self.fileobj_factory = obj + self.prefix = prefix + self.extension = extension + + def compressor_file(self, fileobj, compresslevel=None): + """Returns an instance of a compressor file object.""" + if compresslevel is None: + return self.fileobj_factory(fileobj, 'wb') + else: + return self.fileobj_factory(fileobj, 'wb', + compresslevel=compresslevel) + + def decompressor_file(self, fileobj): + """Returns an instance of a decompressor file object.""" + return self.fileobj_factory(fileobj, 'rb') + + +class BZ2CompressorWrapper(CompressorWrapper): + + prefix = _BZ2_PREFIX + extension = '.bz2' + + def __init__(self): + if bz2 is not None: + self.fileobj_factory = bz2.BZ2File + else: + self.fileobj_factory = None + + def _check_versions(self): + if bz2 is None: + raise ValueError('bz2 module is not compiled on your python ' + 'standard library.') + + def compressor_file(self, fileobj, compresslevel=None): + """Returns an instance of a compressor file object.""" + self._check_versions() + if compresslevel is None: + return self.fileobj_factory(fileobj, 'wb') + else: + return self.fileobj_factory(fileobj, 'wb', + compresslevel=compresslevel) + + def decompressor_file(self, fileobj): + """Returns an instance of a decompressor file object.""" + self._check_versions() + fileobj = self.fileobj_factory(fileobj, 'rb') + return fileobj + + +class LZMACompressorWrapper(CompressorWrapper): + + prefix = _LZMA_PREFIX + extension = '.lzma' + _lzma_format_name = 'FORMAT_ALONE' + + def __init__(self): + if lzma is not None: + self.fileobj_factory = lzma.LZMAFile + self._lzma_format = getattr(lzma, self._lzma_format_name) + else: + self.fileobj_factory = None + + def _check_versions(self): + if lzma is None: + raise ValueError('lzma module is not compiled on your python ' + 'standard library.') + + def compressor_file(self, fileobj, compresslevel=None): + """Returns an instance of a compressor file object.""" + if compresslevel is None: + return self.fileobj_factory(fileobj, 'wb', + format=self._lzma_format) + else: + return self.fileobj_factory(fileobj, 'wb', + format=self._lzma_format, + preset=compresslevel) + + def decompressor_file(self, fileobj): + """Returns an instance of a decompressor file object.""" + return lzma.LZMAFile(fileobj, 'rb') + + +class XZCompressorWrapper(LZMACompressorWrapper): + + prefix = _XZ_PREFIX + extension = '.xz' + _lzma_format_name = 'FORMAT_XZ' + + +class LZ4CompressorWrapper(CompressorWrapper): + + prefix = _LZ4_PREFIX + extension = '.lz4' + + def __init__(self): + if lz4 is not None: + self.fileobj_factory = LZ4FrameFile + else: + self.fileobj_factory = None + + def _check_versions(self): + if lz4 is None: + raise ValueError(LZ4_NOT_INSTALLED_ERROR) + lz4_version = lz4.__version__ + if lz4_version.startswith("v"): + lz4_version = lz4_version[1:] + if LooseVersion(lz4_version) < LooseVersion('0.19'): + raise ValueError(LZ4_NOT_INSTALLED_ERROR) + + def 
compressor_file(self, fileobj, compresslevel=None): + """Returns an instance of a compressor file object.""" + self._check_versions() + if compresslevel is None: + return self.fileobj_factory(fileobj, 'wb') + else: + return self.fileobj_factory(fileobj, 'wb', + compression_level=compresslevel) + + def decompressor_file(self, fileobj): + """Returns an instance of a decompressor file object.""" + self._check_versions() + return self.fileobj_factory(fileobj, 'rb') + + +############################################################################### +# base file compression/decompression object definition +_MODE_CLOSED = 0 +_MODE_READ = 1 +_MODE_READ_EOF = 2 +_MODE_WRITE = 3 +_BUFFER_SIZE = 8192 + + +class BinaryZlibFile(io.BufferedIOBase): + """A file object providing transparent zlib (de)compression. + + TODO python2_drop: is it still needed since we dropped Python 2 support A + BinaryZlibFile can act as a wrapper for an existing file object, or refer + directly to a named file on disk. + + Note that BinaryZlibFile provides only a *binary* file interface: data read + is returned as bytes, and data to be written should be given as bytes. + + This object is an adaptation of the BZ2File object and is compatible with + versions of python >= 2.7. + + If filename is a str or bytes object, it gives the name + of the file to be opened. Otherwise, it should be a file object, + which will be used to read or write the compressed data. + + mode can be 'rb' for reading (default) or 'wb' for (over)writing + + If mode is 'wb', compresslevel can be a number between 1 + and 9 specifying the level of compression: 1 produces the least + compression, and 9 produces the most compression. 3 is the default. + """ + + wbits = zlib.MAX_WBITS + + def __init__(self, filename, mode="rb", compresslevel=3): + # This lock must be recursive, so that BufferedIOBase's + # readline(), readlines() and writelines() don't deadlock. + self._lock = RLock() + self._fp = None + self._closefp = False + self._mode = _MODE_CLOSED + self._pos = 0 + self._size = -1 + self.compresslevel = compresslevel + + if not isinstance(compresslevel, int) or not (1 <= compresslevel <= 9): + raise ValueError("'compresslevel' must be an integer " + "between 1 and 9. You provided 'compresslevel={}'" + .format(compresslevel)) + + if mode == "rb": + self._mode = _MODE_READ + self._decompressor = zlib.decompressobj(self.wbits) + self._buffer = b"" + self._buffer_offset = 0 + elif mode == "wb": + self._mode = _MODE_WRITE + self._compressor = zlib.compressobj(self.compresslevel, + zlib.DEFLATED, self.wbits, + zlib.DEF_MEM_LEVEL, 0) + else: + raise ValueError("Invalid mode: %r" % (mode,)) + + if isinstance(filename, str): + self._fp = io.open(filename, mode) + self._closefp = True + elif hasattr(filename, "read") or hasattr(filename, "write"): + self._fp = filename + else: + raise TypeError("filename must be a str or bytes object, " + "or a file") + + def close(self): + """Flush and close the file. + + May be called more than once without error. Once the file is + closed, any other operation on it will raise a ValueError. 
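# Editorial aside -- a minimal round-trip sketch for the BinaryZlibFile
# class defined above (raw zlib stream, binary-only interface). The path
# is illustrative; compresslevel defaults to 3 as documented, and seek()
# is emulated (rewind plus forward skip) per the class's own docs.
from joblib.compressor import BinaryZlibFile

with BinaryZlibFile("/tmp/demo.z", mode="wb", compresslevel=3) as f:
    f.write(b"x" * 10_000)          # returns len(data) == 10000
with BinaryZlibFile("/tmp/demo.z", mode="rb") as f:
    f.seek(5_000)                    # emulated seek: decompress and discard
    assert f.read() == b"x" * 5_000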
+ """ + with self._lock: + if self._mode == _MODE_CLOSED: + return + try: + if self._mode in (_MODE_READ, _MODE_READ_EOF): + self._decompressor = None + elif self._mode == _MODE_WRITE: + self._fp.write(self._compressor.flush()) + self._compressor = None + finally: + try: + if self._closefp: + self._fp.close() + finally: + self._fp = None + self._closefp = False + self._mode = _MODE_CLOSED + self._buffer = b"" + self._buffer_offset = 0 + + @property + def closed(self): + """True if this file is closed.""" + return self._mode == _MODE_CLOSED + + def fileno(self): + """Return the file descriptor for the underlying file.""" + self._check_not_closed() + return self._fp.fileno() + + def seekable(self): + """Return whether the file supports seeking.""" + return self.readable() and self._fp.seekable() + + def readable(self): + """Return whether the file was opened for reading.""" + self._check_not_closed() + return self._mode in (_MODE_READ, _MODE_READ_EOF) + + def writable(self): + """Return whether the file was opened for writing.""" + self._check_not_closed() + return self._mode == _MODE_WRITE + + # Mode-checking helper functions. + + def _check_not_closed(self): + if self.closed: + fname = getattr(self._fp, 'name', None) + msg = "I/O operation on closed file" + if fname is not None: + msg += " {}".format(fname) + msg += "." + raise ValueError(msg) + + def _check_can_read(self): + if self._mode not in (_MODE_READ, _MODE_READ_EOF): + self._check_not_closed() + raise io.UnsupportedOperation("File not open for reading") + + def _check_can_write(self): + if self._mode != _MODE_WRITE: + self._check_not_closed() + raise io.UnsupportedOperation("File not open for writing") + + def _check_can_seek(self): + if self._mode not in (_MODE_READ, _MODE_READ_EOF): + self._check_not_closed() + raise io.UnsupportedOperation("Seeking is only supported " + "on files open for reading") + if not self._fp.seekable(): + raise io.UnsupportedOperation("The underlying file object " + "does not support seeking") + + # Fill the readahead buffer if it is empty. Returns False on EOF. + def _fill_buffer(self): + if self._mode == _MODE_READ_EOF: + return False + # Depending on the input data, our call to the decompressor may not + # return any data. In this case, try again after reading another block. + while self._buffer_offset == len(self._buffer): + try: + rawblock = (self._decompressor.unused_data or + self._fp.read(_BUFFER_SIZE)) + if not rawblock: + raise EOFError + except EOFError: + # End-of-stream marker and end of file. We're good. + self._mode = _MODE_READ_EOF + self._size = self._pos + return False + else: + self._buffer = self._decompressor.decompress(rawblock) + self._buffer_offset = 0 + return True + + # Read data until EOF. + # If return_data is false, consume the data without returning it. + def _read_all(self, return_data=True): + # The loop assumes that _buffer_offset is 0. Ensure that this is true. + self._buffer = self._buffer[self._buffer_offset:] + self._buffer_offset = 0 + + blocks = [] + while self._fill_buffer(): + if return_data: + blocks.append(self._buffer) + self._pos += len(self._buffer) + self._buffer = b"" + if return_data: + return b"".join(blocks) + + # Read a block of up to n bytes. + # If return_data is false, consume the data without returning it. + def _read_block(self, n_bytes, return_data=True): + # If we have enough data buffered, return immediately. 
+ end = self._buffer_offset + n_bytes + if end <= len(self._buffer): + data = self._buffer[self._buffer_offset: end] + self._buffer_offset = end + self._pos += len(data) + return data if return_data else None + + # The loop assumes that _buffer_offset is 0. Ensure that this is true. + self._buffer = self._buffer[self._buffer_offset:] + self._buffer_offset = 0 + + blocks = [] + while n_bytes > 0 and self._fill_buffer(): + if n_bytes < len(self._buffer): + data = self._buffer[:n_bytes] + self._buffer_offset = n_bytes + else: + data = self._buffer + self._buffer = b"" + if return_data: + blocks.append(data) + self._pos += len(data) + n_bytes -= len(data) + if return_data: + return b"".join(blocks) + + def read(self, size=-1): + """Read up to size uncompressed bytes from the file. + + If size is negative or omitted, read until EOF is reached. + Returns b'' if the file is already at EOF. + """ + with self._lock: + self._check_can_read() + if size == 0: + return b"" + elif size < 0: + return self._read_all() + else: + return self._read_block(size) + + def readinto(self, b): + """Read up to len(b) bytes into b. + + Returns the number of bytes read (0 for EOF). + """ + with self._lock: + return io.BufferedIOBase.readinto(self, b) + + def write(self, data): + """Write a byte string to the file. + + Returns the number of uncompressed bytes written, which is + always len(data). Note that due to buffering, the file on disk + may not reflect the data written until close() is called. + """ + with self._lock: + self._check_can_write() + # Convert data type if called by io.BufferedWriter. + if isinstance(data, memoryview): + data = data.tobytes() + + compressed = self._compressor.compress(data) + self._fp.write(compressed) + self._pos += len(data) + return len(data) + + # Rewind the file to the beginning of the data stream. + def _rewind(self): + self._fp.seek(0, 0) + self._mode = _MODE_READ + self._pos = 0 + self._decompressor = zlib.decompressobj(self.wbits) + self._buffer = b"" + self._buffer_offset = 0 + + def seek(self, offset, whence=0): + """Change the file position. + + The new position is specified by offset, relative to the + position indicated by whence. Values for whence are: + + 0: start of stream (default); offset must not be negative + 1: current stream position + 2: end of stream; offset must not be positive + + Returns the new file position. + + Note that seeking is emulated, so depending on the parameters, + this operation may be extremely slow. + """ + with self._lock: + self._check_can_seek() + + # Recalculate offset as an absolute file position. + if whence == 0: + pass + elif whence == 1: + offset = self._pos + offset + elif whence == 2: + # Seeking relative to EOF - we need to know the file's size. + if self._size < 0: + self._read_all(return_data=False) + offset = self._size + offset + else: + raise ValueError("Invalid value for whence: %s" % (whence,)) + + # Make it so that offset is the number of bytes to skip forward. + if offset < self._pos: + self._rewind() + else: + offset -= self._pos + + # Read and discard data until we reach the desired position. 
+            self._read_block(offset, return_data=False)
+
+            return self._pos
+
+    def tell(self):
+        """Return the current file position."""
+        with self._lock:
+            self._check_not_closed()
+            return self._pos
+
+
+class ZlibCompressorWrapper(CompressorWrapper):
+
+    def __init__(self):
+        CompressorWrapper.__init__(self, obj=BinaryZlibFile,
+                                   prefix=_ZLIB_PREFIX, extension='.z')
+
+
+class BinaryGzipFile(BinaryZlibFile):
+    """A file object providing transparent gzip (de)compression.
+
+    If filename is a str or bytes object, it gives the name
+    of the file to be opened. Otherwise, it should be a file object,
+    which will be used to read or write the compressed data.
+
+    mode can be 'rb' for reading (default) or 'wb' for (over)writing
+
+    If mode is 'wb', compresslevel can be a number between 1
+    and 9 specifying the level of compression: 1 produces the least
+    compression, and 9 produces the most compression. 3 is the default.
+    """
+
+    wbits = 31  # zlib compressor/decompressor wbits value for gzip format.
+
+
+class GzipCompressorWrapper(CompressorWrapper):
+
+    def __init__(self):
+        CompressorWrapper.__init__(self, obj=BinaryGzipFile,
+                                   prefix=_GZIP_PREFIX, extension='.gz')
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/disk.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/disk.py
new file mode 100644
index 0000000000000000000000000000000000000000..32fbb89f6dc6c9c7df532c5fefa14934f16321f6
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/disk.py
@@ -0,0 +1,136 @@
+"""
+Disk management utilities.
+"""
+
+# Authors: Gael Varoquaux
+#          Lars Buitinck
+# Copyright (c) 2010 Gael Varoquaux
+# License: BSD Style, 3 clauses.
+
+
+import os
+import sys
+import time
+import errno
+import shutil
+
+from multiprocessing import util
+
+
+try:
+    WindowsError
+except NameError:
+    WindowsError = OSError
+
+
+def disk_used(path):
+    """Return the disk usage in a directory."""
+    size = 0
+    for file in os.listdir(path) + ['.']:
+        stat = os.stat(os.path.join(path, file))
+        if hasattr(stat, 'st_blocks'):
+            size += stat.st_blocks * 512
+        else:
+            # on some platforms st_blocks is not available (e.g., Windows)
+            # approximate by rounding to the next multiple of 512
+            size += (stat.st_size // 512 + 1) * 512
+    # We need to convert to int to avoid having longs on some systems (we
+    # don't want longs to avoid problems with SQLite)
+    return int(size / 1024.)
+
+
+def memstr_to_bytes(text):
+    """Convert a memory text to its value in bytes."""
+    kilo = 1024
+    units = dict(K=kilo, M=kilo ** 2, G=kilo ** 3)
+    try:
+        size = int(units[text[-1]] * float(text[:-1]))
+    except (KeyError, ValueError) as e:
+        raise ValueError(
+            "Invalid literal for size given: %s (type %s); expected "
+            "something like '10G', '500M' or '50K'." % (text, type(text))) from e
+    return size
+
+
+def mkdirp(d):
+    """Ensure directory d exists (like mkdir -p on Unix).
+    No guarantee that the directory is writable.
+    """
+    try:
+        os.makedirs(d)
+    except OSError as e:
+        if e.errno != errno.EEXIST:
+            raise
+
+
+# If a rmtree operation fails in rm_subdirs, wait for this much time (in secs),
+# then retry up to RM_SUBDIRS_N_RETRY times. If it still fails, raise the
+# exception. This mechanism ensures that the sub-process gc has time to
+# collect and close the memmaps before we fail.
+RM_SUBDIRS_RETRY_TIME = 0.1
+RM_SUBDIRS_N_RETRY = 10
+
+
+def rm_subdirs(path, onerror=None):
+    """Remove all subdirectories in this path.
+
+    The directory indicated by `path` is left in place, and its subdirectories
+    are erased.
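# Editorial aside -- a short illustration of the size helpers defined
# above in joblib/disk.py (the units table maps K/M/G to powers of 1024;
# the assertions are illustrative, not part of this change):
from joblib.disk import memstr_to_bytes

assert memstr_to_bytes('50K') == 50 * 1024
assert memstr_to_bytes('500M') == 500 * 1024 ** 2
assert memstr_to_bytes('10G') == 10 * 1024 ** 3
# disk_used(path) similarly reports a directory's usage in kilobytes,
# rounding each file up to whole 512-byte blocks where st_blocks is missing.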
+
+    If onerror is set, it is called to handle the error with arguments (func,
+    path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
+    path is the argument to that function that caused it to fail; and
+    exc_info is a tuple returned by sys.exc_info(). If onerror is None,
+    an exception is raised.
+    """
+
+    # NOTE this code is adapted from the one in shutil.rmtree, and is
+    # just as fast
+
+    names = []
+    try:
+        names = os.listdir(path)
+    except os.error:
+        if onerror is not None:
+            onerror(os.listdir, path, sys.exc_info())
+        else:
+            raise
+
+    for name in names:
+        fullname = os.path.join(path, name)
+        delete_folder(fullname, onerror=onerror)
+
+
+def delete_folder(folder_path, onerror=None, allow_non_empty=True):
+    """Utility function to cleanup a temporary folder if it still exists."""
+    if os.path.isdir(folder_path):
+        if onerror is not None:
+            shutil.rmtree(folder_path, False, onerror)
+        else:
+            # allow the rmtree to fail once, wait and re-try.
+            # if the error is raised again, fail
+            err_count = 0
+            while True:
+                files = os.listdir(folder_path)
+                try:
+                    if len(files) == 0 or allow_non_empty:
+                        shutil.rmtree(
+                            folder_path, ignore_errors=False, onerror=None
+                        )
+                        util.debug(
+                            "Successfully deleted {}".format(folder_path))
+                        break
+                    else:
+                        raise OSError(
+                            "Expected empty folder {} but got {} "
+                            "files.".format(folder_path, len(files))
+                        )
+                except (OSError, WindowsError):
+                    err_count += 1
+                    if err_count > RM_SUBDIRS_N_RETRY:
+                        # The folder cannot be deleted right now. It may be
+                        # because some temporary files have not been deleted
+                        # yet.
+                        raise
+                    time.sleep(RM_SUBDIRS_RETRY_TIME)
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/pool.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/pool.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0c3549c17300632c30b3992dc9bfd6c72b18c5d
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/pool.py
@@ -0,0 +1,354 @@
+"""Custom implementation of multiprocessing.Pool with a custom pickler.
+
+This module provides efficient ways of working with data stored in
+shared memory with numpy.memmap arrays without inducing any memory
+copy between the parent and child processes.
+
+This module should not be imported if multiprocessing is not
+available as it implements subclasses of multiprocessing's Pool
+that use a custom alternative to SimpleQueue.
+
+"""
+# Author: Olivier Grisel
+# Copyright: 2012, Olivier Grisel
+# License: BSD 3 clause
+
+import copyreg
+import sys
+import warnings
+from time import sleep
+
+try:
+    WindowsError
+except NameError:
+    WindowsError = type(None)
+
+from pickle import Pickler
+
+from pickle import HIGHEST_PROTOCOL
+from io import BytesIO
+
+from ._memmapping_reducer import get_memmapping_reducers
+from ._memmapping_reducer import TemporaryResourcesManager
+from ._multiprocessing_helpers import mp, assert_spawning
+
+# We need the class definition to derive from it, not the multiprocessing.Pool
+# factory function
+from multiprocessing.pool import Pool
+
+try:
+    import numpy as np
+except ImportError:
+    np = None
+
+
+###############################################################################
+# Enable custom pickling in Pool queues
+
+class CustomizablePickler(Pickler):
+    """Pickler that accepts custom reducers.
+
+    TODO python2_drop: can this be simplified?
+
+    HIGHEST_PROTOCOL is selected by default as this pickler is used
+    to pickle ephemeral datastructures for interprocess communication,
+    hence no backward compatibility is required.
+
+    `reducers` is expected to be a dictionary with key/values
+    being `(type, callable)` pairs where `callable` is a function that,
+    given an instance of `type`, will return a tuple `(constructor,
+    tuple_of_objects)` to rebuild an instance out of the pickled
+    `tuple_of_objects` as would return a `__reduce__` method. See the
+    standard library documentation on pickling for more details.
+
+    """
+
+    # We override the pure Python pickler as it is the only way to be able to
+    # customize the dispatch table without side effects in Python 2.7
+    # to 3.2. For Python 3.3+ we leverage the new dispatch_table
+    # feature from https://bugs.python.org/issue14166 that makes it possible
+    # to use the C implementation of the Pickler, which is faster.
+
+    def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
+        Pickler.__init__(self, writer, protocol=protocol)
+        if reducers is None:
+            reducers = {}
+        if hasattr(Pickler, 'dispatch'):
+            # Make the dispatch registry an instance level attribute instead
+            # of a reference to the class dictionary under Python 2
+            self.dispatch = Pickler.dispatch.copy()
+        else:
+            # Under Python 3 initialize the dispatch table with a copy of the
+            # default registry
+            self.dispatch_table = copyreg.dispatch_table.copy()
+        for type, reduce_func in reducers.items():
+            self.register(type, reduce_func)
+
+    def register(self, type, reduce_func):
+        """Attach a reducer function to a given type in the dispatch table."""
+        if hasattr(Pickler, 'dispatch'):
+            # Python 2 pickler dispatching is not explicitly customizable.
+            # Let us use a closure to work around this limitation.
+            def dispatcher(self, obj):
+                reduced = reduce_func(obj)
+                self.save_reduce(obj=obj, *reduced)
+            self.dispatch[type] = dispatcher
+        else:
+            self.dispatch_table[type] = reduce_func
+
+
+class CustomizablePicklingQueue(object):
+    """Locked Pipe implementation that uses a customizable pickler.
+
+    This class is an alternative to the multiprocessing implementation
+    of SimpleQueue in order to make it possible to pass custom
+    pickling reducers, for instance to avoid memory copy when passing
+    memory mapped datastructures.
+
+    `reducers` is expected to be a dict with key/values being
+    `(type, callable)` pairs where `callable` is a function that, given an
+    instance of `type`, will return a tuple `(constructor, tuple_of_objects)`
+    to rebuild an instance out of the pickled `tuple_of_objects` as would
+    return a `__reduce__` method.
+
+    See the standard library documentation on pickling for more details.
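# Editorial aside -- a minimal sketch of the reducer mechanism described
# above: a reducer maps a type to a callable returning (constructor, args),
# exactly as a __reduce__ method would. The Point class is illustrative.
import pickle
from io import BytesIO
from joblib.pool import CustomizablePickler

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

def reduce_point(p):            # invoked for every Point being pickled
    return (Point, (p.x, p.y))  # (constructor, tuple_of_objects)

buf = BytesIO()
CustomizablePickler(buf, reducers={Point: reduce_point}).dump(Point(1, 2))
restored = pickle.loads(buf.getvalue())
assert (restored.x, restored.y) == (1, 2)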
+ """ + + def __init__(self, context, reducers=None): + self._reducers = reducers + self._reader, self._writer = context.Pipe(duplex=False) + self._rlock = context.Lock() + if sys.platform == 'win32': + self._wlock = None + else: + self._wlock = context.Lock() + self._make_methods() + + def __getstate__(self): + assert_spawning(self) + return (self._reader, self._writer, self._rlock, self._wlock, + self._reducers) + + def __setstate__(self, state): + (self._reader, self._writer, self._rlock, self._wlock, + self._reducers) = state + self._make_methods() + + def empty(self): + return not self._reader.poll() + + def _make_methods(self): + self._recv = recv = self._reader.recv + racquire, rrelease = self._rlock.acquire, self._rlock.release + + def get(): + racquire() + try: + return recv() + finally: + rrelease() + + self.get = get + + if self._reducers: + def send(obj): + buffer = BytesIO() + CustomizablePickler(buffer, self._reducers).dump(obj) + self._writer.send_bytes(buffer.getvalue()) + self._send = send + else: + self._send = send = self._writer.send + if self._wlock is None: + # writes to a message oriented win32 pipe are atomic + self.put = send + else: + wlock_acquire, wlock_release = ( + self._wlock.acquire, self._wlock.release) + + def put(obj): + wlock_acquire() + try: + return send(obj) + finally: + wlock_release() + + self.put = put + + +class PicklingPool(Pool): + """Pool implementation with customizable pickling reducers. + + This is useful to control how data is shipped between processes + and makes it possible to use shared memory without useless + copies induces by the default pickling methods of the original + objects passed as arguments to dispatch. + + `forward_reducers` and `backward_reducers` are expected to be + dictionaries with key/values being `(type, callable)` pairs where + `callable` is a function that, given an instance of `type`, will return a + tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the + pickled `tuple_of_objects` as would return a `__reduce__` method. + See the standard library documentation about pickling for more details. + + """ + + def __init__(self, processes=None, forward_reducers=None, + backward_reducers=None, **kwargs): + if forward_reducers is None: + forward_reducers = dict() + if backward_reducers is None: + backward_reducers = dict() + self._forward_reducers = forward_reducers + self._backward_reducers = backward_reducers + poolargs = dict(processes=processes) + poolargs.update(kwargs) + super(PicklingPool, self).__init__(**poolargs) + + def _setup_queues(self): + context = getattr(self, '_ctx', mp) + self._inqueue = CustomizablePicklingQueue(context, + self._forward_reducers) + self._outqueue = CustomizablePicklingQueue(context, + self._backward_reducers) + self._quick_put = self._inqueue._send + self._quick_get = self._outqueue._recv + + +class MemmappingPool(PicklingPool): + """Process pool that shares large arrays to avoid memory copy. + + This drop-in replacement for `multiprocessing.pool.Pool` makes + it possible to work efficiently with shared memory in a numpy + context. + + Existing instances of numpy.memmap are preserved: the child + suprocesses will have access to the same shared memory in the + original mode except for the 'w+' mode that is automatically + transformed as 'r+' to avoid zeroing the original data upon + instantiation. 
+
+    Furthermore large arrays from the parent process are automatically
+    dumped to a temporary folder on the filesystem so that child
+    processes can access their content via memmapping (file system
+    backed shared memory).
+
+    Note: it is important to call the terminate method to collect
+    the temporary folder used by the pool.
+
+    Parameters
+    ----------
+    processes: int, optional
+        Number of worker processes running concurrently in the pool.
+    initializer: callable, optional
+        Callable executed on worker process creation.
+    initargs: tuple, optional
+        Arguments passed to the initializer callable.
+    temp_folder: (str, callable) optional
+        If str:
+          Folder to be used by the pool for memmapping large arrays
+          for sharing memory with worker processes. If None, this will try in
+          order:
+          - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
+          - /dev/shm if the folder exists and is writable: this is a RAMdisk
+            filesystem available by default on modern Linux distributions,
+          - the default system temporary folder that can be overridden
+            with TMP, TMPDIR or TEMP environment variables, typically /tmp
+            under Unix operating systems.
+        If callable:
+            A callable in charge of dynamically resolving a temporary folder
+            for memmapping large arrays.
+    max_nbytes: int or None, optional, 1e6 by default
+        Threshold on the size of arrays passed to the workers that
+        triggers automated memory mapping in temp_folder.
+        Use None to disable memmapping of large arrays.
+    mmap_mode: {'r+', 'r', 'w+', 'c'}
+        Memmapping mode for numpy arrays passed to workers.
+        See 'max_nbytes' parameter documentation for more details.
+    forward_reducers: dictionary, optional
+        Reducers used to pickle objects passed from main process to worker
+        processes: see below.
+    backward_reducers: dictionary, optional
+        Reducers used to pickle return values from workers back to the
+        main process.
+    verbose: int, optional
+        Make it possible to monitor how the communication of numpy arrays
+        with the subprocess is handled (pickling or memmapping)
+    prewarm: bool or str, optional, "auto" by default.
+        If True, force a read on a newly memmapped array to make sure that
+        the OS pre-caches it in memory. This can be useful to avoid
+        concurrent disk access when the same data array is passed to
+        different worker processes. If "auto" (by default), prewarm is set
+        to True, unless the Linux shared memory partition /dev/shm is
+        available and used as temp folder.
+
+    `forward_reducers` and `backward_reducers` are expected to be
+    dictionaries with key/values being `(type, callable)` pairs where
+    `callable` is a function that, given an instance of `type`, will return
+    a tuple `(constructor, tuple_of_objects)` to rebuild an instance out
+    of the pickled `tuple_of_objects` as would return a `__reduce__`
+    method. See the standard library documentation on pickling for more
+    details.
+
+    """
+
+    def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6,
+                 mmap_mode='r', forward_reducers=None, backward_reducers=None,
+                 verbose=0, context_id=None, prewarm=False, **kwargs):
+
+        if context_id is not None:
+            warnings.warn('context_id is deprecated and ignored in joblib'
+                          ' 0.9.4 and will be removed in 0.11',
+                          DeprecationWarning)
+
+        manager = TemporaryResourcesManager(temp_folder)
+        self._temp_folder_manager = manager
+
+        # The usage of a temp_folder_resolver over a simple temp_folder is
+        # superfluous for multiprocessing pools, as they don't get reused, see
+        # get_memmapping_executor for more details.
We still use it for code + # simplicity. + forward_reducers, backward_reducers = \ + get_memmapping_reducers( + temp_folder_resolver=manager.resolve_temp_folder_name, + max_nbytes=max_nbytes, mmap_mode=mmap_mode, + forward_reducers=forward_reducers, + backward_reducers=backward_reducers, verbose=verbose, + unlink_on_gc_collect=False, prewarm=prewarm) + + poolargs = dict( + processes=processes, + forward_reducers=forward_reducers, + backward_reducers=backward_reducers) + poolargs.update(kwargs) + super(MemmappingPool, self).__init__(**poolargs) + + def terminate(self): + n_retries = 10 + for i in range(n_retries): + try: + super(MemmappingPool, self).terminate() + break + except OSError as e: + if isinstance(e, WindowsError): + # Workaround occasional "[Error 5] Access is denied" issue + # when trying to terminate a process under windows. + sleep(0.1) + if i + 1 == n_retries: + warnings.warn("Failed to terminate worker processes in" + " multiprocessing pool: %r" % e) + + # Clean up the temporary resources as the workers should now be off. + self._temp_folder_manager._clean_temporary_resources() + + @property + def _temp_folder(self): + # Legacy property in tests. could be removed if we refactored the + # memmapping tests. SHOULD ONLY BE USED IN TESTS! + # We cache this property because it is called late in the tests - at + # this point, all context have been unregistered, and + # resolve_temp_folder_name raises an error. + if getattr(self, '_cached_temp_folder', None) is not None: + return self._cached_temp_folder + else: + self._cached_temp_folder = self._temp_folder_manager.resolve_temp_folder_name() # noqa + return self._cached_temp_folder diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__init__.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/common.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36d4ff1859cff796d44ad5cc3fadd32c9af5082d Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/common.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_backports.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_backports.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de12b3d9ea31177ec03b20b95703ce7a7d7a6ecd Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_backports.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_cloudpickle_wrapper.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_cloudpickle_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e611b387b889ad2e39828d87c19d2d29b55773e Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_cloudpickle_wrapper.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..9e9b03ec326dc6a09a15357745a2ac5cdfb6c14b Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ebf2456a88da097452392630fa1b3c872457f9d Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_disk.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_disk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b13dbfe9dd48ea6505d112c14776e68b16ddf18 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_disk.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c92c57ff8134924950edfe7c62a5e0c8edafae6 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_hashing.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_hashing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e25715d795cd8a7f0f659f7fc061cfb263b793ea Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_hashing.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_init.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29246b3e7fdfcc293d87c77a67b34adb3fe3f8fd Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_init.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a381ca4ca388009127396ad66ce4f0eaacad5b9b Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_memmapping.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_memmapping.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00ebb7c3b0ea26d1635df66d9cb397f8ac32e469 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_memmapping.cpython-310.pyc differ diff --git 
a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..389f398bfe34521e765dfd91955da8af91977e39 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b5cfcf53a2096976ead7f7c1b32185a73a3cae9 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_missing_multiprocessing.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_missing_multiprocessing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6db251af5254c460e9ce04a2d5d787bc4d225ae Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_missing_multiprocessing.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9388c3657e9c9dbbe1c1a318c964a9863549e15 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3e2cd6bc8c415e10baa85c1e8f4bd2c814df590 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7225396d20bab37bf71bee05c0ba0f4c8cd5312 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..751f5560278a888472052cd2f061e7229237e39e Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..86f12aef46dff663a79b25c2ea7c2b2bcd418b0b Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5081e982962f9f7f56ae3acb5e4c9e79d5bec90a Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_testing.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3e11105f0bca34d690706ebbb2bf99a45359e72 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_testing.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef2340f2715777e581d55388558cd02e32070846 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/testutils.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/testutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40d2c90b320dc7d3fef6e2548a6aa44e669c6b86 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/testutils.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/common.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/common.py new file mode 100644 index 0000000000000000000000000000000000000000..b0ca0c6abd913bc37091ebae4bd6a0b64084d20f --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/common.py @@ -0,0 +1,84 @@ +""" +Small utilities for testing. 
+""" +import os +import gc +import sys + +from joblib._multiprocessing_helpers import mp +from joblib.testing import SkipTest, skipif + +try: + import lz4 +except ImportError: + lz4 = None + +IS_PYPY = hasattr(sys, "pypy_version_info") + +# A decorator to run tests only when numpy is available +try: + import numpy as np + + def with_numpy(func): + """A decorator to skip tests requiring numpy.""" + return func + +except ImportError: + def with_numpy(func): + """A decorator to skip tests requiring numpy.""" + def my_func(): + raise SkipTest('Test requires numpy') + return my_func + np = None + +# TODO: Turn this back on after refactoring yield based tests in test_hashing +# with_numpy = skipif(not np, reason='Test requires numpy.') + +# we use memory_profiler library for memory consumption checks +try: + from memory_profiler import memory_usage + + def with_memory_profiler(func): + """A decorator to skip tests requiring memory_profiler.""" + return func + + def memory_used(func, *args, **kwargs): + """Compute memory usage when executing func.""" + gc.collect() + mem_use = memory_usage((func, args, kwargs), interval=.001) + return max(mem_use) - min(mem_use) + +except ImportError: + def with_memory_profiler(func): + """A decorator to skip tests requiring memory_profiler.""" + def dummy_func(): + raise SkipTest('Test requires memory_profiler.') + return dummy_func + + memory_usage = memory_used = None + + +def force_gc_pypy(): + # The gc in pypy can be delayed. Force it to test the behavior when it + # will eventually be collected. + if IS_PYPY: + # Run gc.collect() twice to make sure the weakref is collected, as + # mentionned in the pypy doc: + # https://doc.pypy.org/en/latest/config/objspace.usemodules._weakref.html + import gc + gc.collect() + gc.collect() + + +with_multiprocessing = skipif( + mp is None, reason='Needs multiprocessing to run.') + + +with_dev_shm = skipif( + not os.path.exists('/dev/shm'), + reason='This test requires a large /dev/shm shared memory fs.') + +with_lz4 = skipif(lz4 is None, reason='Needs lz4 compression to run') + +without_lz4 = skipif( + lz4 is not None, reason='Needs lz4 not being installed to run') diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_backports.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_backports.py new file mode 100644 index 0000000000000000000000000000000000000000..4f5a3bd42353bf5a3d77243b9c607a3ec2b8cb1d --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_backports.py @@ -0,0 +1,35 @@ +import mmap + +from joblib.backports import make_memmap, concurrency_safe_rename +from joblib.test.common import with_numpy +from joblib.testing import parametrize +from joblib import Parallel, delayed + + +@with_numpy +def test_memmap(tmpdir): + fname = tmpdir.join('test.mmap').strpath + size = 5 * mmap.ALLOCATIONGRANULARITY + offset = mmap.ALLOCATIONGRANULARITY + 1 + memmap_obj = make_memmap(fname, shape=size, mode='w+', offset=offset) + assert memmap_obj.offset == offset + + +@parametrize('dst_content', [None, 'dst content']) +@parametrize('backend', [None, 'threading']) +def test_concurrency_safe_rename(tmpdir, dst_content, backend): + src_paths = [tmpdir.join('src_%d' % i) for i in range(4)] + for src_path in src_paths: + src_path.write('src content') + dst_path = tmpdir.join('dst') + if dst_content is not None: + dst_path.write(dst_content) + + Parallel(n_jobs=4, backend=backend)( + delayed(concurrency_safe_rename)(src_path.strpath, dst_path.strpath) + for src_path in 
src_paths + ) + assert dst_path.exists() + assert dst_path.read() == 'src content' + for src_path in src_paths: + assert not src_path.exists() diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_cloudpickle_wrapper.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_cloudpickle_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..57bf92f940b2a68f10863ea5c42be444c8e0d2b0 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_cloudpickle_wrapper.py @@ -0,0 +1,27 @@ +""" +Test that our implementation of wrap_non_picklable_objects properly +mimics the loky implementation. +""" + +from .._cloudpickle_wrapper import wrap_non_picklable_objects +from .._cloudpickle_wrapper import _my_wrap_non_picklable_objects + + +def a_function(x): + return x + + +class AClass(object): + + def __call__(self, x): + return x + + +def test_wrap_non_picklable_objects(): + # Mostly a smoke test: check that we can use callables in the same way + # with both our implementation of wrap_non_picklable_objects and the + # upstream one + for obj in (a_function, AClass()): + wrapped_obj = wrap_non_picklable_objects(obj) + my_wrapped_obj = _my_wrap_non_picklable_objects(obj) + assert wrapped_obj(1) == my_wrapped_obj(1) diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_config.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_config.py new file mode 100644 index 0000000000000000000000000000000000000000..56eeed31b0db103360771c52cae641d00edb1c3a --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_config.py @@ -0,0 +1,151 @@ +import os + +from joblib.parallel import parallel_config +from joblib.parallel import parallel_backend +from joblib.parallel import Parallel, delayed + +from joblib.parallel import BACKENDS +from joblib.parallel import DEFAULT_BACKEND +from joblib.parallel import EXTERNAL_BACKENDS + +from joblib._parallel_backends import LokyBackend +from joblib._parallel_backends import ThreadingBackend +from joblib._parallel_backends import MultiprocessingBackend + +from joblib.testing import parametrize, raises +from joblib.test.common import np, with_numpy +from joblib.test.common import with_multiprocessing +from joblib.test.test_parallel import check_memmap + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_global_parallel_backend(context): + default = Parallel()._backend + + pb = context('threading') + try: + assert isinstance(Parallel()._backend, ThreadingBackend) + finally: + pb.unregister() + assert type(Parallel()._backend) is type(default) + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_external_backends(context): + def register_foo(): + BACKENDS['foo'] = ThreadingBackend + + EXTERNAL_BACKENDS['foo'] = register_foo + try: + with context('foo'): + assert isinstance(Parallel()._backend, ThreadingBackend) + finally: + del EXTERNAL_BACKENDS['foo']
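As an illustration only (this sketch is not part of the vendored file): the tests that follow exercise joblib's configuration context managers, whose core pattern is that parallel_config sets defaults that later Parallel calls inherit, including in nested configs. This is real joblib API as used in the file itself; math.sqrt is just a stand-in workload.

import math
from joblib import Parallel, delayed, parallel_config

# Parallel() picks up the backend and n_jobs from the active config.
with parallel_config(backend="threading", n_jobs=2):
    out = Parallel()(delayed(math.sqrt)(i) for i in range(4))

# Nested configs inherit unset fields from the enclosing one, as
# test_parallel_config_nested below asserts.
with parallel_config(backend="threading"):
    with parallel_config(n_jobs=2):
        out = Parallel()(delayed(math.sqrt)(i) for i in range(4))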
+ + +@with_numpy +@with_multiprocessing +def test_parallel_config_no_backend(tmpdir): + # Check that parallel_config allows changing the config + # even if no backend is set. + with parallel_config(n_jobs=2, max_nbytes=1, temp_folder=tmpdir): + with Parallel(prefer="processes") as p: + assert isinstance(p._backend, LokyBackend) + assert p.n_jobs == 2 + + # Checks that memmapping is enabled + p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2) + assert len(os.listdir(tmpdir)) > 0 + + +@with_numpy +@with_multiprocessing +def test_parallel_config_params_explicit_set(tmpdir): + with parallel_config(n_jobs=3, max_nbytes=1, temp_folder=tmpdir): + with Parallel(n_jobs=2, prefer="processes", max_nbytes='1M') as p: + assert isinstance(p._backend, LokyBackend) + assert p.n_jobs == 2 + + # Checks that memmapping is disabled + with raises(TypeError, match="Expected np.memmap instance"): + p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2) + + +@parametrize("param", ["prefer", "require"]) +def test_parallel_config_bad_params(param): + # Check that an error is raised when setting a wrong backend + # hint or constraint + with raises(ValueError, match=f"{param}=wrong is not a valid"): + with parallel_config(**{param: "wrong"}): + Parallel() + + +def test_parallel_config_constructor_params(): + # Check that an error is raised when backend is None + # but backend constructor params are given + with raises(ValueError, match="only supported when backend is not None"): + with parallel_config(inner_max_num_threads=1): + pass + + with raises(ValueError, match="only supported when backend is not None"): + with parallel_config(backend_param=1): + pass + + +def test_parallel_config_nested(): + # Check that a nested configuration retrieves the info from the + # parent config and does not reset it. + + with parallel_config(n_jobs=2): + p = Parallel() + assert isinstance(p._backend, BACKENDS[DEFAULT_BACKEND]) + assert p.n_jobs == 2 + + with parallel_config(backend='threading'): + with parallel_config(n_jobs=2): + p = Parallel() + assert isinstance(p._backend, ThreadingBackend) + assert p.n_jobs == 2 + + with parallel_config(verbose=100): + with parallel_config(n_jobs=2): + p = Parallel() + assert p.verbose == 100 + assert p.n_jobs == 2 + + +@with_numpy +@with_multiprocessing +@parametrize('backend', ['multiprocessing', 'threading', + MultiprocessingBackend(), ThreadingBackend()]) +@parametrize("context", [parallel_config, parallel_backend]) +def test_threadpool_limitation_in_child_context_error(context, backend): + + with raises(AssertionError, match=r"does not acc.*inner_max_num_threads"): + context(backend, inner_max_num_threads=1) + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_parallel_n_jobs_none(context): + # Check that n_jobs=None is interpreted as "unset" in Parallel + # non-regression test for #1473 + with context(backend="threading", n_jobs=2): + with Parallel(n_jobs=None) as p: + assert p.n_jobs == 2 + + with context(backend="threading"): + default_n_jobs = Parallel().n_jobs + with Parallel(n_jobs=None) as p: + assert p.n_jobs == default_n_jobs + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_parallel_config_n_jobs_none(context): + # Check that n_jobs=None is interpreted as "explicitly set" in + # parallel_(config/backend) + # non-regression test for #1473 + with context(backend="threading", n_jobs=2): + with context(backend="threading", n_jobs=None): + # n_jobs=None resets n_jobs to the backend's default + with Parallel() as p: + assert p.n_jobs == 1 diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_dask.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_dask.py 
new file mode 100644 index 0000000000000000000000000000000000000000..aebe65525fc55f4593e6c83b5bfd4ffc76d945a7 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_dask.py @@ -0,0 +1,499 @@ +from __future__ import print_function, division, absolute_import +import os +import warnings + +import pytest +from random import random +from uuid import uuid4 +from time import sleep + +from .. import Parallel, delayed, parallel_config +from ..parallel import ThreadingBackend, AutoBatchingMixin +from .._dask import DaskDistributedBackend + +distributed = pytest.importorskip('distributed') +dask = pytest.importorskip('dask') + +# These imports need to be after the pytest.importorskip hence the noqa: E402 +from distributed import Client, LocalCluster, get_client # noqa: E402 +from distributed.metrics import time # noqa: E402 +# Note: pytest requires to manually import all fixtures used in the test +# and their dependencies. +from distributed.utils_test import cluster, inc, cleanup # noqa: E402, F401 + + +def noop(*args, **kwargs): + pass + + +def slow_raise_value_error(condition, duration=0.05): + sleep(duration) + if condition: + raise ValueError("condition evaluated to True") + + +def count_events(event_name, client): + worker_events = client.run(lambda dask_worker: dask_worker.log) + event_counts = {} + for w, events in worker_events.items(): + event_counts[w] = len([event for event in list(events) + if event[1] == event_name]) + return event_counts + + +def test_simple(loop): + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_config(backend='dask'): + seq = Parallel()(delayed(inc)(i) for i in range(10)) + assert seq == [inc(i) for i in range(10)] + + with pytest.raises(ValueError): + Parallel()(delayed(slow_raise_value_error)(i == 3) + for i in range(10)) + + seq = Parallel()(delayed(inc)(i) for i in range(10)) + assert seq == [inc(i) for i in range(10)] + + +def test_dask_backend_uses_autobatching(loop): + assert (DaskDistributedBackend.compute_batch_size + is AutoBatchingMixin.compute_batch_size) + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_config(backend='dask'): + with Parallel() as parallel: + # The backend should be initialized with a default + # batch size of 1: + backend = parallel._backend + assert isinstance(backend, DaskDistributedBackend) + assert backend.parallel is parallel + assert backend._effective_batch_size == 1 + + # Launch many short tasks that should trigger + # auto-batching: + parallel( + delayed(lambda: None)() + for _ in range(int(1e4)) + ) + assert backend._effective_batch_size > 10 + + +def random2(): + return random() + + +def test_dont_assume_function_purity(loop): + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_config(backend='dask'): + x, y = Parallel()(delayed(random2)() for i in range(2)) + assert x != y + + +@pytest.mark.parametrize("mixed", [True, False]) +def test_dask_funcname(loop, mixed): + from joblib._dask import Batch + if not mixed: + tasks = [delayed(inc)(i) for i in range(4)] + batch_repr = 'batch_of_inc_4_calls' + else: + tasks = [ + delayed(abs)(i) if i % 2 else delayed(inc)(i) for i in range(4) + ] + batch_repr = 'mixed_batch_of_inc_4_calls' + + assert repr(Batch(tasks)) == batch_repr + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: + with parallel_config(backend='dask'): + _ = 
Parallel(batch_size=2, pre_dispatch='all')(tasks) + + def f(dask_scheduler): + return list(dask_scheduler.transition_log) + batch_repr = batch_repr.replace('4', '2') + log = client.run_on_scheduler(f) + assert all('batch_of_inc' in tup[0] for tup in log) + + +def test_no_undesired_distributed_cache_hit(): + # Dask has a pickle cache for callables that are called many times. Because + # the dask backends used to wrap both the functions and the arguments + # under instances of the Batch callable class, this caching mechanism could + # lead to bugs as described in: https://github.com/joblib/joblib/pull/1055 + # The joblib-dask backend has been refactored to avoid bundling the + # arguments as an attribute of the Batch instance to avoid this problem. + # This test serves as a non-regression test. + + # Use a large number of input arguments to give the AutoBatchingMixin + # enough tasks to kick in. + lists = [[] for _ in range(100)] + np = pytest.importorskip('numpy') + X = np.arange(int(1e6)) + + def isolated_operation(list_, data=None): + if data is not None: + np.testing.assert_array_equal(data, X) + list_.append(uuid4().hex) + return list_ + + cluster = LocalCluster(n_workers=1, threads_per_worker=2) + client = Client(cluster) + try: + with parallel_config(backend='dask'): + # dispatches joblib.parallel.BatchedCalls + res = Parallel()( + delayed(isolated_operation)(list_) for list_ in lists + ) + + # The original arguments should not have been mutated as the mutation + # happens in the dask worker process. + assert lists == [[] for _ in range(100)] + + # Here we did not pass any large numpy array as argument to + # isolated_operation so no scattering event should happen under the + # hood. + counts = count_events('receive-from-scatter', client) + assert sum(counts.values()) == 0 + assert all([len(r) == 1 for r in res]) + + with parallel_config(backend='dask'): + # Append a large array which will be scattered by dask, and + # dispatch joblib._dask.Batch + res = Parallel()( + delayed(isolated_operation)(list_, data=X) for list_ in lists + ) + + # This time, auto-scattering should have kicked in. 
+ counts = count_events('receive-from-scatter', client) + assert sum(counts.values()) > 0 + assert all([len(r) == 1 for r in res]) + finally: + client.close(timeout=30) + cluster.close(timeout=30) + + +class CountSerialized(object): + def __init__(self, x): + self.x = x + self.count = 0 + + def __add__(self, other): + return self.x + getattr(other, 'x', other) + + __radd__ = __add__ + + def __reduce__(self): + self.count += 1 + return (CountSerialized, (self.x,)) + + +def add5(a, b, c, d=0, e=0): + return a + b + c + d + e + + +def test_manual_scatter(loop): + x = CountSerialized(1) + y = CountSerialized(2) + z = CountSerialized(3) + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_config(backend='dask', scatter=[x, y]): + f = delayed(add5) + tasks = [f(x, y, z, d=4, e=5), + f(x, z, y, d=5, e=4), + f(y, x, z, d=x, e=5), + f(z, z, x, d=z, e=y)] + expected = [func(*args, **kwargs) + for func, args, kwargs in tasks] + results = Parallel()(tasks) + + # Scatter must take a list/tuple + with pytest.raises(TypeError): + with parallel_config(backend='dask', loop=loop, scatter=1): + pass + + assert results == expected + + # Scattered variables only serialized once + assert x.count == 1 + assert y.count == 1 + # Depending on the version of distributed, the unscattered z variable + # is either pickled 4 or 6 times, possibly because of the memoization + # of objects that appear several times in the arguments of a delayed + # task. + assert z.count in (4, 6) + + +# When the same IOLoop is used for multiple clients in a row, use +# loop_in_thread instead of loop to prevent the Client from closing it. See +# dask/distributed #4112 +def test_auto_scatter(loop_in_thread): + np = pytest.importorskip('numpy') + data1 = np.ones(int(1e4), dtype=np.uint8) + data2 = np.ones(int(1e4), dtype=np.uint8) + data_to_process = ([data1] * 3) + ([data2] * 3) + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop_in_thread) as client: + with parallel_config(backend='dask'): + # Passing the same data as arg and kwarg triggers a single + # scatter operation whose result is reused. + Parallel()(delayed(noop)(data, data, i, opt=data) + for i, data in enumerate(data_to_process)) + # By default large array are automatically scattered with + # broadcast=1 which means that one worker must directly receive + # the data from the scatter operation once. + counts = count_events('receive-from-scatter', client) + assert counts[a['address']] + counts[b['address']] == 2 + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop_in_thread) as client: + with parallel_config(backend='dask'): + Parallel()(delayed(noop)(data1[:3], i) for i in range(5)) + # Small arrays are passed within the task definition without going + # through a scatter operation. 
+ counts = count_events('receive-from-scatter', client) + assert counts[a['address']] == 0 + assert counts[b['address']] == 0 + + +@pytest.mark.parametrize("retry_no", list(range(2))) +def test_nested_scatter(loop, retry_no): + + np = pytest.importorskip('numpy') + + NUM_INNER_TASKS = 10 + NUM_OUTER_TASKS = 10 + + def my_sum(x, i, j): + return np.sum(x) + + def outer_function_joblib(array, i): + client = get_client() # noqa + with parallel_config(backend="dask"): + results = Parallel()( + delayed(my_sum)(array[j:], i, j) for j in range( + NUM_INNER_TASKS) + ) + return sum(results) + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as _: + with parallel_config(backend="dask"): + my_array = np.ones(10000) + _ = Parallel()( + delayed(outer_function_joblib)( + my_array[i:], i) for i in range(NUM_OUTER_TASKS) + ) + + +def test_nested_backend_context_manager(loop_in_thread): + def get_nested_pids(): + pids = set(Parallel(n_jobs=2)(delayed(os.getpid)() for _ in range(2))) + pids |= set(Parallel(n_jobs=2)(delayed(os.getpid)() for _ in range(2))) + return pids + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop_in_thread) as client: + with parallel_config(backend='dask'): + pid_groups = Parallel(n_jobs=2)( + delayed(get_nested_pids)() + for _ in range(10) + ) + for pid_group in pid_groups: + assert len(set(pid_group)) <= 2 + + # No deadlocks + with Client(s['address'], loop=loop_in_thread) as client: # noqa: F841 + with parallel_config(backend='dask'): + pid_groups = Parallel(n_jobs=2)( + delayed(get_nested_pids)() + for _ in range(10) + ) + for pid_group in pid_groups: + assert len(set(pid_group)) <= 2 + + +def test_nested_backend_context_manager_implicit_n_jobs(loop): + # Check that Parallel with no explicit n_jobs value automatically selects + # all the dask workers, including in nested calls. 
+ + def _backend_type(p): + return p._backend.__class__.__name__ + + def get_nested_implicit_n_jobs(): + with Parallel() as p: + return _backend_type(p), p.n_jobs + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_config(backend='dask'): + with Parallel() as p: + assert _backend_type(p) == "DaskDistributedBackend" + assert p.n_jobs == -1 + all_nested_n_jobs = p( + delayed(get_nested_implicit_n_jobs)() + for _ in range(2) + ) + for backend_type, nested_n_jobs in all_nested_n_jobs: + assert backend_type == "DaskDistributedBackend" + assert nested_n_jobs == -1 + + +def test_errors(loop): + with pytest.raises(ValueError) as info: + with parallel_config(backend='dask'): + pass + + assert "create a dask client" in str(info.value).lower() + + +def test_correct_nested_backend(loop): + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + # No requirement, should be us + with parallel_config(backend='dask'): + result = Parallel(n_jobs=2)( + delayed(outer)(nested_require=None) for _ in range(1)) + assert isinstance(result[0][0][0], DaskDistributedBackend) + + # Require threads, should be threading + with parallel_config(backend='dask'): + result = Parallel(n_jobs=2)( + delayed(outer)(nested_require='sharedmem') + for _ in range(1)) + assert isinstance(result[0][0][0], ThreadingBackend) + + +def outer(nested_require): + return Parallel(n_jobs=2, prefer='threads')( + delayed(middle)(nested_require) for _ in range(1) + ) + + +def middle(require): + return Parallel(n_jobs=2, require=require)( + delayed(inner)() for _ in range(1) + ) + + +def inner(): + return Parallel()._backend + + +def test_secede_with_no_processes(loop): + # https://github.com/dask/distributed/issues/1775 + with Client(loop=loop, processes=False, set_as_default=True): + with parallel_config(backend='dask'): + Parallel(n_jobs=4)(delayed(id)(i) for i in range(2)) + + +def _worker_address(_): + from distributed import get_worker + return get_worker().address + + +def test_dask_backend_keywords(loop): + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_config(backend='dask', workers=a['address']): + seq = Parallel()( + delayed(_worker_address)(i) for i in range(10)) + assert seq == [a['address']] * 10 + + with parallel_config(backend='dask', workers=b['address']): + seq = Parallel()( + delayed(_worker_address)(i) for i in range(10)) + assert seq == [b['address']] * 10 + + +def test_scheduler_tasks_cleanup(loop): + with Client(processes=False, loop=loop) as client: + with parallel_config(backend='dask'): + Parallel()(delayed(inc)(i) for i in range(10)) + + start = time() + while client.cluster.scheduler.tasks: + sleep(0.01) + assert time() < start + 5 + + assert not client.futures + + +@pytest.mark.parametrize("cluster_strategy", ["adaptive", "late_scaling"]) +@pytest.mark.skipif( + distributed.__version__ <= '2.1.1' and distributed.__version__ >= '1.28.0', + reason="distributed bug - https://github.com/dask/distributed/pull/2841") +def test_wait_for_workers(cluster_strategy): + cluster = LocalCluster(n_workers=0, processes=False, threads_per_worker=2) + client = Client(cluster) + if cluster_strategy == "adaptive": + cluster.adapt(minimum=0, maximum=2) + elif cluster_strategy == "late_scaling": + # Tell the cluster to start workers but this is a non-blocking call + # and new workers might take time to connect. 
In this case the Parallel + # call should wait for at least one worker to come up before starting + # to schedule work. + cluster.scale(2) + try: + with parallel_config(backend='dask'): + # The following should wait a bit for at least one worker to + # become available. + Parallel()(delayed(inc)(i) for i in range(10)) + finally: + client.close() + cluster.close() + + +def test_wait_for_workers_timeout(): + # Start a cluster with 0 worker: + cluster = LocalCluster(n_workers=0, processes=False, threads_per_worker=2) + client = Client(cluster) + try: + with parallel_config(backend='dask', wait_for_workers_timeout=0.1): + # Short timeout: DaskDistributedBackend + msg = "DaskDistributedBackend has no worker after 0.1 seconds." + with pytest.raises(TimeoutError, match=msg): + Parallel()(delayed(inc)(i) for i in range(10)) + + with parallel_config(backend='dask', wait_for_workers_timeout=0): + # No timeout: fallback to generic joblib failure: + msg = "DaskDistributedBackend has no active worker" + with pytest.raises(RuntimeError, match=msg): + Parallel()(delayed(inc)(i) for i in range(10)) + finally: + client.close() + cluster.close() + + +@pytest.mark.parametrize("backend", ["loky", "multiprocessing"]) +def test_joblib_warning_inside_dask_daemonic_worker(backend): + cluster = LocalCluster(n_workers=2) + client = Client(cluster) + try: + + def func_using_joblib_parallel(): + # Somehow trying to check the warning type here (e.g. with + # pytest.warns(UserWarning)) make the test hang. Work-around: + # return the warning record to the client and the warning check is + # done client-side. + with warnings.catch_warnings(record=True) as record: + Parallel(n_jobs=2, backend=backend)( + delayed(inc)(i) for i in range(10)) + + return record + + fut = client.submit(func_using_joblib_parallel) + record = fut.result() + + assert len(record) == 1 + warning = record[0].message + assert isinstance(warning, UserWarning) + assert "distributed.worker.daemon" in str(warning) + finally: + client.close(timeout=30) + cluster.close(timeout=30) diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_disk.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_disk.py new file mode 100644 index 0000000000000000000000000000000000000000..b825a8b3a5c18a3114f34ed9d7c90cce62799085 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_disk.py @@ -0,0 +1,71 @@ +""" +Unit tests for the disk utilities. +""" + +# Authors: Gael Varoquaux +# Lars Buitinck +# Copyright (c) 2010 Gael Varoquaux +# License: BSD Style, 3 clauses. + +from __future__ import with_statement +import array +import os + +from joblib.disk import disk_used, memstr_to_bytes, mkdirp, rm_subdirs +from joblib.testing import parametrize, raises + +############################################################################### + + +def test_disk_used(tmpdir): + cachedir = tmpdir.strpath + # Not write a file that is 1M big in this directory, and check the + # size. The reason we use such a big file is that it makes us robust + # to errors due to block allocation. 
+ a = array.array('i') + sizeof_i = a.itemsize + target_size = 1024 + n = int(target_size * 1024 / sizeof_i) + a = array.array('i', n * (1,)) + with open(os.path.join(cachedir, 'test'), 'wb') as output: + a.tofile(output) + assert disk_used(cachedir) >= target_size + assert disk_used(cachedir) < target_size + 12 + + +@parametrize('text,value', + [('80G', 80 * 1024 ** 3), + ('1.4M', int(1.4 * 1024 ** 2)), + ('120M', 120 * 1024 ** 2), + ('53K', 53 * 1024)]) +def test_memstr_to_bytes(text, value): + assert memstr_to_bytes(text) == value + + +@parametrize('text,exception,regex', + [('fooG', ValueError, r'Invalid literal for size.*fooG.*'), + ('1.4N', ValueError, r'Invalid literal for size.*1.4N.*')]) +def test_memstr_to_bytes_exception(text, exception, regex): + with raises(exception) as excinfo: + memstr_to_bytes(text) + assert excinfo.match(regex) + + +def test_mkdirp(tmpdir): + mkdirp(os.path.join(tmpdir.strpath, 'ham')) + mkdirp(os.path.join(tmpdir.strpath, 'ham')) + mkdirp(os.path.join(tmpdir.strpath, 'spam', 'spam')) + + # Not all OSErrors are ignored + with raises(OSError): + mkdirp('') + + +def test_rm_subdirs(tmpdir): + sub_path = os.path.join(tmpdir.strpath, "am", "stram") + full_path = os.path.join(sub_path, "gram") + mkdirp(os.path.join(full_path)) + + rm_subdirs(sub_path) + assert os.path.exists(sub_path) + assert not os.path.exists(full_path) diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_func_inspect.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_func_inspect.py new file mode 100644 index 0000000000000000000000000000000000000000..dba237d48578e5d6386e67e80f3e6d31761108d6 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_func_inspect.py @@ -0,0 +1,310 @@ +""" +Test the func_inspect module. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import functools + +from joblib.func_inspect import filter_args, get_func_name, get_func_code +from joblib.func_inspect import _clean_win_chars, format_signature +from joblib.memory import Memory +from joblib.test.common import with_numpy +from joblib.testing import fixture, parametrize, raises + + +############################################################################### +# Module-level functions and fixture, for tests +def f(x, y=0): + pass + + +def g(x): + pass + + +def h(x, y=0, *args, **kwargs): + pass + + +def i(x=1): + pass + + +def j(x, y, **kwargs): + pass + + +def k(*args, **kwargs): + pass + + +def m1(x, *, y): + pass + + +def m2(x, *, y, z=3): + pass + + +@fixture(scope='module') +def cached_func(tmpdir_factory): + # Create a Memory object to test decorated functions. + # We should be careful not to call the decorated functions, so that + # cache directories are not created in the temp dir. 
+ cachedir = tmpdir_factory.mktemp("joblib_test_func_inspect") + mem = Memory(cachedir.strpath) + + @mem.cache + def cached_func_inner(x): + return x + + return cached_func_inner + + +class Klass(object): + + def f(self, x): + return x + + +############################################################################### +# Tests + +@parametrize('func,args,filtered_args', + [(f, [[], (1, )], {'x': 1, 'y': 0}), + (f, [['x'], (1, )], {'y': 0}), + (f, [['y'], (0, )], {'x': 0}), + (f, [['y'], (0, ), {'y': 1}], {'x': 0}), + (f, [['x', 'y'], (0, )], {}), + (f, [[], (0,), {'y': 1}], {'x': 0, 'y': 1}), + (f, [['y'], (), {'x': 2, 'y': 1}], {'x': 2}), + (g, [[], (), {'x': 1}], {'x': 1}), + (i, [[], (2, )], {'x': 2})]) +def test_filter_args(func, args, filtered_args): + assert filter_args(func, *args) == filtered_args + + +def test_filter_args_method(): + obj = Klass() + assert filter_args(obj.f, [], (1, )) == {'x': 1, 'self': obj} + + +@parametrize('func,args,filtered_args', + [(h, [[], (1, )], + {'x': 1, 'y': 0, '*': [], '**': {}}), + (h, [[], (1, 2, 3, 4)], + {'x': 1, 'y': 2, '*': [3, 4], '**': {}}), + (h, [[], (1, 25), {'ee': 2}], + {'x': 1, 'y': 25, '*': [], '**': {'ee': 2}}), + (h, [['*'], (1, 2, 25), {'ee': 2}], + {'x': 1, 'y': 2, '**': {'ee': 2}})]) +def test_filter_varargs(func, args, filtered_args): + assert filter_args(func, *args) == filtered_args + + +test_filter_kwargs_extra_params = [ + (m1, [[], (1,), {'y': 2}], {'x': 1, 'y': 2}), + (m2, [[], (1,), {'y': 2}], {'x': 1, 'y': 2, 'z': 3}) +] + + +@parametrize('func,args,filtered_args', + [(k, [[], (1, 2), {'ee': 2}], + {'*': [1, 2], '**': {'ee': 2}}), + (k, [[], (3, 4)], + {'*': [3, 4], '**': {}})] + + test_filter_kwargs_extra_params) +def test_filter_kwargs(func, args, filtered_args): + assert filter_args(func, *args) == filtered_args + + +def test_filter_args_2(): + assert (filter_args(j, [], (1, 2), {'ee': 2}) == + {'x': 1, 'y': 2, '**': {'ee': 2}}) + + ff = functools.partial(f, 1) + # filter_args has to special-case partial + assert filter_args(ff, [], (1, )) == {'*': [1], '**': {}} + assert filter_args(ff, ['y'], (1, )) == {'*': [1], '**': {}} + + +@parametrize('func,funcname', [(f, 'f'), (g, 'g'), + (cached_func, 'cached_func')]) +def test_func_name(func, funcname): + # Check that we are not confused by decoration + # here testcase 'cached_func' is the function itself + assert get_func_name(func)[1] == funcname + + +def test_func_name_on_inner_func(cached_func): + # Check that we are not confused by decoration + # here testcase 'cached_func' is the 'cached_func_inner' function + # returned by 'cached_func' fixture + assert get_func_name(cached_func)[1] == 'cached_func_inner' + + +def test_func_name_collision_on_inner_func(): + # Check that two functions defining and caching an inner function + # with the same do not cause (module, name) collision + def f(): + def inner_func(): + return # pragma: no cover + return get_func_name(inner_func) + + def g(): + def inner_func(): + return # pragma: no cover + return get_func_name(inner_func) + + module, name = f() + other_module, other_name = g() + + assert name == other_name + assert module != other_module + + +def test_func_inspect_errors(): + # Check that func_inspect is robust and will work on weird objects + assert get_func_name('a'.lower)[-1] == 'lower' + assert get_func_code('a'.lower)[1:] == (None, -1) + ff = lambda x: x # noqa: E731 + assert get_func_name(ff, win_characters=False)[-1] == '' + assert get_func_code(ff)[1] == __file__.replace('.pyc', '.py') + # Simulate a function 
defined in __main__ + ff.__module__ = '__main__' + assert get_func_name(ff, win_characters=False)[-1] == '' + assert get_func_code(ff)[1] == __file__.replace('.pyc', '.py') + + +def func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'): + pass + + +def func_with_signature(a: int, b: int) -> None: + pass + + +def test_filter_args_edge_cases(): + assert ( + filter_args(func_with_kwonly_args, [], (1, 2), + {'kw1': 3, 'kw2': 4}) == + {'a': 1, 'b': 2, 'kw1': 3, 'kw2': 4}) + + # filter_args doesn't care about keyword-only arguments so you + # can pass 'kw1' into *args without any problem + with raises(ValueError) as excinfo: + filter_args(func_with_kwonly_args, [], (1, 2, 3), {'kw2': 2}) + excinfo.match("Keyword-only parameter 'kw1' was passed as positional " + "parameter") + + assert ( + filter_args(func_with_kwonly_args, ['b', 'kw2'], (1, 2), + {'kw1': 3, 'kw2': 4}) == + {'a': 1, 'kw1': 3}) + + assert (filter_args(func_with_signature, ['b'], (1, 2)) == {'a': 1}) + + +def test_bound_methods(): + """ Make sure that calling the same method on two different instances + of the same class does resolv to different signatures. + """ + a = Klass() + b = Klass() + assert filter_args(a.f, [], (1, )) != filter_args(b.f, [], (1, )) + + +@parametrize('exception,regex,func,args', + [(ValueError, 'ignore_lst must be a list of parameters to ignore', + f, ['bar', (None, )]), + (ValueError, r'Ignore list: argument \'(.*)\' is not defined', + g, [['bar'], (None, )]), + (ValueError, 'Wrong number of arguments', + h, [[]])]) +def test_filter_args_error_msg(exception, regex, func, args): + """ Make sure that filter_args returns decent error messages, for the + sake of the user. + """ + with raises(exception) as excinfo: + filter_args(func, *args) + excinfo.match(regex) + + +def test_filter_args_no_kwargs_mutation(): + """None-regression test against 0.12.0 changes. + + https://github.com/joblib/joblib/pull/75 + + Make sure filter args doesn't mutate the kwargs dict that gets passed in. + """ + kwargs = {'x': 0} + filter_args(g, [], [], kwargs) + assert kwargs == {'x': 0} + + +def test_clean_win_chars(): + string = r'C:\foo\bar\main.py' + mangled_string = _clean_win_chars(string) + for char in ('\\', ':', '<', '>', '!'): + assert char not in mangled_string + + +@parametrize('func,args,kwargs,sgn_expected', + [(g, [list(range(5))], {}, 'g([0, 1, 2, 3, 4])'), + (k, [1, 2, (3, 4)], {'y': True}, 'k(1, 2, (3, 4), y=True)')]) +def test_format_signature(func, args, kwargs, sgn_expected): + # Test signature formatting. + path, sgn_result = format_signature(func, *args, **kwargs) + assert sgn_result == sgn_expected + + +def test_format_signature_long_arguments(): + shortening_threshold = 1500 + # shortening gets it down to 700 characters but there is the name + # of the function in the signature and a few additional things + # like dots for the ellipsis + shortening_target = 700 + 10 + + arg = 'a' * shortening_threshold + _, signature = format_signature(h, arg) + assert len(signature) < shortening_target + + nb_args = 5 + args = [arg for _ in range(nb_args)] + _, signature = format_signature(h, *args) + assert len(signature) < shortening_target * nb_args + + kwargs = {str(i): arg for i, arg in enumerate(args)} + _, signature = format_signature(h, **kwargs) + assert len(signature) < shortening_target * nb_args + + _, signature = format_signature(h, *args, **kwargs) + assert len(signature) < shortening_target * 2 * nb_args + + +@with_numpy +def test_format_signature_numpy(): + """ Test the format signature formatting with numpy. 
+ """ + + +def test_special_source_encoding(): + from joblib.test.test_func_inspect_special_encoding import big5_f + func_code, source_file, first_line = get_func_code(big5_f) + assert first_line == 5 + assert "def big5_f():" in func_code + assert "test_func_inspect_special_encoding" in source_file + + +def _get_code(): + from joblib.test.test_func_inspect_special_encoding import big5_f + return get_func_code(big5_f)[0] + + +def test_func_code_consistency(): + from joblib.parallel import Parallel, delayed + codes = Parallel(n_jobs=2)(delayed(_get_code)() for _ in range(5)) + assert len(set(codes)) == 1 diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_func_inspect_special_encoding.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_func_inspect_special_encoding.py new file mode 100644 index 0000000000000000000000000000000000000000..6c41a59a6900ced36050bf357359c1164a11fdbe --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_func_inspect_special_encoding.py @@ -0,0 +1,9 @@ +# -*- coding: big5 -*- + + +# Some Traditional Chinese characters: ¤@¨Ç¤¤¤å¦r²Å +def big5_f(): + """¥Î©ó´ú¸Õªº¨ç¼Æ + """ + # µùÄÀ + return 0 diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_hashing.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_hashing.py new file mode 100644 index 0000000000000000000000000000000000000000..85593d297f6e2387c58cad8d5bceba4d21b1c0aa --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_hashing.py @@ -0,0 +1,495 @@ +""" +Test the hashing module. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import time +import hashlib +import sys +import gc +import io +import collections +import itertools +import pickle +import random +from concurrent.futures import ProcessPoolExecutor +from decimal import Decimal + +from joblib.hashing import hash +from joblib.func_inspect import filter_args +from joblib.memory import Memory +from joblib.testing import raises, skipif, fixture, parametrize +from joblib.test.common import np, with_numpy + + +def unicode(s): + return s + + +############################################################################### +# Helper functions for the tests +def time_func(func, *args): + """ Time function func on *args. + """ + times = list() + for _ in range(3): + t1 = time.time() + func(*args) + times.append(time.time() - t1) + return min(times) + + +def relative_time(func1, func2, *args): + """ Return the relative time between func1 and func2 applied on + *args. + """ + time_func1 = time_func(func1, *args) + time_func2 = time_func(func2, *args) + relative_diff = 0.5 * (abs(time_func1 - time_func2) + / (time_func1 + time_func2)) + return relative_diff + + +class Klass(object): + + def f(self, x): + return x + + +class KlassWithCachedMethod(object): + + def __init__(self, cachedir): + mem = Memory(location=cachedir) + self.f = mem.cache(self.f) + + def f(self, x): + return x + + +############################################################################### +# Tests + +input_list = [1, 2, 1., 2., 1 + 1j, 2. + 1j, + 'a', 'b', + (1,), (1, 1,), [1, ], [1, 1, ], + {1: 1}, {1: 2}, {2: 1}, + None, + gc.collect, + [1, ].append, + # Next 2 sets have unorderable elements in python 3. + set(('a', 1)), + set(('a', 1, ('a', 1))), + # Next 2 dicts have unorderable type of keys in python 3. 
+ {'a': 1, 1: 2}, + {'a': 1, 1: 2, 'd': {'a': 1}}] + + +@parametrize('obj1', input_list) +@parametrize('obj2', input_list) +def test_trivial_hash(obj1, obj2): + """Smoke test hash on various types.""" + # Check that 2 objects have the same hash only if they are the same. + are_hashes_equal = hash(obj1) == hash(obj2) + are_objs_identical = obj1 is obj2 + assert are_hashes_equal == are_objs_identical + + +def test_hash_methods(): + # Check that hashing instance methods works + a = io.StringIO(unicode('a')) + assert hash(a.flush) == hash(a.flush) + a1 = collections.deque(range(10)) + a2 = collections.deque(range(9)) + assert hash(a1.extend) != hash(a2.extend) + + +@fixture(scope='function') +@with_numpy +def three_np_arrays(): + rnd = np.random.RandomState(0) + arr1 = rnd.random_sample((10, 10)) + arr2 = arr1.copy() + arr3 = arr2.copy() + arr3[0] += 1 + return arr1, arr2, arr3 + + +def test_hash_numpy_arrays(three_np_arrays): + arr1, arr2, arr3 = three_np_arrays + + for obj1, obj2 in itertools.product(three_np_arrays, repeat=2): + are_hashes_equal = hash(obj1) == hash(obj2) + are_arrays_equal = np.all(obj1 == obj2) + assert are_hashes_equal == are_arrays_equal + + assert hash(arr1) != hash(arr1.T) + + +def test_hash_numpy_dict_of_arrays(three_np_arrays): + arr1, arr2, arr3 = three_np_arrays + + d1 = {1: arr1, 2: arr2} + d2 = {1: arr2, 2: arr1} + d3 = {1: arr2, 2: arr3} + + assert hash(d1) == hash(d2) + assert hash(d1) != hash(d3) + + +@with_numpy +@parametrize('dtype', ['datetime64[s]', 'timedelta64[D]']) +def test_numpy_datetime_array(dtype): + # memoryview is not supported for some dtypes e.g. datetime64 + # see https://github.com/joblib/joblib/issues/188 for more details + a_hash = hash(np.arange(10)) + array = np.arange(0, 10, dtype=dtype) + assert hash(array) != a_hash + + +@with_numpy +def test_hash_numpy_noncontiguous(): + a = np.asarray(np.arange(6000).reshape((1000, 2, 3)), + order='F')[:, :1, :] + b = np.ascontiguousarray(a) + assert hash(a) != hash(b) + + c = np.asfortranarray(a) + assert hash(a) != hash(c) + + +@with_numpy +@parametrize('coerce_mmap', [True, False]) +def test_hash_memmap(tmpdir, coerce_mmap): + """Check that memmap and arrays hash identically if coerce_mmap is True.""" + filename = tmpdir.join('memmap_temp').strpath + try: + m = np.memmap(filename, shape=(10, 10), mode='w+') + a = np.asarray(m) + are_hashes_equal = (hash(a, coerce_mmap=coerce_mmap) == + hash(m, coerce_mmap=coerce_mmap)) + assert are_hashes_equal == coerce_mmap + finally: + if 'm' in locals(): + del m + # Force a garbage-collection cycle, to be certain that the + # object is delete, and we don't run in a problem under + # Windows with a file handle still open. 
+ gc.collect() + + +@with_numpy +@skipif(sys.platform == 'win32', reason='This test is not stable under windows' + ' for some reason') +def test_hash_numpy_performance(): + """ Check the performance of hashing numpy arrays: + + In [22]: a = np.random.random(1000000) + + In [23]: %timeit hashlib.md5(a).hexdigest() + 100 loops, best of 3: 20.7 ms per loop + + In [24]: %timeit hashlib.md5(pickle.dumps(a, protocol=2)).hexdigest() + 1 loops, best of 3: 73.1 ms per loop + + In [25]: %timeit hashlib.md5(cPickle.dumps(a, protocol=2)).hexdigest() + 10 loops, best of 3: 53.9 ms per loop + + In [26]: %timeit hash(a) + 100 loops, best of 3: 20.8 ms per loop + """ + rnd = np.random.RandomState(0) + a = rnd.random_sample(1000000) + + def md5_hash(x): + return hashlib.md5(memoryview(x)).hexdigest() + + relative_diff = relative_time(md5_hash, hash, a) + assert relative_diff < 0.3 + + # Check that hashing an tuple of 3 arrays takes approximately + # 3 times as much as hashing one array + time_hashlib = 3 * time_func(md5_hash, a) + time_hash = time_func(hash, (a, a, a)) + relative_diff = 0.5 * (abs(time_hash - time_hashlib) + / (time_hash + time_hashlib)) + assert relative_diff < 0.3 + + +def test_bound_methods_hash(): + """ Make sure that calling the same method on two different instances + of the same class does resolve to the same hashes. + """ + a = Klass() + b = Klass() + assert (hash(filter_args(a.f, [], (1, ))) == + hash(filter_args(b.f, [], (1, )))) + + +def test_bound_cached_methods_hash(tmpdir): + """ Make sure that calling the same _cached_ method on two different + instances of the same class does resolve to the same hashes. + """ + a = KlassWithCachedMethod(tmpdir.strpath) + b = KlassWithCachedMethod(tmpdir.strpath) + assert (hash(filter_args(a.f.func, [], (1, ))) == + hash(filter_args(b.f.func, [], (1, )))) + + +@with_numpy +def test_hash_object_dtype(): + """ Make sure that ndarrays with dtype `object' hash correctly.""" + + a = np.array([np.arange(i) for i in range(6)], dtype=object) + b = np.array([np.arange(i) for i in range(6)], dtype=object) + + assert hash(a) == hash(b) + + +@with_numpy +def test_numpy_scalar(): + # Numpy scalars are built from compiled functions, and lead to + # strange pickling paths explored, that can give hash collisions + a = np.float64(2.0) + b = np.float64(3.0) + assert hash(a) != hash(b) + + +def test_dict_hash(tmpdir): + # Check that dictionaries hash consistently, even though the ordering + # of the keys is not guaranteed + k = KlassWithCachedMethod(tmpdir.strpath) + + d = {'#s12069__c_maps.nii.gz': [33], + '#s12158__c_maps.nii.gz': [33], + '#s12258__c_maps.nii.gz': [33], + '#s12277__c_maps.nii.gz': [33], + '#s12300__c_maps.nii.gz': [33], + '#s12401__c_maps.nii.gz': [33], + '#s12430__c_maps.nii.gz': [33], + '#s13817__c_maps.nii.gz': [33], + '#s13903__c_maps.nii.gz': [33], + '#s13916__c_maps.nii.gz': [33], + '#s13981__c_maps.nii.gz': [33], + '#s13982__c_maps.nii.gz': [33], + '#s13983__c_maps.nii.gz': [33]} + + a = k.f(d) + b = k.f(a) + + assert hash(a) == hash(b) + + +def test_set_hash(tmpdir): + # Check that sets hash consistently, even though their ordering + # is not guaranteed + k = KlassWithCachedMethod(tmpdir.strpath) + + s = set(['#s12069__c_maps.nii.gz', + '#s12158__c_maps.nii.gz', + '#s12258__c_maps.nii.gz', + '#s12277__c_maps.nii.gz', + '#s12300__c_maps.nii.gz', + '#s12401__c_maps.nii.gz', + '#s12430__c_maps.nii.gz', + '#s13817__c_maps.nii.gz', + '#s13903__c_maps.nii.gz', + '#s13916__c_maps.nii.gz', + '#s13981__c_maps.nii.gz', + 
'#s13982__c_maps.nii.gz', + '#s13983__c_maps.nii.gz']) + + a = k.f(s) + b = k.f(a) + + assert hash(a) == hash(b) + + +def test_set_decimal_hash(): + # Check that sets containing decimals hash consistently, even though + # ordering is not guaranteed + assert (hash(set([Decimal(0), Decimal('NaN')])) == + hash(set([Decimal('NaN'), Decimal(0)]))) + + +def test_string(): + # Test that we obtain the same hash for object owning several strings, + # whatever the past of these strings (which are immutable in Python) + string = 'foo' + a = {string: 'bar'} + b = {string: 'bar'} + c = pickle.loads(pickle.dumps(b)) + assert hash([a, b]) == hash([a, c]) + + +@with_numpy +def test_numpy_dtype_pickling(): + # numpy dtype hashing is tricky to get right: see #231, #239, #251 #1080, + # #1082, and explanatory comments inside + # ``joblib.hashing.NumpyHasher.save``. + + # In this test, we make sure that the pickling of numpy dtypes is robust to + # object identity and object copy. + + dt1 = np.dtype('f4') + dt2 = np.dtype('f4') + + # simple dtypes objects are interned + assert dt1 is dt2 + assert hash(dt1) == hash(dt2) + + dt1_roundtripped = pickle.loads(pickle.dumps(dt1)) + assert dt1 is not dt1_roundtripped + assert hash(dt1) == hash(dt1_roundtripped) + + assert hash([dt1, dt1]) == hash([dt1_roundtripped, dt1_roundtripped]) + assert hash([dt1, dt1]) == hash([dt1, dt1_roundtripped]) + + complex_dt1 = np.dtype( + [('name', np.str_, 16), ('grades', np.float64, (2,))] + ) + complex_dt2 = np.dtype( + [('name', np.str_, 16), ('grades', np.float64, (2,))] + ) + + # complex dtypes objects are not interned + assert hash(complex_dt1) == hash(complex_dt2) + + complex_dt1_roundtripped = pickle.loads(pickle.dumps(complex_dt1)) + assert complex_dt1_roundtripped is not complex_dt1 + assert hash(complex_dt1) == hash(complex_dt1_roundtripped) + + assert hash([complex_dt1, complex_dt1]) == hash( + [complex_dt1_roundtripped, complex_dt1_roundtripped] + ) + assert hash([complex_dt1, complex_dt1]) == hash( + [complex_dt1_roundtripped, complex_dt1] + ) + + +@parametrize('to_hash,expected', + [('This is a string to hash', + '71b3f47df22cb19431d85d92d0b230b2'), + (u"C'est l\xe9t\xe9", + '2d8d189e9b2b0b2e384d93c868c0e576'), + ((123456, 54321, -98765), + 'e205227dd82250871fa25aa0ec690aa3'), + ([random.Random(42).random() for _ in range(5)], + 'a11ffad81f9682a7d901e6edc3d16c84'), + ({'abcde': 123, 'sadfas': [-9999, 2, 3]}, + 'aeda150553d4bb5c69f0e69d51b0e2ef')]) +def test_hashes_stay_the_same(to_hash, expected): + # We want to make sure that hashes don't change with joblib + # version. For end users, that would mean that they have to + # regenerate their cache from scratch, which potentially means + # lengthy recomputations. + # Expected results have been generated with joblib 0.9.2 + assert hash(to_hash) == expected + + +@with_numpy +def test_hashes_are_different_between_c_and_fortran_contiguous_arrays(): + # We want to be sure that the c-contiguous and f-contiguous versions of the + # same array produce 2 different hashes. 
+ rng = np.random.RandomState(0) + arr_c = rng.random_sample((10, 10)) + arr_f = np.asfortranarray(arr_c) + assert hash(arr_c) != hash(arr_f) + + +@with_numpy +def test_0d_array(): + hash(np.array(0)) + + +@with_numpy +def test_0d_and_1d_array_hashing_is_different(): + assert hash(np.array(0)) != hash(np.array([0])) + + +@with_numpy +def test_hashes_stay_the_same_with_numpy_objects(): + # Note: joblib used to test numpy objects hashing by comparing the produced + # hash of an object with some hard-coded target value to guarantee that + # hashing remains the same across joblib versions. However, since numpy + # 1.20 and joblib 1.0, joblib relies on potentially unstable implementation + # details of numpy to hash np.dtype objects, which makes the stability of + # hash values across different environments hard to guarantee and to test. + # As a result, hashing stability across joblib versions becomes best-effort + # only, and we only test the consistency within a single environment by + # making sure: + # - the hash of two copies of the same objects is the same + # - hashing some object in two different python processes produces the same + # value. This should be viewed as a proxy for testing hash consistency + # through time between Python sessions (provided no change in the + # environment was done between sessions). + + def create_objects_to_hash(): + rng = np.random.RandomState(42) + # Being explicit about dtypes in order to avoid + # architecture-related differences. Also using 'f4' rather than + # 'f8' for float arrays because 'f8' arrays generated by + # rng.random.randn don't seem to be bit-identical on 32bit and + # 64bit machines. + to_hash_list = [ + rng.randint(-1000, high=1000, size=50).astype(' +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. +import re + +from joblib.logger import PrintTime + + +def test_print_time(tmpdir, capsys): + # A simple smoke test for PrintTime. + logfile = tmpdir.join('test.log').strpath + print_time = PrintTime(logfile=logfile) + print_time('Foo') + # Create a second time, to smoke test log rotation. 
+    print_time = PrintTime(logfile=logfile)
+    print_time('Foo')
+    # And a third time
+    print_time = PrintTime(logfile=logfile)
+    print_time('Foo')
+
+    out_printed_text, err_printed_text = capsys.readouterr()
+    # Use regexps to be robust to time variations
+    match = r"Foo: 0\..s, 0\..min\nFoo: 0\..s, 0..min\nFoo: " + \
+            r".\..s, 0..min\n"
+    if not re.match(match, err_printed_text):
+        raise AssertionError('Expected %s, got %s' %
+                             (match, err_printed_text))
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_memmapping.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_memmapping.py
new file mode 100644
index 0000000000000000000000000000000000000000..42a297a9e445d38c0daa03bc7d78e4c4f1fd4571
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_memmapping.py
@@ -0,0 +1,1191 @@
+import os
+import mmap
+import sys
+import platform
+import gc
+import pickle
+import itertools
+from time import sleep
+import subprocess
+import threading
+import faulthandler
+
+import pytest
+
+from joblib.test.common import with_numpy, np
+from joblib.test.common import with_multiprocessing
+from joblib.test.common import with_dev_shm
+from joblib.testing import raises, parametrize, skipif
+from joblib.backports import make_memmap
+from joblib.parallel import Parallel, delayed
+
+from joblib.pool import MemmappingPool
+from joblib.executor import _TestingMemmappingExecutor as TestExecutor
+from joblib._memmapping_reducer import has_shareable_memory
+from joblib._memmapping_reducer import ArrayMemmapForwardReducer
+from joblib._memmapping_reducer import _strided_from_memmap
+from joblib._memmapping_reducer import _get_temp_dir
+from joblib._memmapping_reducer import _WeakArrayKeyMap
+from joblib._memmapping_reducer import _get_backing_memmap
+import joblib._memmapping_reducer as jmr
+
+
+def setup_module():
+    faulthandler.dump_traceback_later(timeout=300, exit=True)
+
+
+def teardown_module():
+    faulthandler.cancel_dump_traceback_later()
+
+
+def check_memmap_and_send_back(array):
+    assert _get_backing_memmap(array) is not None
+    return array
+
+
+def check_array(args):
+    """Dummy helper function to be executed in subprocesses
+
+    Check that the provided array has the expected value at the provided
+    position.
+
+    """
+    data, position, expected = args
+    np.testing.assert_array_equal(data[position], expected)
+
+
+def inplace_double(args):
+    """Dummy helper function to be executed in subprocesses
+
+    Check that the input array has the expected value at the provided
+    position and double that value in place.
+ + """ + data, position, expected = args + assert data[position] == expected + data[position] *= 2 + np.testing.assert_array_equal(data[position], 2 * expected) + + +@with_numpy +@with_multiprocessing +def test_memmap_based_array_reducing(tmpdir): + """Check that it is possible to reduce a memmap backed array""" + assert_array_equal = np.testing.assert_array_equal + filename = tmpdir.join('test.mmap').strpath + + # Create a file larger than what will be used by a + buffer = np.memmap(filename, dtype=np.float64, shape=500, mode='w+') + + # Fill the original buffer with negative markers to detect over of + # underflow in case of test failures + buffer[:] = - 1.0 * np.arange(buffer.shape[0], dtype=buffer.dtype) + buffer.flush() + + # Memmap a 2D fortran array on a offsetted subsection of the previous + # buffer + a = np.memmap(filename, dtype=np.float64, shape=(3, 5, 4), + mode='r+', order='F', offset=4) + a[:] = np.arange(60).reshape(a.shape) + + # Build various views that share the buffer with the original memmap + + # b is an memmap sliced view on an memmap instance + b = a[1:-1, 2:-1, 2:4] + + # c and d are array views + c = np.asarray(b) + d = c.T + + # Array reducer with auto dumping disabled + reducer = ArrayMemmapForwardReducer(None, tmpdir.strpath, 'c', True) + + def reconstruct_array_or_memmap(x): + cons, args = reducer(x) + return cons(*args) + + # Reconstruct original memmap + a_reconstructed = reconstruct_array_or_memmap(a) + assert has_shareable_memory(a_reconstructed) + assert isinstance(a_reconstructed, np.memmap) + assert_array_equal(a_reconstructed, a) + + # Reconstruct strided memmap view + b_reconstructed = reconstruct_array_or_memmap(b) + assert has_shareable_memory(b_reconstructed) + assert_array_equal(b_reconstructed, b) + + # Reconstruct arrays views on memmap base + c_reconstructed = reconstruct_array_or_memmap(c) + assert not isinstance(c_reconstructed, np.memmap) + assert has_shareable_memory(c_reconstructed) + assert_array_equal(c_reconstructed, c) + + d_reconstructed = reconstruct_array_or_memmap(d) + assert not isinstance(d_reconstructed, np.memmap) + assert has_shareable_memory(d_reconstructed) + assert_array_equal(d_reconstructed, d) + + # Test graceful degradation on fake memmap instances with in-memory + # buffers + a3 = a * 3 + assert not has_shareable_memory(a3) + a3_reconstructed = reconstruct_array_or_memmap(a3) + assert not has_shareable_memory(a3_reconstructed) + assert not isinstance(a3_reconstructed, np.memmap) + assert_array_equal(a3_reconstructed, a * 3) + + # Test graceful degradation on arrays derived from fake memmap instances + b3 = np.asarray(a3) + assert not has_shareable_memory(b3) + + b3_reconstructed = reconstruct_array_or_memmap(b3) + assert isinstance(b3_reconstructed, np.ndarray) + assert not has_shareable_memory(b3_reconstructed) + assert_array_equal(b3_reconstructed, b3) + + +@with_multiprocessing +@skipif((sys.platform != "win32") or (), + reason="PermissionError only easily triggerable on Windows") +def test_resource_tracker_retries_when_permissionerror(tmpdir): + # Test resource_tracker retry mechanism when unlinking memmaps. See more + # thorough information in the ``unlink_file`` documentation of joblib. 
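+    # (The script below keeps a np.memmap referenced while asking the
+    # resource_tracker to unlink its backing file; on Windows this raises a
+    # PermissionError, which the tracker is expected to log and retry rather
+    # than crash, hence the checks on the subprocess output further down.)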
+ filename = tmpdir.join('test.mmap').strpath + cmd = """if 1: + import os + import numpy as np + import time + from joblib.externals.loky.backend import resource_tracker + resource_tracker.VERBOSE = 1 + + # Start the resource tracker + resource_tracker.ensure_running() + time.sleep(1) + + # Create a file containing numpy data + memmap = np.memmap(r"{filename}", dtype=np.float64, shape=10, mode='w+') + memmap[:] = np.arange(10).astype(np.int8).data + memmap.flush() + assert os.path.exists(r"{filename}") + del memmap + + # Create a np.memmap backed by this file + memmap = np.memmap(r"{filename}", dtype=np.float64, shape=10, mode='w+') + resource_tracker.register(r"{filename}", "file") + + # Ask the resource_tracker to delete the file backing the np.memmap , this + # should raise PermissionError that the resource_tracker will log. + resource_tracker.maybe_unlink(r"{filename}", "file") + + # Wait for the resource_tracker to process the maybe_unlink before cleaning + # up the memmap + time.sleep(2) + """.format(filename=filename) + p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE, + stdout=subprocess.PIPE) + p.wait() + out, err = p.communicate() + assert p.returncode == 0 + assert out == b'' + msg = 'tried to unlink {}, got PermissionError'.format(filename) + assert msg in err.decode() + + +@with_numpy +@with_multiprocessing +def test_high_dimension_memmap_array_reducing(tmpdir): + assert_array_equal = np.testing.assert_array_equal + + filename = tmpdir.join('test.mmap').strpath + + # Create a high dimensional memmap + a = np.memmap(filename, dtype=np.float64, shape=(100, 15, 15, 3), + mode='w+') + a[:] = np.arange(100 * 15 * 15 * 3).reshape(a.shape) + + # Create some slices/indices at various dimensions + b = a[0:10] + c = a[:, 5:10] + d = a[:, :, :, 0] + e = a[1:3:4] + + # Array reducer with auto dumping disabled + reducer = ArrayMemmapForwardReducer(None, tmpdir.strpath, 'c', True) + + def reconstruct_array_or_memmap(x): + cons, args = reducer(x) + return cons(*args) + + a_reconstructed = reconstruct_array_or_memmap(a) + assert has_shareable_memory(a_reconstructed) + assert isinstance(a_reconstructed, np.memmap) + assert_array_equal(a_reconstructed, a) + + b_reconstructed = reconstruct_array_or_memmap(b) + assert has_shareable_memory(b_reconstructed) + assert_array_equal(b_reconstructed, b) + + c_reconstructed = reconstruct_array_or_memmap(c) + assert has_shareable_memory(c_reconstructed) + assert_array_equal(c_reconstructed, c) + + d_reconstructed = reconstruct_array_or_memmap(d) + assert has_shareable_memory(d_reconstructed) + assert_array_equal(d_reconstructed, d) + + e_reconstructed = reconstruct_array_or_memmap(e) + assert has_shareable_memory(e_reconstructed) + assert_array_equal(e_reconstructed, e) + + +@with_numpy +def test__strided_from_memmap(tmpdir): + fname = tmpdir.join('test.mmap').strpath + size = 5 * mmap.ALLOCATIONGRANULARITY + offset = mmap.ALLOCATIONGRANULARITY + 1 + # This line creates the mmap file that is reused later + memmap_obj = np.memmap(fname, mode='w+', shape=size + offset) + # filename, dtype, mode, offset, order, shape, strides, total_buffer_len + memmap_obj = _strided_from_memmap(fname, dtype='uint8', mode='r', + offset=offset, order='C', shape=size, + strides=None, total_buffer_len=None, + unlink_on_gc_collect=False) + assert isinstance(memmap_obj, np.memmap) + assert memmap_obj.offset == offset + memmap_backed_obj = _strided_from_memmap( + fname, dtype='uint8', mode='r', offset=offset, order='C', + shape=(size // 2,), strides=(2,), 
total_buffer_len=size, + unlink_on_gc_collect=False + ) + assert _get_backing_memmap(memmap_backed_obj).offset == offset + + +@with_numpy +@with_multiprocessing +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_pool_with_memmap(factory, tmpdir): + """Check that subprocess can access and update shared memory memmap""" + assert_array_equal = np.testing.assert_array_equal + + # Fork the subprocess before allocating the objects to be passed + pool_temp_folder = tmpdir.mkdir('pool').strpath + p = factory(10, max_nbytes=2, temp_folder=pool_temp_folder) + try: + filename = tmpdir.join('test.mmap').strpath + a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode='w+') + a.fill(1.0) + + p.map(inplace_double, [(a, (i, j), 1.0) + for i in range(a.shape[0]) + for j in range(a.shape[1])]) + + assert_array_equal(a, 2 * np.ones(a.shape)) + + # Open a copy-on-write view on the previous data + b = np.memmap(filename, dtype=np.float32, shape=(5, 3), mode='c') + + p.map(inplace_double, [(b, (i, j), 2.0) + for i in range(b.shape[0]) + for j in range(b.shape[1])]) + + # Passing memmap instances to the pool should not trigger the creation + # of new files on the FS + assert os.listdir(pool_temp_folder) == [] + + # the original data is untouched + assert_array_equal(a, 2 * np.ones(a.shape)) + assert_array_equal(b, 2 * np.ones(b.shape)) + + # readonly maps can be read but not updated + c = np.memmap(filename, dtype=np.float32, shape=(10,), mode='r', + offset=5 * 4) + + with raises(AssertionError): + p.map(check_array, [(c, i, 3.0) for i in range(c.shape[0])]) + + # depending on the version of numpy one can either get a RuntimeError + # or a ValueError + with raises((RuntimeError, ValueError)): + p.map(inplace_double, [(c, i, 2.0) for i in range(c.shape[0])]) + finally: + # Clean all filehandlers held by the pool + p.terminate() + del p + + +@with_numpy +@with_multiprocessing +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_pool_with_memmap_array_view(factory, tmpdir): + """Check that subprocess can access and update shared memory array""" + assert_array_equal = np.testing.assert_array_equal + + # Fork the subprocess before allocating the objects to be passed + pool_temp_folder = tmpdir.mkdir('pool').strpath + p = factory(10, max_nbytes=2, temp_folder=pool_temp_folder) + try: + + filename = tmpdir.join('test.mmap').strpath + a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode='w+') + a.fill(1.0) + + # Create an ndarray view on the memmap instance + a_view = np.asarray(a) + assert not isinstance(a_view, np.memmap) + assert has_shareable_memory(a_view) + + p.map(inplace_double, [(a_view, (i, j), 1.0) + for i in range(a.shape[0]) + for j in range(a.shape[1])]) + + # Both a and the a_view have been updated + assert_array_equal(a, 2 * np.ones(a.shape)) + assert_array_equal(a_view, 2 * np.ones(a.shape)) + + # Passing memmap array view to the pool should not trigger the + # creation of new files on the FS + assert os.listdir(pool_temp_folder) == [] + + finally: + p.terminate() + del p + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_permission_error_windows_reference_cycle(backend): + # Non regression test for: + # https://github.com/joblib/joblib/issues/806 + # + # The issue happens when trying to delete a memory mapped file that has + # not yet been closed by one of the worker processes. 
+ cmd = """if 1: + import numpy as np + from joblib import Parallel, delayed + + + data = np.random.rand(int(2e6)).reshape((int(1e6), 2)) + + # Build a complex cyclic reference that is likely to delay garbage + # collection of the memmapped array in the worker processes. + first_list = current_list = [data] + for i in range(10): + current_list = [current_list] + first_list.append(current_list) + + if __name__ == "__main__": + results = Parallel(n_jobs=2, backend="{b}")( + delayed(len)(current_list) for i in range(10)) + assert results == [1] * 10 + """.format(b=backend) + p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE, + stdout=subprocess.PIPE) + p.wait() + out, err = p.communicate() + assert p.returncode == 0, out.decode() + "\n\n" + err.decode() + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_permission_error_windows_memmap_sent_to_parent(backend): + # Second non-regression test for: + # https://github.com/joblib/joblib/issues/806 + # previously, child process would not convert temporary memmaps to numpy + # arrays when sending the data back to the parent process. This would lead + # to permission errors on windows when deleting joblib's temporary folder, + # as the memmaped files handles would still opened in the parent process. + cmd = '''if 1: + import os + import time + + import numpy as np + + from joblib import Parallel, delayed + from testutils import return_slice_of_data + + data = np.ones(int(2e6)) + + if __name__ == '__main__': + # warm-up call to launch the workers and start the resource_tracker + _ = Parallel(n_jobs=2, verbose=5, backend='{b}')( + delayed(id)(i) for i in range(20)) + + time.sleep(0.5) + + slice_of_data = Parallel(n_jobs=2, verbose=5, backend='{b}')( + delayed(return_slice_of_data)(data, 0, 20) for _ in range(10)) + '''.format(b=backend) + + for _ in range(3): + env = os.environ.copy() + env['PYTHONPATH'] = os.path.dirname(__file__) + p = subprocess.Popen([sys.executable, '-c', cmd], + stderr=subprocess.PIPE, + stdout=subprocess.PIPE, env=env) + p.wait() + out, err = p.communicate() + assert p.returncode == 0, err + assert out == b'' + if sys.version_info[:3] not in [(3, 8, 0), (3, 8, 1)]: + # In early versions of Python 3.8, a reference leak + # https://github.com/cloudpipe/cloudpickle/issues/327, holds + # references to pickled objects, generating race condition during + # cleanup finalizers of joblib and noisy resource_tracker outputs. + assert b'resource_tracker' not in err + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_parallel_isolated_temp_folders(backend): + # Test that consecutive Parallel call use isolated subfolders, even + # for the loky backend that reuses its executor instance across calls. + array = np.arange(int(1e2)) + [filename_1] = Parallel(n_jobs=2, backend=backend, max_nbytes=10)( + delayed(getattr)(array, 'filename') for _ in range(1) + ) + [filename_2] = Parallel(n_jobs=2, backend=backend, max_nbytes=10)( + delayed(getattr)(array, 'filename') for _ in range(1) + ) + assert os.path.dirname(filename_2) != os.path.dirname(filename_1) + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_managed_backend_reuse_temp_folder(backend): + # Test that calls to a managed parallel object reuse the same memmaps. 
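+    # (Complementary to the previous test: when Parallel is used as a context
+    # manager, the same backend, and therefore the same memmapping temp
+    # folder, stays alive for every call made inside the ``with`` block,
+    # whereas standalone Parallel calls each get an isolated folder.)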
+ array = np.arange(int(1e2)) + with Parallel(n_jobs=2, backend=backend, max_nbytes=10) as p: + [filename_1] = p( + delayed(getattr)(array, 'filename') for _ in range(1) + ) + [filename_2] = p( + delayed(getattr)(array, 'filename') for _ in range(1) + ) + assert os.path.dirname(filename_2) == os.path.dirname(filename_1) + + +@with_numpy +@with_multiprocessing +def test_memmapping_temp_folder_thread_safety(): + # Concurrent calls to Parallel with the loky backend will use the same + # executor, and thus the same reducers. Make sure that those reducers use + # different temporary folders depending on which Parallel objects called + # them, which is necessary to limit potential race conditions during the + # garbage collection of temporary memmaps. + array = np.arange(int(1e2)) + + temp_dirs_thread_1 = set() + temp_dirs_thread_2 = set() + + def concurrent_get_filename(array, temp_dirs): + with Parallel(backend='loky', n_jobs=2, max_nbytes=10) as p: + for i in range(10): + [filename] = p( + delayed(getattr)(array, 'filename') for _ in range(1) + ) + temp_dirs.add(os.path.dirname(filename)) + + t1 = threading.Thread( + target=concurrent_get_filename, args=(array, temp_dirs_thread_1) + ) + t2 = threading.Thread( + target=concurrent_get_filename, args=(array, temp_dirs_thread_2) + ) + + t1.start() + t2.start() + + t1.join() + t2.join() + + assert len(temp_dirs_thread_1) == 1 + assert len(temp_dirs_thread_2) == 1 + + assert temp_dirs_thread_1 != temp_dirs_thread_2 + + +@with_numpy +@with_multiprocessing +def test_multithreaded_parallel_termination_resource_tracker_silent(): + # test that concurrent termination attempts of a same executor does not + # emit any spurious error from the resource_tracker. We test various + # situations making 0, 1 or both parallel call sending a task that will + # make the worker (and thus the whole Parallel call) error out. + cmd = '''if 1: + import os + import numpy as np + from joblib import Parallel, delayed + from joblib.externals.loky.backend import resource_tracker + from concurrent.futures import ThreadPoolExecutor, wait + + resource_tracker.VERBOSE = 0 + + array = np.arange(int(1e2)) + + temp_dirs_thread_1 = set() + temp_dirs_thread_2 = set() + + + def raise_error(array): + raise ValueError + + + def parallel_get_filename(array, temp_dirs): + with Parallel(backend="loky", n_jobs=2, max_nbytes=10) as p: + for i in range(10): + [filename] = p( + delayed(getattr)(array, "filename") for _ in range(1) + ) + temp_dirs.add(os.path.dirname(filename)) + + + def parallel_raise(array, temp_dirs): + with Parallel(backend="loky", n_jobs=2, max_nbytes=10) as p: + for i in range(10): + [filename] = p( + delayed(raise_error)(array) for _ in range(1) + ) + temp_dirs.add(os.path.dirname(filename)) + + + executor = ThreadPoolExecutor(max_workers=2) + + # both function calls will use the same loky executor, but with a + # different Parallel object. 
+ future_1 = executor.submit({f1}, array, temp_dirs_thread_1) + future_2 = executor.submit({f2}, array, temp_dirs_thread_2) + + # Wait for both threads to terminate their backend + wait([future_1, future_2]) + + future_1.result() + future_2.result() + ''' + functions_and_returncodes = [ + ("parallel_get_filename", "parallel_get_filename", 0), + ("parallel_get_filename", "parallel_raise", 1), + ("parallel_raise", "parallel_raise", 1) + ] + + for f1, f2, returncode in functions_and_returncodes: + p = subprocess.Popen([sys.executable, '-c', cmd.format(f1=f1, f2=f2)], + stderr=subprocess.PIPE, stdout=subprocess.PIPE) + p.wait() + out, err = p.communicate() + assert p.returncode == returncode, out.decode() + assert b"resource_tracker" not in err, err.decode() + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_many_parallel_calls_on_same_object(backend): + # After #966 got merged, consecutive Parallel objects were sharing temp + # folder, which would lead to race conditions happening during the + # temporary resources management with the resource_tracker. This is a + # non-regression test that makes sure that consecutive Parallel operations + # on the same object do not error out. + cmd = '''if 1: + import os + import time + + import numpy as np + + from joblib import Parallel, delayed + from testutils import return_slice_of_data + + data = np.ones(100) + + if __name__ == '__main__': + for i in range(5): + slice_of_data = Parallel( + n_jobs=2, max_nbytes=1, backend='{b}')( + delayed(return_slice_of_data)(data, 0, 20) + for _ in range(10) + ) + '''.format(b=backend) + env = os.environ.copy() + env['PYTHONPATH'] = os.path.dirname(__file__) + p = subprocess.Popen( + [sys.executable, '-c', cmd], + stderr=subprocess.PIPE, + stdout=subprocess.PIPE, + env=env, + ) + p.wait() + out, err = p.communicate() + assert p.returncode == 0, err + assert out == b'' + if sys.version_info[:3] not in [(3, 8, 0), (3, 8, 1)]: + # In early versions of Python 3.8, a reference leak + # https://github.com/cloudpipe/cloudpickle/issues/327, holds + # references to pickled objects, generating race condition during + # cleanup finalizers of joblib and noisy resource_tracker outputs. + assert b'resource_tracker' not in err + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_memmap_returned_as_regular_array(backend): + data = np.ones(int(1e3)) + # Check that child processes send temporary memmaps back as numpy arrays. + [result] = Parallel(n_jobs=2, backend=backend, max_nbytes=100)( + delayed(check_memmap_and_send_back)(data) for _ in range(1)) + assert _get_backing_memmap(result) is None + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_resource_tracker_silent_when_reference_cycles(backend): + # There is a variety of reasons that can make joblib with loky backend + # output noisy warnings when a reference cycle is preventing a memmap from + # being garbage collected. Especially, joblib's main process finalizer + # deletes the temporary folder if it was not done before, which can + # interact badly with the resource_tracker. We don't risk leaking any + # resources, but this will likely make joblib output a lot of low-level + # confusing messages. + # + # This test makes sure that the resource_tracker is silent when a reference + # has been collected concurrently on non-Windows platforms. 
+ # + # Note that the script in ``cmd`` is the exact same script as in + # test_permission_error_windows_reference_cycle. + if backend == "loky" and sys.platform.startswith('win'): + # XXX: on Windows, reference cycles can delay timely garbage collection + # and make it impossible to properly delete the temporary folder in the + # main process because of permission errors. + pytest.xfail( + "The temporary folder cannot be deleted on Windows in the " + "presence of a reference cycle" + ) + + cmd = """if 1: + import numpy as np + from joblib import Parallel, delayed + + + data = np.random.rand(int(2e6)).reshape((int(1e6), 2)) + + # Build a complex cyclic reference that is likely to delay garbage + # collection of the memmapped array in the worker processes. + first_list = current_list = [data] + for i in range(10): + current_list = [current_list] + first_list.append(current_list) + + if __name__ == "__main__": + results = Parallel(n_jobs=2, backend="{b}")( + delayed(len)(current_list) for i in range(10)) + assert results == [1] * 10 + """.format(b=backend) + p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE, + stdout=subprocess.PIPE) + p.wait() + out, err = p.communicate() + out = out.decode() + err = err.decode() + assert p.returncode == 0, out + "\n\n" + err + assert "resource_tracker" not in err, err + + +@with_numpy +@with_multiprocessing +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_memmapping_pool_for_large_arrays(factory, tmpdir): + """Check that large arrays are not copied in memory""" + + # Check that the tempfolder is empty + assert os.listdir(tmpdir.strpath) == [] + + # Build an array reducers that automatically dump large array content + # to filesystem backed memmap instances to avoid memory explosion + p = factory(3, max_nbytes=40, temp_folder=tmpdir.strpath, verbose=2) + try: + # The temporary folder for the pool is not provisioned in advance + assert os.listdir(tmpdir.strpath) == [] + assert not os.path.exists(p._temp_folder) + + small = np.ones(5, dtype=np.float32) + assert small.nbytes == 20 + p.map(check_array, [(small, i, 1.0) for i in range(small.shape[0])]) + + # Memory has been copied, the pool filesystem folder is unused + assert os.listdir(tmpdir.strpath) == [] + + # Try with a file larger than the memmap threshold of 40 bytes + large = np.ones(100, dtype=np.float64) + assert large.nbytes == 800 + p.map(check_array, [(large, i, 1.0) for i in range(large.shape[0])]) + + # The data has been dumped in a temp folder for subprocess to share it + # without per-child memory copies + assert os.path.isdir(p._temp_folder) + dumped_filenames = os.listdir(p._temp_folder) + assert len(dumped_filenames) == 1 + + # Check that memory mapping is not triggered for arrays with + # dtype='object' + objects = np.array(['abc'] * 100, dtype='object') + results = p.map(has_shareable_memory, [objects]) + assert not results[0] + + finally: + # check FS garbage upon pool termination + p.terminate() + for i in range(10): + sleep(.1) + if not os.path.exists(p._temp_folder): + break + else: # pragma: no cover + raise AssertionError( + 'temporary folder {} was not deleted'.format(p._temp_folder) + ) + del p + + +@with_numpy +@with_multiprocessing +@parametrize( + "backend", + [ + pytest.param( + "multiprocessing", + marks=pytest.mark.xfail( + reason='https://github.com/joblib/joblib/issues/1086' + ), + ), + "loky", + ] +) +def test_child_raises_parent_exits_cleanly(backend): + # When a task 
executed by a child process raises an error, the parent + # process's backend is notified, and calls abort_everything. + # In loky, abort_everything itself calls shutdown(kill_workers=True) which + # sends SIGKILL to the worker, preventing it from running the finalizers + # supposed to signal the resource_tracker when the worker is done using + # objects relying on a shared resource (e.g np.memmaps). Because this + # behavior is prone to : + # - cause a resource leak + # - make the resource tracker emit noisy resource warnings + # we explicitly test that, when the said situation occurs: + # - no resources are actually leaked + # - the temporary resources are deleted as soon as possible (typically, at + # the end of the failing Parallel call) + # - the resource_tracker does not emit any warnings. + cmd = """if 1: + import os + from pathlib import Path + from time import sleep + + import numpy as np + from joblib import Parallel, delayed + from testutils import print_filename_and_raise + + data = np.random.rand(1000) + + def get_temp_folder(parallel_obj, backend): + if "{b}" == "loky": + return Path(parallel_obj._backend._workers._temp_folder) + else: + return Path(parallel_obj._backend._pool._temp_folder) + + + if __name__ == "__main__": + try: + with Parallel(n_jobs=2, backend="{b}", max_nbytes=100) as p: + temp_folder = get_temp_folder(p, "{b}") + p(delayed(print_filename_and_raise)(data) + for i in range(1)) + except ValueError as e: + # the temporary folder should be deleted by the end of this + # call but apparently on some file systems, this takes + # some time to be visible. + # + # We attempt to write into the temporary folder to test for + # its existence and we wait for a maximum of 10 seconds. + for i in range(100): + try: + with open(temp_folder / "some_file.txt", "w") as f: + f.write("some content") + except FileNotFoundError: + # temp_folder has been deleted, all is fine + break + + # ... else, wait a bit and try again + sleep(.1) + else: + raise AssertionError( + str(temp_folder) + " was not deleted" + ) from e + """.format(b=backend) + env = os.environ.copy() + env['PYTHONPATH'] = os.path.dirname(__file__) + p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE, + stdout=subprocess.PIPE, env=env) + p.wait() + out, err = p.communicate() + out, err = out.decode(), err.decode() + filename = out.split('\n')[0] + assert p.returncode == 0, err or out + assert err == '' # no resource_tracker warnings. 
+ assert not os.path.exists(filename) + + +@with_numpy +@with_multiprocessing +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_memmapping_pool_for_large_arrays_disabled(factory, tmpdir): + """Check that large arrays memmapping can be disabled""" + # Set max_nbytes to None to disable the auto memmapping feature + p = factory(3, max_nbytes=None, temp_folder=tmpdir.strpath) + try: + + # Check that the tempfolder is empty + assert os.listdir(tmpdir.strpath) == [] + + # Try with a file largish than the memmap threshold of 40 bytes + large = np.ones(100, dtype=np.float64) + assert large.nbytes == 800 + p.map(check_array, [(large, i, 1.0) for i in range(large.shape[0])]) + + # Check that the tempfolder is still empty + assert os.listdir(tmpdir.strpath) == [] + + finally: + # Cleanup open file descriptors + p.terminate() + del p + + +@with_numpy +@with_multiprocessing +@with_dev_shm +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_memmapping_on_large_enough_dev_shm(factory): + """Check that memmapping uses /dev/shm when possible""" + orig_size = jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE + try: + # Make joblib believe that it can use /dev/shm even when running on a + # CI container where the size of the /dev/shm is not very large (that + # is at least 32 MB instead of 2 GB by default). + jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(32e6) + p = factory(3, max_nbytes=10) + try: + # Check that the pool has correctly detected the presence of the + # shared memory filesystem. + pool_temp_folder = p._temp_folder + folder_prefix = '/dev/shm/joblib_memmapping_folder_' + assert pool_temp_folder.startswith(folder_prefix) + assert os.path.exists(pool_temp_folder) + + # Try with a file larger than the memmap threshold of 10 bytes + a = np.ones(100, dtype=np.float64) + assert a.nbytes == 800 + p.map(id, [a] * 10) + # a should have been memmapped to the pool temp folder: the joblib + # pickling procedure generate one .pkl file: + assert len(os.listdir(pool_temp_folder)) == 1 + + # create a new array with content that is different from 'a' so + # that it is mapped to a different file in the temporary folder of + # the pool. + b = np.ones(100, dtype=np.float64) * 2 + assert b.nbytes == 800 + p.map(id, [b] * 10) + # A copy of both a and b are now stored in the shared memory folder + assert len(os.listdir(pool_temp_folder)) == 2 + finally: + # Cleanup open file descriptors + p.terminate() + del p + + for i in range(100): + # The temp folder is cleaned up upon pool termination + if not os.path.exists(pool_temp_folder): + break + sleep(.1) + else: # pragma: no cover + raise AssertionError('temporary folder of pool was not deleted') + finally: + jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = orig_size + + +@with_numpy +@with_multiprocessing +@with_dev_shm +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_memmapping_on_too_small_dev_shm(factory): + orig_size = jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE + try: + # Make joblib believe that it cannot use /dev/shm unless there is + # 42 exabytes of available shared memory in /dev/shm + jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(42e18) + + p = factory(3, max_nbytes=10) + try: + # Check that the pool has correctly detected the presence of the + # shared memory filesystem. 
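+        # (Here, with the inflated minimum size, the detection should conclude
+        # that /dev/shm is too small, so the pool is expected to fall back to
+        # a regular on-disk temporary folder.)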
+ pool_temp_folder = p._temp_folder + assert not pool_temp_folder.startswith('/dev/shm') + finally: + # Cleanup open file descriptors + p.terminate() + del p + + # The temp folder is cleaned up upon pool termination + assert not os.path.exists(pool_temp_folder) + finally: + jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = orig_size + + +@with_numpy +@with_multiprocessing +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_memmapping_pool_for_large_arrays_in_return(factory, tmpdir): + """Check that large arrays are not copied in memory in return""" + assert_array_equal = np.testing.assert_array_equal + + # Build an array reducers that automatically dump large array content + # but check that the returned datastructure are regular arrays to avoid + # passing a memmap array pointing to a pool controlled temp folder that + # might be confusing to the user + + # The MemmappingPool user can always return numpy.memmap object explicitly + # to avoid memory copy + p = factory(3, max_nbytes=10, temp_folder=tmpdir.strpath) + try: + res = p.apply_async(np.ones, args=(1000,)) + large = res.get() + assert not has_shareable_memory(large) + assert_array_equal(large, np.ones(1000)) + finally: + p.terminate() + del p + + +def _worker_multiply(a, n_times): + """Multiplication function to be executed by subprocess""" + assert has_shareable_memory(a) + return a * n_times + + +@with_numpy +@with_multiprocessing +@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"]) +def test_workaround_against_bad_memmap_with_copied_buffers(factory, tmpdir): + """Check that memmaps with a bad buffer are returned as regular arrays + + Unary operations and ufuncs on memmap instances return a new memmap + instance with an in-memory buffer (probably a numpy bug). + """ + assert_array_equal = np.testing.assert_array_equal + + p = factory(3, max_nbytes=10, temp_folder=tmpdir.strpath) + try: + # Send a complex, large-ish view on a array that will be converted to + # a memmap in the worker process + a = np.asarray(np.arange(6000).reshape((1000, 2, 3)), + order='F')[:, :1, :] + + # Call a non-inplace multiply operation on the worker and memmap and + # send it back to the parent. + b = p.apply_async(_worker_multiply, args=(a, 3)).get() + assert not has_shareable_memory(b) + assert_array_equal(b, 3 * a) + finally: + p.terminate() + del p + + +def identity(arg): + return arg + + +@with_numpy +@with_multiprocessing +@parametrize( + "factory,retry_no", + list(itertools.product( + [MemmappingPool, TestExecutor.get_memmapping_executor], range(3))), + ids=['{}, {}'.format(x, y) for x, y in itertools.product( + ["multiprocessing", "loky"], map(str, range(3)))]) +def test_pool_memmap_with_big_offset(factory, retry_no, tmpdir): + # Test that numpy memmap offset is set correctly if greater than + # mmap.ALLOCATIONGRANULARITY, see + # https://github.com/joblib/joblib/issues/451 and + # https://github.com/numpy/numpy/pull/8443 for more details. 
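+    # (mmap.ALLOCATIONGRANULARITY is the OS allocation granularity, typically
+    # 4096 bytes on Linux and 65536 bytes on Windows, so the offset used below
+    # is deliberately larger than, and not a multiple of, that granularity.)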
+ fname = tmpdir.join('test.mmap').strpath + size = 5 * mmap.ALLOCATIONGRANULARITY + offset = mmap.ALLOCATIONGRANULARITY + 1 + obj = make_memmap(fname, mode='w+', shape=size, dtype='uint8', + offset=offset) + + p = factory(2, temp_folder=tmpdir.strpath) + result = p.apply_async(identity, args=(obj,)).get() + assert isinstance(result, np.memmap) + assert result.offset == offset + np.testing.assert_array_equal(obj, result) + p.terminate() + + +def test_pool_get_temp_dir(tmpdir): + pool_folder_name = 'test.tmpdir' + pool_folder, shared_mem = _get_temp_dir(pool_folder_name, tmpdir.strpath) + assert shared_mem is False + assert pool_folder == tmpdir.join('test.tmpdir').strpath + + pool_folder, shared_mem = _get_temp_dir(pool_folder_name, temp_folder=None) + if sys.platform.startswith('win'): + assert shared_mem is False + assert pool_folder.endswith(pool_folder_name) + + +def test_pool_get_temp_dir_no_statvfs(tmpdir, monkeypatch): + """Check that _get_temp_dir works when os.statvfs is not defined + + Regression test for #902 + """ + pool_folder_name = 'test.tmpdir' + import joblib._memmapping_reducer + if hasattr(joblib._memmapping_reducer.os, 'statvfs'): + # We are on Unix, since Windows doesn't have this function + monkeypatch.delattr(joblib._memmapping_reducer.os, 'statvfs') + + pool_folder, shared_mem = _get_temp_dir(pool_folder_name, temp_folder=None) + if sys.platform.startswith('win'): + assert shared_mem is False + assert pool_folder.endswith(pool_folder_name) + + +@with_numpy +@skipif(sys.platform == 'win32', reason='This test fails with a ' + 'PermissionError on Windows') +@parametrize("mmap_mode", ["r+", "w+"]) +def test_numpy_arrays_use_different_memory(mmap_mode): + def func(arr, value): + arr[:] = value + return arr + + arrays = [np.zeros((10, 10), dtype='float64') for i in range(10)] + + results = Parallel(mmap_mode=mmap_mode, max_nbytes=0, n_jobs=2)( + delayed(func)(arr, i) for i, arr in enumerate(arrays)) + + for i, arr in enumerate(results): + np.testing.assert_array_equal(arr, i) + + +@with_numpy +def test_weak_array_key_map(): + + def assert_empty_after_gc_collect(container, retries=100): + for i in range(retries): + if len(container) == 0: + return + gc.collect() + sleep(.1) + assert len(container) == 0 + + a = np.ones(42) + m = _WeakArrayKeyMap() + m.set(a, 'a') + assert m.get(a) == 'a' + + b = a + assert m.get(b) == 'a' + m.set(b, 'b') + assert m.get(a) == 'b' + + del a + gc.collect() + assert len(m._data) == 1 + assert m.get(b) == 'b' + + del b + assert_empty_after_gc_collect(m._data) + + c = np.ones(42) + m.set(c, 'c') + assert len(m._data) == 1 + assert m.get(c) == 'c' + + with raises(KeyError): + m.get(np.ones(42)) + + del c + assert_empty_after_gc_collect(m._data) + + # Check that creating and dropping numpy arrays with potentially the same + # object id will not cause the map to get confused. + def get_set_get_collect(m, i): + a = np.ones(42) + with raises(KeyError): + m.get(a) + m.set(a, i) + assert m.get(a) == i + return id(a) + + unique_ids = set([get_set_get_collect(m, i) for i in range(1000)]) + if platform.python_implementation() == 'CPython': + # On CPython (at least) the same id is often reused many times for the + # temporary arrays created under the local scope of the + # get_set_get_collect function without causing any spurious lookups / + # insertions in the map. Apparently on Python nogil, the id is not + # reused as often. 
+ max_len_unique_ids = 400 if getattr(sys.flags, 'nogil', False) else 100 + assert len(unique_ids) < max_len_unique_ids + + +def test_weak_array_key_map_no_pickling(): + m = _WeakArrayKeyMap() + with raises(pickle.PicklingError): + pickle.dumps(m) + + +@with_numpy +@with_multiprocessing +def test_direct_mmap(tmpdir): + testfile = str(tmpdir.join('arr.dat')) + a = np.arange(10, dtype='uint8') + a.tofile(testfile) + + def _read_array(): + with open(testfile) as fd: + mm = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ, offset=0) + return np.ndarray((10,), dtype=np.uint8, buffer=mm, offset=0) + + def func(x): + return x**2 + + arr = _read_array() + + # this is expected to work and gives the reference + ref = Parallel(n_jobs=2)(delayed(func)(x) for x in [a]) + + # now test that it work with the mmap array + results = Parallel(n_jobs=2)(delayed(func)(x) for x in [arr]) + np.testing.assert_array_equal(results, ref) + + # also test with a mmap array read in the subprocess + def worker(): + return _read_array() + + results = Parallel(n_jobs=2)(delayed(worker)() for _ in range(1)) + np.testing.assert_array_equal(results[0], arr) diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_memory.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..f360e2b2619d1883c4475cc88f35251cbcb9d1a7 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_memory.py @@ -0,0 +1,1526 @@ +""" +Test the memory module. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import functools +import gc +import logging +import shutil +import os +import os.path +import pathlib +import pickle +import sys +import time +import datetime +import textwrap + +import pytest + +from joblib.memory import Memory +from joblib.memory import expires_after +from joblib.memory import MemorizedFunc, NotMemorizedFunc +from joblib.memory import MemorizedResult, NotMemorizedResult +from joblib.memory import _FUNCTION_HASHES +from joblib.memory import register_store_backend, _STORE_BACKENDS +from joblib.memory import _build_func_identifier, _store_backend_factory +from joblib.memory import JobLibCollisionWarning +from joblib.parallel import Parallel, delayed +from joblib._store_backends import StoreBackendBase, FileSystemStoreBackend +from joblib.test.common import with_numpy, np +from joblib.test.common import with_multiprocessing +from joblib.testing import parametrize, raises, warns +from joblib.hashing import hash + + +############################################################################### +# Module-level variables for the tests +def f(x, y=1): + """ A module-level function for testing purposes. + """ + return x ** 2 + y + + +############################################################################### +# Helper function for the tests +def check_identity_lazy(func, accumulator, location): + """ Given a function and an accumulator (a list that grows every + time the function is called), check that the function can be + decorated by memory to be a lazy identity. + """ + # Call each function with several arguments, and check that it is + # evaluated only once per argument. 
+ memory = Memory(location=location, verbose=0) + func = memory.cache(func) + for i in range(3): + for _ in range(2): + assert func(i) == i + assert len(accumulator) == i + 1 + + +def corrupt_single_cache_item(memory): + single_cache_item, = memory.store_backend.get_items() + output_filename = os.path.join(single_cache_item.path, 'output.pkl') + with open(output_filename, 'w') as f: + f.write('garbage') + + +def monkeypatch_cached_func_warn(func, monkeypatch_fixture): + # Need monkeypatch because pytest does not + # capture stdlib logging output (see + # https://github.com/pytest-dev/pytest/issues/2079) + + recorded = [] + + def append_to_record(item): + recorded.append(item) + monkeypatch_fixture.setattr(func, 'warn', append_to_record) + return recorded + + +############################################################################### +# Tests +def test_memory_integration(tmpdir): + """ Simple test of memory lazy evaluation. + """ + accumulator = list() + + # Rmk: this function has the same name than a module-level function, + # thus it serves as a test to see that both are identified + # as different. + def f(arg): + accumulator.append(1) + return arg + + check_identity_lazy(f, accumulator, tmpdir.strpath) + + # Now test clearing + for compress in (False, True): + for mmap_mode in ('r', None): + memory = Memory(location=tmpdir.strpath, verbose=10, + mmap_mode=mmap_mode, compress=compress) + # First clear the cache directory, to check that our code can + # handle that + # NOTE: this line would raise an exception, as the database file is + # still open; we ignore the error since we want to test what + # happens if the directory disappears + shutil.rmtree(tmpdir.strpath, ignore_errors=True) + g = memory.cache(f) + g(1) + g.clear(warn=False) + current_accumulator = len(accumulator) + out = g(1) + + assert len(accumulator) == current_accumulator + 1 + # Also, check that Memory.eval works similarly + assert memory.eval(f, 1) == out + assert len(accumulator) == current_accumulator + 1 + + # Now do a smoke test with a function defined in __main__, as the name + # mangling rules are more complex + f.__module__ = '__main__' + memory = Memory(location=tmpdir.strpath, verbose=0) + memory.cache(f)(1) + + +@parametrize("call_before_reducing", [True, False]) +def test_parallel_call_cached_function_defined_in_jupyter( + tmpdir, call_before_reducing +): + # Calling an interactively defined memory.cache()'d function inside a + # Parallel call used to clear the existing cache related to the said + # function (https://github.com/joblib/joblib/issues/1035) + + # This tests checks that this is no longer the case. + + # TODO: test that the cache related to the function cache persists across + # ipython sessions (provided that no code change were made to the + # function's source)? + + # The first part of the test makes the necessary low-level calls to emulate + # the definition of a function in an jupyter notebook cell. Joblib has + # some custom code to treat functions defined specifically in jupyter + # notebooks/ipython session -- we want to test this code, which requires + # the emulation to be rigorous. 
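+    # (Concretely, the emulation compiles the cell source with a filename that
+    # mimics an IPython cell id, e.g. something like '<ipython-input-1>', so
+    # that joblib treats ``f`` as interactively defined; ``session_no``
+    # simulates redefining the function in two successive notebook sessions.)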
+ for session_no in [0, 1]: + ipython_cell_source = ''' + def f(x): + return x + ''' + + ipython_cell_id = ''.format(session_no) + + exec( + compile( + textwrap.dedent(ipython_cell_source), + filename=ipython_cell_id, + mode='exec' + ) + ) + # f is now accessible in the locals mapping - but for some unknown + # reason, f = locals()['f'] throws a KeyError at runtime, we need to + # bind locals()['f'] to a different name in the local namespace + aliased_f = locals()['f'] + aliased_f.__module__ = "__main__" + + # Preliminary sanity checks, and tests checking that joblib properly + # identified f as an interactive function defined in a jupyter notebook + assert aliased_f(1) == 1 + assert aliased_f.__code__.co_filename == ipython_cell_id + + memory = Memory(location=tmpdir.strpath, verbose=0) + cached_f = memory.cache(aliased_f) + + assert len(os.listdir(tmpdir / 'joblib')) == 1 + f_cache_relative_directory = os.listdir(tmpdir / 'joblib')[0] + assert 'ipython-input' in f_cache_relative_directory + + f_cache_directory = tmpdir / 'joblib' / f_cache_relative_directory + + if session_no == 0: + # The cache should be empty as cached_f has not been called yet. + assert os.listdir(f_cache_directory) == ['f'] + assert os.listdir(f_cache_directory / 'f') == [] + + if call_before_reducing: + cached_f(3) + # Two files were just created, func_code.py, and a folder + # containing the information (inputs hash/ouptput) of + # cached_f(3) + assert len(os.listdir(f_cache_directory / 'f')) == 2 + + # Now, testing #1035: when calling a cached function, joblib + # used to dynamically inspect the underlying function to + # extract its source code (to verify it matches the source code + # of the function as last inspected by joblib) -- however, + # source code introspection fails for dynamic functions sent to + # child processes - which would eventually make joblib clear + # the cache associated to f + res = Parallel(n_jobs=2)(delayed(cached_f)(i) for i in [1, 2]) + else: + # Submit the function to the joblib child processes, although + # the function has never been called in the parent yet. This + # triggers a specific code branch inside + # MemorizedFunc.__reduce__. + res = Parallel(n_jobs=2)(delayed(cached_f)(i) for i in [1, 2]) + assert len(os.listdir(f_cache_directory / 'f')) == 3 + + cached_f(3) + + # Making sure f's cache does not get cleared after the parallel + # calls, and contains ALL cached functions calls (f(1), f(2), f(3)) + # and 'func_code.py' + assert len(os.listdir(f_cache_directory / 'f')) == 4 + else: + # For the second session, there should be an already existing cache + assert len(os.listdir(f_cache_directory / 'f')) == 4 + + cached_f(3) + + # The previous cache should not be invalidated after calling the + # function in a new session + assert len(os.listdir(f_cache_directory / 'f')) == 4 + + +def test_no_memory(): + """ Test memory with location=None: no memoize """ + accumulator = list() + + def ff(arg): + accumulator.append(1) + return arg + + memory = Memory(location=None, verbose=0) + gg = memory.cache(ff) + for _ in range(4): + current_accumulator = len(accumulator) + gg(1) + assert len(accumulator) == current_accumulator + 1 + + +def test_memory_kwarg(tmpdir): + " Test memory with a function with keyword arguments." 
+ accumulator = list() + + def g(arg1=None, arg2=1): + accumulator.append(1) + return arg1 + + check_identity_lazy(g, accumulator, tmpdir.strpath) + + memory = Memory(location=tmpdir.strpath, verbose=0) + g = memory.cache(g) + # Smoke test with an explicit keyword argument: + assert g(arg1=30, arg2=2) == 30 + + +def test_memory_lambda(tmpdir): + " Test memory with a function with a lambda." + accumulator = list() + + def helper(x): + """ A helper function to define l as a lambda. + """ + accumulator.append(1) + return x + + check_identity_lazy(lambda x: helper(x), accumulator, tmpdir.strpath) + + +def test_memory_name_collision(tmpdir): + " Check that name collisions with functions will raise warnings" + memory = Memory(location=tmpdir.strpath, verbose=0) + + @memory.cache + def name_collision(x): + """ A first function called name_collision + """ + return x + + a = name_collision + + @memory.cache + def name_collision(x): + """ A second function called name_collision + """ + return x + + b = name_collision + + with warns(JobLibCollisionWarning) as warninfo: + a(1) + b(1) + + assert len(warninfo) == 1 + assert "collision" in str(warninfo[0].message) + + +def test_memory_warning_lambda_collisions(tmpdir): + # Check that multiple use of lambda will raise collisions + memory = Memory(location=tmpdir.strpath, verbose=0) + a = memory.cache(lambda x: x) + b = memory.cache(lambda x: x + 1) + + with warns(JobLibCollisionWarning) as warninfo: + assert a(0) == 0 + assert b(1) == 2 + assert a(1) == 1 + + # In recent Python versions, we can retrieve the code of lambdas, + # thus nothing is raised + assert len(warninfo) == 4 + + +def test_memory_warning_collision_detection(tmpdir): + # Check that collisions impossible to detect will raise appropriate + # warnings. + memory = Memory(location=tmpdir.strpath, verbose=0) + a1 = eval('lambda x: x') + a1 = memory.cache(a1) + b1 = eval('lambda x: x+1') + b1 = memory.cache(b1) + + with warns(JobLibCollisionWarning) as warninfo: + a1(1) + b1(1) + a1(0) + + assert len(warninfo) == 2 + assert "cannot detect" in str(warninfo[0].message).lower() + + +def test_memory_partial(tmpdir): + " Test memory with functools.partial." + accumulator = list() + + def func(x, y): + """ A helper function to define l as a lambda. + """ + accumulator.append(1) + return y + + import functools + function = functools.partial(func, 1) + + check_identity_lazy(function, accumulator, tmpdir.strpath) + + +def test_memory_eval(tmpdir): + " Smoke test memory with a function with a function defined in an eval." + memory = Memory(location=tmpdir.strpath, verbose=0) + + m = eval('lambda x: x') + mm = memory.cache(m) + + assert mm(1) == 1 + + +def count_and_append(x=[]): + """ A function with a side effect in its arguments. + + Return the length of its argument and append one element. + """ + len_x = len(x) + x.append(None) + return len_x + + +def test_argument_change(tmpdir): + """ Check that if a function has a side effect in its arguments, it + should use the hash of changing arguments. + """ + memory = Memory(location=tmpdir.strpath, verbose=0) + func = memory.cache(count_and_append) + # call the function for the first time, is should cache it with + # argument x=[] + assert func() == 0 + # the second time the argument is x=[None], which is not cached + # yet, so the functions should be called a second time + assert func() == 1 + + +@with_numpy +@parametrize('mmap_mode', [None, 'r']) +def test_memory_numpy(tmpdir, mmap_mode): + " Test memory with a function with numpy arrays." 
+ accumulator = list() + + def n(arg=None): + accumulator.append(1) + return arg + + memory = Memory(location=tmpdir.strpath, mmap_mode=mmap_mode, + verbose=0) + cached_n = memory.cache(n) + + rnd = np.random.RandomState(0) + for i in range(3): + a = rnd.random_sample((10, 10)) + for _ in range(3): + assert np.all(cached_n(a) == a) + assert len(accumulator) == i + 1 + + +@with_numpy +def test_memory_numpy_check_mmap_mode(tmpdir, monkeypatch): + """Check that mmap_mode is respected even at the first call""" + + memory = Memory(location=tmpdir.strpath, mmap_mode='r', verbose=0) + + @memory.cache() + def twice(a): + return a * 2 + + a = np.ones(3) + + b = twice(a) + c = twice(a) + + assert isinstance(c, np.memmap) + assert c.mode == 'r' + + assert isinstance(b, np.memmap) + assert b.mode == 'r' + + # Corrupts the file, Deleting b and c mmaps + # is necessary to be able edit the file + del b + del c + gc.collect() + corrupt_single_cache_item(memory) + + # Make sure that corrupting the file causes recomputation and that + # a warning is issued. + recorded_warnings = monkeypatch_cached_func_warn(twice, monkeypatch) + d = twice(a) + assert len(recorded_warnings) == 1 + exception_msg = 'Exception while loading results' + assert exception_msg in recorded_warnings[0] + # Asserts that the recomputation returns a mmap + assert isinstance(d, np.memmap) + assert d.mode == 'r' + + +def test_memory_exception(tmpdir): + """ Smoketest the exception handling of Memory. + """ + memory = Memory(location=tmpdir.strpath, verbose=0) + + class MyException(Exception): + pass + + @memory.cache + def h(exc=0): + if exc: + raise MyException + + # Call once, to initialise the cache + h() + + for _ in range(3): + # Call 3 times, to be sure that the Exception is always raised + with raises(MyException): + h(1) + + +def test_memory_ignore(tmpdir): + " Test the ignore feature of memory " + memory = Memory(location=tmpdir.strpath, verbose=0) + accumulator = list() + + @memory.cache(ignore=['y']) + def z(x, y=1): + accumulator.append(1) + + assert z.ignore == ['y'] + + z(0, y=1) + assert len(accumulator) == 1 + z(0, y=1) + assert len(accumulator) == 1 + z(0, y=2) + assert len(accumulator) == 1 + + +def test_memory_ignore_decorated(tmpdir): + " Test the ignore feature of memory on a decorated function " + memory = Memory(location=tmpdir.strpath, verbose=0) + accumulator = list() + + def decorate(f): + @functools.wraps(f) + def wrapped(*args, **kwargs): + return f(*args, **kwargs) + return wrapped + + @memory.cache(ignore=['y']) + @decorate + def z(x, y=1): + accumulator.append(1) + + assert z.ignore == ['y'] + + z(0, y=1) + assert len(accumulator) == 1 + z(0, y=1) + assert len(accumulator) == 1 + z(0, y=2) + assert len(accumulator) == 1 + + +def test_memory_args_as_kwargs(tmpdir): + """Non-regression test against 0.12.0 changes. + + https://github.com/joblib/joblib/pull/751 + """ + memory = Memory(location=tmpdir.strpath, verbose=0) + + @memory.cache + def plus_one(a): + return a + 1 + + # It's possible to call a positional arg as a kwarg. + assert plus_one(1) == 2 + assert plus_one(a=1) == 2 + + # However, a positional argument that joblib hadn't seen + # before would cause a failure if it was passed as a kwarg. 
+ assert plus_one(a=2) == 3 + + +@parametrize('ignore, verbose, mmap_mode', [(['x'], 100, 'r'), + ([], 10, None)]) +def test_partial_decoration(tmpdir, ignore, verbose, mmap_mode): + "Check cache may be called with kwargs before decorating" + memory = Memory(location=tmpdir.strpath, verbose=0) + + @memory.cache(ignore=ignore, verbose=verbose, mmap_mode=mmap_mode) + def z(x): + pass + + assert z.ignore == ignore + assert z._verbose == verbose + assert z.mmap_mode == mmap_mode + + +def test_func_dir(tmpdir): + # Test the creation of the memory cache directory for the function. + memory = Memory(location=tmpdir.strpath, verbose=0) + path = __name__.split('.') + path.append('f') + path = tmpdir.join('joblib', *path).strpath + + g = memory.cache(f) + # Test that the function directory is created on demand + func_id = _build_func_identifier(f) + location = os.path.join(g.store_backend.location, func_id) + assert location == path + assert os.path.exists(path) + assert memory.location == os.path.dirname(g.store_backend.location) + + # Test that the code is stored. + # For the following test to be robust to previous execution, we clear + # the in-memory store + _FUNCTION_HASHES.clear() + assert not g._check_previous_func_code() + assert os.path.exists(os.path.join(path, 'func_code.py')) + assert g._check_previous_func_code() + + # Test the robustness to failure of loading previous results. + args_id = g._get_args_id(1) + output_dir = os.path.join(g.store_backend.location, g.func_id, args_id) + a = g(1) + assert os.path.exists(output_dir) + os.remove(os.path.join(output_dir, 'output.pkl')) + assert a == g(1) + + +def test_persistence(tmpdir): + # Test the memorized functions can be pickled and restored. + memory = Memory(location=tmpdir.strpath, verbose=0) + g = memory.cache(f) + output = g(1) + + h = pickle.loads(pickle.dumps(g)) + + args_id = h._get_args_id(1) + output_dir = os.path.join(h.store_backend.location, h.func_id, args_id) + assert os.path.exists(output_dir) + assert output == h.store_backend.load_item([h.func_id, args_id]) + memory2 = pickle.loads(pickle.dumps(memory)) + assert memory.store_backend.location == memory2.store_backend.location + + # Smoke test that pickling a memory with location=None works + memory = Memory(location=None, verbose=0) + pickle.loads(pickle.dumps(memory)) + g = memory.cache(f) + gp = pickle.loads(pickle.dumps(g)) + gp(1) + + +def test_check_call_in_cache(tmpdir): + for func in (MemorizedFunc(f, tmpdir.strpath), + Memory(location=tmpdir.strpath, verbose=0).cache(f)): + result = func.check_call_in_cache(2) + assert not result + assert isinstance(result, bool) + assert func(2) == 5 + result = func.check_call_in_cache(2) + assert result + assert isinstance(result, bool) + func.clear() + + +def test_call_and_shelve(tmpdir): + # Test MemorizedFunc outputting a reference to cache. + + for func, Result in zip((MemorizedFunc(f, tmpdir.strpath), + NotMemorizedFunc(f), + Memory(location=tmpdir.strpath, + verbose=0).cache(f), + Memory(location=None).cache(f), + ), + (MemorizedResult, NotMemorizedResult, + MemorizedResult, NotMemorizedResult)): + assert func(2) == 5 + result = func.call_and_shelve(2) + assert isinstance(result, Result) + assert result.get() == 5 + + result.clear() + with raises(KeyError): + result.get() + result.clear() # Do nothing if there is no cache. 
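+# A minimal usage sketch of the shelving API exercised above (illustrative
+# only, not part of the test suite; the cache directory is hypothetical):
+#
+#     from joblib import Memory
+#     memory = Memory('/tmp/joblib_demo', verbose=0)
+#
+#     @memory.cache
+#     def square(x):
+#         return x ** 2
+#
+#     handle = square.call_and_shelve(3)   # returns a MemorizedResult
+#     assert handle.get() == 9             # loads the stored output on demand
+#     handle.clear()                       # drops this single cache entry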
+ + +def test_call_and_shelve_argument_hash(tmpdir): + # Verify that a warning is raised when accessing arguments_hash + # attribute from MemorizedResult + func = Memory(location=tmpdir.strpath, verbose=0).cache(f) + result = func.call_and_shelve(2) + assert isinstance(result, MemorizedResult) + with warns(DeprecationWarning) as w: + assert result.argument_hash == result.args_id + assert len(w) == 1 + assert "The 'argument_hash' attribute has been deprecated" \ + in str(w[-1].message) + + +def test_call_and_shelve_lazily_load_stored_result(tmpdir): + """Check call_and_shelve only load stored data if needed.""" + test_access_time_file = tmpdir.join('test_access') + test_access_time_file.write('test_access') + test_access_time = os.stat(test_access_time_file.strpath).st_atime + # check file system access time stats resolution is lower than test wait + # timings. + time.sleep(0.5) + assert test_access_time_file.read() == 'test_access' + + if test_access_time == os.stat(test_access_time_file.strpath).st_atime: + # Skip this test when access time cannot be retrieved with enough + # precision from the file system (e.g. NTFS on windows). + pytest.skip("filesystem does not support fine-grained access time " + "attribute") + + memory = Memory(location=tmpdir.strpath, verbose=0) + func = memory.cache(f) + args_id = func._get_args_id(2) + result_path = os.path.join(memory.store_backend.location, + func.func_id, args_id, 'output.pkl') + assert func(2) == 5 + first_access_time = os.stat(result_path).st_atime + time.sleep(1) + + # Should not access the stored data + result = func.call_and_shelve(2) + assert isinstance(result, MemorizedResult) + assert os.stat(result_path).st_atime == first_access_time + time.sleep(1) + + # Read the stored data => last access time is greater than first_access + assert result.get() == 5 + assert os.stat(result_path).st_atime > first_access_time + + +def test_memorized_pickling(tmpdir): + for func in (MemorizedFunc(f, tmpdir.strpath), NotMemorizedFunc(f)): + filename = tmpdir.join('pickling_test.dat').strpath + result = func.call_and_shelve(2) + with open(filename, 'wb') as fp: + pickle.dump(result, fp) + with open(filename, 'rb') as fp: + result2 = pickle.load(fp) + assert result2.get() == result.get() + os.remove(filename) + + +def test_memorized_repr(tmpdir): + func = MemorizedFunc(f, tmpdir.strpath) + result = func.call_and_shelve(2) + + func2 = MemorizedFunc(f, tmpdir.strpath) + result2 = func2.call_and_shelve(2) + assert result.get() == result2.get() + assert repr(func) == repr(func2) + + # Smoke test with NotMemorizedFunc + func = NotMemorizedFunc(f) + repr(func) + repr(func.call_and_shelve(2)) + + # Smoke test for message output (increase code coverage) + func = MemorizedFunc(f, tmpdir.strpath, verbose=11, timestamp=time.time()) + result = func.call_and_shelve(11) + result.get() + + func = MemorizedFunc(f, tmpdir.strpath, verbose=11) + result = func.call_and_shelve(11) + result.get() + + func = MemorizedFunc(f, tmpdir.strpath, verbose=5, timestamp=time.time()) + result = func.call_and_shelve(11) + result.get() + + func = MemorizedFunc(f, tmpdir.strpath, verbose=5) + result = func.call_and_shelve(11) + result.get() + + +def test_memory_file_modification(capsys, tmpdir, monkeypatch): + # Test that modifying a Python file after loading it does not lead to + # Recomputation + dir_name = tmpdir.mkdir('tmp_import').strpath + filename = os.path.join(dir_name, 'tmp_joblib_.py') + content = 'def f(x):\n print(x)\n return x\n' + with open(filename, 'w') as module_file: + 
module_file.write(content) + + # Load the module: + monkeypatch.syspath_prepend(dir_name) + import tmp_joblib_ as tmp + + memory = Memory(location=tmpdir.strpath, verbose=0) + f = memory.cache(tmp.f) + # First call f a few times + f(1) + f(2) + f(1) + + # Now modify the module where f is stored without modifying f + with open(filename, 'w') as module_file: + module_file.write('\n\n' + content) + + # And call f a couple more times + f(1) + f(1) + + # Flush the .pyc files + shutil.rmtree(dir_name) + os.mkdir(dir_name) + # Now modify the module where f is stored, modifying f + content = 'def f(x):\n print("x=%s" % x)\n return x\n' + with open(filename, 'w') as module_file: + module_file.write(content) + + # And call f more times prior to reloading: the cache should not be + # invalidated at this point as the active function definition has not + # changed in memory yet. + f(1) + f(1) + + # Now reload + sys.stdout.write('Reloading\n') + sys.modules.pop('tmp_joblib_') + import tmp_joblib_ as tmp + f = memory.cache(tmp.f) + + # And call f more times + f(1) + f(1) + + out, err = capsys.readouterr() + assert out == '1\n2\nReloading\nx=1\n' + + +def _function_to_cache(a, b): + # Just a place holder function to be mutated by tests + pass + + +def _sum(a, b): + return a + b + + +def _product(a, b): + return a * b + + +def test_memory_in_memory_function_code_change(tmpdir): + _function_to_cache.__code__ = _sum.__code__ + + memory = Memory(location=tmpdir.strpath, verbose=0) + f = memory.cache(_function_to_cache) + + assert f(1, 2) == 3 + assert f(1, 2) == 3 + + with warns(JobLibCollisionWarning): + # Check that inline function modification triggers a cache invalidation + _function_to_cache.__code__ = _product.__code__ + assert f(1, 2) == 2 + assert f(1, 2) == 2 + + +def test_clear_memory_with_none_location(): + memory = Memory(location=None) + memory.clear() + + +def func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'): + return a, b, kw1, kw2 + + +def func_with_signature(a: int, b: float) -> float: + return a + b + + +def test_memory_func_with_kwonly_args(tmpdir): + memory = Memory(location=tmpdir.strpath, verbose=0) + func_cached = memory.cache(func_with_kwonly_args) + + assert func_cached(1, 2, kw1=3) == (1, 2, 3, 'kw2') + + # Making sure that providing a keyword-only argument by + # position raises an exception + with raises(ValueError) as excinfo: + func_cached(1, 2, 3, kw2=4) + excinfo.match("Keyword-only parameter 'kw1' was passed as positional " + "parameter") + + # Keyword-only parameter passed by position with cached call + # should still raise ValueError + func_cached(1, 2, kw1=3, kw2=4) + + with raises(ValueError) as excinfo: + func_cached(1, 2, 3, kw2=4) + excinfo.match("Keyword-only parameter 'kw1' was passed as positional " + "parameter") + + # Test 'ignore' parameter + func_cached = memory.cache(func_with_kwonly_args, ignore=['kw2']) + assert func_cached(1, 2, kw1=3, kw2=4) == (1, 2, 3, 4) + assert func_cached(1, 2, kw1=3, kw2='ignored') == (1, 2, 3, 4) + + +def test_memory_func_with_signature(tmpdir): + memory = Memory(location=tmpdir.strpath, verbose=0) + func_cached = memory.cache(func_with_signature) + + assert func_cached(1, 2.) == 3. 
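+# The toy-cache helper below fills a store with ~1 KB entries so the
+# eviction tests can reason in round numbers. Rough sketch of the resulting
+# on-disk layout (illustrative; the actual directory names are hashes):
+#
+#     <location>/joblib/<module>/get_1000_bytes/<args_id>/output.pkl
+#     <location>/joblib/<module>/get_1000_bytes/<args_id>/metadata.json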
+
+
+def _setup_toy_cache(tmpdir, num_inputs=10):
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+
+    @memory.cache()
+    def get_1000_bytes(arg):
+        return 'a' * 1000
+
+    inputs = list(range(num_inputs))
+    for arg in inputs:
+        get_1000_bytes(arg)
+
+    func_id = _build_func_identifier(get_1000_bytes)
+    hash_dirnames = [get_1000_bytes._get_args_id(arg)
+                     for arg in inputs]
+
+    full_hashdirs = [os.path.join(get_1000_bytes.store_backend.location,
+                                  func_id, dirname)
+                     for dirname in hash_dirnames]
+    return memory, full_hashdirs, get_1000_bytes
+
+
+def test__get_items(tmpdir):
+    memory, expected_hash_dirs, _ = _setup_toy_cache(tmpdir)
+    items = memory.store_backend.get_items()
+    hash_dirs = [ci.path for ci in items]
+    assert set(hash_dirs) == set(expected_hash_dirs)
+
+    def get_files_size(directory):
+        full_paths = [os.path.join(directory, fn)
+                      for fn in os.listdir(directory)]
+        return sum(os.path.getsize(fp) for fp in full_paths)
+
+    expected_hash_cache_sizes = [get_files_size(hash_dir)
+                                 for hash_dir in hash_dirs]
+    hash_cache_sizes = [ci.size for ci in items]
+    assert hash_cache_sizes == expected_hash_cache_sizes
+
+    output_filenames = [os.path.join(hash_dir, 'output.pkl')
+                        for hash_dir in hash_dirs]
+
+    expected_last_accesses = [
+        datetime.datetime.fromtimestamp(os.path.getatime(fn))
+        for fn in output_filenames]
+    last_accesses = [ci.last_access for ci in items]
+    assert last_accesses == expected_last_accesses
+
+
+def test__get_items_to_delete(tmpdir):
+    # Test an empty cache
+    memory, _, _ = _setup_toy_cache(tmpdir, num_inputs=0)
+    items_to_delete = memory.store_backend._get_items_to_delete('1K')
+    assert items_to_delete == []
+
+    memory, expected_hash_cachedirs, _ = _setup_toy_cache(tmpdir)
+    items = memory.store_backend.get_items()
+    # bytes_limit set to keep only one cache item (each hash cache
+    # folder is about 1000 bytes + metadata)
+    items_to_delete = memory.store_backend._get_items_to_delete('2K')
+    nb_hashes = len(expected_hash_cachedirs)
+    assert set.issubset(set(items_to_delete), set(items))
+    assert len(items_to_delete) == nb_hashes - 1
+
+    # Sanity check that bytes_limit=2048 is the same as bytes_limit='2K'
+    items_to_delete_2048b = memory.store_backend._get_items_to_delete(2048)
+    assert sorted(items_to_delete) == sorted(items_to_delete_2048b)
+
+    # bytes_limit greater than the size of the cache
+    items_to_delete_empty = memory.store_backend._get_items_to_delete('1M')
+    assert items_to_delete_empty == []
+
+    # All the cache items need to be deleted
+    bytes_limit_too_small = 500
+    items_to_delete_500b = memory.store_backend._get_items_to_delete(
+        bytes_limit_too_small
+    )
+    assert set(items_to_delete_500b) == set(items)
+
+    # Test the LRU property: surviving cache items should all have a more
+    # recent last_access than the ones that have been deleted
+    items_to_delete_6000b = memory.store_backend._get_items_to_delete(6000)
+    surviving_items = set(items).difference(items_to_delete_6000b)
+
+    assert (max(ci.last_access for ci in items_to_delete_6000b) <=
+            min(ci.last_access for ci in surviving_items))
+
+
+def test_memory_reduce_size_bytes_limit(tmpdir):
+    memory, _, _ = _setup_toy_cache(tmpdir)
+    ref_cache_items = memory.store_backend.get_items()
+
+    # By default memory.bytes_limit is None and reduce_size is a no-op
+    memory.reduce_size()
+    cache_items = memory.store_backend.get_items()
+    assert sorted(ref_cache_items) == sorted(cache_items)
+
+    # No cache items are deleted if bytes_limit is greater than the size
+    # of the cache
+    memory.reduce_size(bytes_limit='1M')
+    cache_items = memory.store_backend.get_items()
+    assert sorted(ref_cache_items) == sorted(cache_items)
+
+    # bytes_limit is set so that only two cache items are kept
+    memory.reduce_size(bytes_limit='3K')
+    cache_items = memory.store_backend.get_items()
+    assert set.issubset(set(cache_items), set(ref_cache_items))
+    assert len(cache_items) == 2
+
+    # bytes_limit set so that no cache item is kept
+    bytes_limit_too_small = 500
+    memory.reduce_size(bytes_limit=bytes_limit_too_small)
+    cache_items = memory.store_backend.get_items()
+    assert cache_items == []
+
+
+def test_memory_reduce_size_items_limit(tmpdir):
+    memory, _, _ = _setup_toy_cache(tmpdir)
+    ref_cache_items = memory.store_backend.get_items()
+
+    # By default reduce_size is a no-op
+    memory.reduce_size()
+    cache_items = memory.store_backend.get_items()
+    assert sorted(ref_cache_items) == sorted(cache_items)
+
+    # No cache items are deleted if items_limit is greater than the
+    # number of items in the cache
+    memory.reduce_size(items_limit=10)
+    cache_items = memory.store_backend.get_items()
+    assert sorted(ref_cache_items) == sorted(cache_items)
+
+    # items_limit is set so that only two cache items are kept
+    memory.reduce_size(items_limit=2)
+    cache_items = memory.store_backend.get_items()
+    assert set.issubset(set(cache_items), set(ref_cache_items))
+    assert len(cache_items) == 2
+
+    # items_limit set so that no cache item is kept
+    memory.reduce_size(items_limit=0)
+    cache_items = memory.store_backend.get_items()
+    assert cache_items == []
+
+
+def test_memory_reduce_size_age_limit(tmpdir):
+    import time
+    import datetime
+    memory, _, put_cache = _setup_toy_cache(tmpdir)
+    ref_cache_items = memory.store_backend.get_items()
+
+    # By default reduce_size is a no-op
+    memory.reduce_size()
+    cache_items = memory.store_backend.get_items()
+    assert sorted(ref_cache_items) == sorted(cache_items)
+
+    # No cache items are deleted if age_limit is large.
+    memory.reduce_size(age_limit=datetime.timedelta(days=1))
+    cache_items = memory.store_backend.get_items()
+    assert sorted(ref_cache_items) == sorted(cache_items)
+
+    # age_limit is set so that only two cache items are kept
+    time.sleep(1)
+    put_cache(-1)
+    put_cache(-2)
+    memory.reduce_size(age_limit=datetime.timedelta(seconds=1))
+    cache_items = memory.store_backend.get_items()
+    assert not set.issubset(set(cache_items), set(ref_cache_items))
+    assert len(cache_items) == 2
+
+    # age_limit set so that no cache item is kept
+    memory.reduce_size(age_limit=datetime.timedelta(seconds=0))
+    cache_items = memory.store_backend.get_items()
+    assert cache_items == []
+
+
+def test_memory_clear(tmpdir):
+    memory, _, g = _setup_toy_cache(tmpdir)
+    memory.clear()
+
+    assert os.listdir(memory.store_backend.location) == []
+
+    # Check that the cache for the function's hash is also reset.
+    assert not g._check_previous_func_code(stacklevel=4)
+
+
+def fast_func_with_complex_output():
+    complex_obj = ['a' * 1000] * 1000
+    return complex_obj
+
+
+def fast_func_with_conditional_complex_output(complex_output=True):
+    complex_obj = {str(i): i for i in range(int(1e5))}
+    return complex_obj if complex_output else 'simple output'
+
+
+@with_multiprocessing
+def test_cached_function_race_condition_when_persisting_output(tmpdir, capfd):
+    # Test the race condition where multiple processes are writing into
+    # the same output.pkl. See
+    # https://github.com/joblib/joblib/issues/490 for more details.
+ memory = Memory(location=tmpdir.strpath) + func_cached = memory.cache(fast_func_with_complex_output) + + Parallel(n_jobs=2)(delayed(func_cached)() for i in range(3)) + + stdout, stderr = capfd.readouterr() + + # Checking both stdout and stderr (ongoing PR #434 may change + # logging destination) to make sure there is no exception while + # loading the results + exception_msg = 'Exception while loading results' + assert exception_msg not in stdout + assert exception_msg not in stderr + + +@with_multiprocessing +def test_cached_function_race_condition_when_persisting_output_2(tmpdir, + capfd): + # Test race condition in first attempt at solving + # https://github.com/joblib/joblib/issues/490. The race condition + # was due to the delay between seeing the cache directory created + # (interpreted as the result being cached) and the output.pkl being + # pickled. + memory = Memory(location=tmpdir.strpath) + func_cached = memory.cache(fast_func_with_conditional_complex_output) + + Parallel(n_jobs=2)(delayed(func_cached)(True if i % 2 == 0 else False) + for i in range(3)) + + stdout, stderr = capfd.readouterr() + + # Checking both stdout and stderr (ongoing PR #434 may change + # logging destination) to make sure there is no exception while + # loading the results + exception_msg = 'Exception while loading results' + assert exception_msg not in stdout + assert exception_msg not in stderr + + +def test_memory_recomputes_after_an_error_while_loading_results( + tmpdir, monkeypatch): + memory = Memory(location=tmpdir.strpath) + + def func(arg): + # This makes sure that the timestamp returned by two calls of + # func are different. This is needed on Windows where + # time.time resolution may not be accurate enough + time.sleep(0.01) + return arg, time.time() + + cached_func = memory.cache(func) + input_arg = 'arg' + arg, timestamp = cached_func(input_arg) + + # Make sure the function is correctly cached + assert arg == input_arg + + # Corrupting output.pkl to make sure that an error happens when + # loading the cached result + corrupt_single_cache_item(memory) + + # Make sure that corrupting the file causes recomputation and that + # a warning is issued. 
+    recorded_warnings = monkeypatch_cached_func_warn(cached_func, monkeypatch)
+    recomputed_arg, recomputed_timestamp = cached_func(arg)
+    assert len(recorded_warnings) == 1
+    exception_msg = 'Exception while loading results'
+    assert exception_msg in recorded_warnings[0]
+    assert recomputed_arg == arg
+    assert recomputed_timestamp > timestamp
+
+    # Corrupting output.pkl to make sure that an error happens when
+    # loading the cached result
+    corrupt_single_cache_item(memory)
+    reference = cached_func.call_and_shelve(arg)
+    try:
+        reference.get()
+        raise AssertionError(
+            "It should normally not be possible to load a corrupted"
+            " MemorizedResult"
+        )
+    except KeyError as e:
+        message = "is corrupted"
+        assert message in str(e.args)
+
+
+class IncompleteStoreBackend(StoreBackendBase):
+    """This backend cannot be instantiated and should raise a TypeError."""
+    pass
+
+
+class DummyStoreBackend(StoreBackendBase):
+    """A dummy store backend that does nothing."""
+
+    def _open_item(self, *args, **kwargs):
+        """Open an item on the store."""
+        "Does nothing"
+
+    def _item_exists(self, location):
+        """Check if an item location exists."""
+        "Does nothing"
+
+    def _move_item(self, src, dst):
+        """Move an item from src to dst in the store."""
+        "Does nothing"
+
+    def create_location(self, location):
+        """Create a location on the store."""
+        "Does nothing"
+
+    def exists(self, obj):
+        """Check if an object exists in the store."""
+        return False
+
+    def clear_location(self, obj):
+        """Clear an object on the store."""
+        "Does nothing"
+
+    def get_items(self):
+        """Return the whole list of items available in the cache."""
+        return []
+
+    def configure(self, location, *args, **kwargs):
+        """Configure the store."""
+        "Does nothing"
+
+
+@parametrize("invalid_prefix", [None, dict(), list()])
+def test_register_invalid_store_backends_key(invalid_prefix):
+    # Verify the right exception is raised when passing a wrong backend key.
+    with raises(ValueError) as excinfo:
+        register_store_backend(invalid_prefix, None)
+    excinfo.match(r'Store backend name should be a string*')
+
+
+def test_register_invalid_store_backends_object():
+    # Verify the right exception is raised when passing a wrong backend
+    # object.
+    with raises(ValueError) as excinfo:
+        register_store_backend("fs", None)
+    excinfo.match(r'Store backend should inherit StoreBackendBase*')
+
+
+def test_memory_default_store_backend():
+    # Test that an unknown backend raises a TypeError.
+    with raises(TypeError) as excinfo:
+        Memory(location='/tmp/joblib', backend='unknown')
+    excinfo.match(r"Unknown location*")
+
+
+def test_warning_on_unknown_location_type():
+    class NonSupportedLocationClass:
+        pass
+    unsupported_location = NonSupportedLocationClass()
+
+    with warns(UserWarning) as warninfo:
+        _store_backend_factory("local", location=unsupported_location)
+
+    expected_message = ("Instantiating a backend using a "
+                        "NonSupportedLocationClass as a location is not "
+                        "supported by joblib")
+    assert expected_message in str(warninfo[0].message)
+
+
+def test_instanciate_incomplete_store_backend():
+    # Verify that registering an external incomplete store backend raises an
+    # exception when one tries to instantiate it.
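+    # (Comment-only note: StoreBackendBase is an ABC; DummyStoreBackend
+    # above is instantiable because it overrides every abstract method
+    # (_open_item, _item_exists, _move_item, ...), while
+    # IncompleteStoreBackend overrides none of them, so abc makes its
+    # construction fail with a TypeError.)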
+ backend_name = "isb" + register_store_backend(backend_name, IncompleteStoreBackend) + assert (backend_name, IncompleteStoreBackend) in _STORE_BACKENDS.items() + with raises(TypeError) as excinfo: + _store_backend_factory(backend_name, "fake_location") + excinfo.match(r"Can't instantiate abstract class IncompleteStoreBackend " + "(without an implementation for|with) abstract methods*") + + +def test_dummy_store_backend(): + # Verify that registering an external store backend works. + + backend_name = "dsb" + register_store_backend(backend_name, DummyStoreBackend) + assert (backend_name, DummyStoreBackend) in _STORE_BACKENDS.items() + + backend_obj = _store_backend_factory(backend_name, "dummy_location") + assert isinstance(backend_obj, DummyStoreBackend) + + +def test_instanciate_store_backend_with_pathlib_path(): + # Instantiate a FileSystemStoreBackend using a pathlib.Path object + path = pathlib.Path("some_folder") + backend_obj = _store_backend_factory("local", path) + assert backend_obj.location == "some_folder" + + +def test_filesystem_store_backend_repr(tmpdir): + # Verify string representation of a filesystem store backend. + + repr_pattern = 'FileSystemStoreBackend(location="{location}")' + backend = FileSystemStoreBackend() + assert backend.location is None + + repr(backend) # Should not raise an exception + + assert str(backend) == repr_pattern.format(location=None) + + # backend location is passed explicitly via the configure method (called + # by the internal _store_backend_factory function) + backend.configure(tmpdir.strpath) + + assert str(backend) == repr_pattern.format(location=tmpdir.strpath) + + repr(backend) # Should not raise an exception + + +def test_memory_objects_repr(tmpdir): + # Verify printable reprs of MemorizedResult, MemorizedFunc and Memory. + + def my_func(a, b): + return a + b + + memory = Memory(location=tmpdir.strpath, verbose=0) + memorized_func = memory.cache(my_func) + + memorized_func_repr = 'MemorizedFunc(func={func}, location={location})' + + assert str(memorized_func) == memorized_func_repr.format( + func=my_func, + location=memory.store_backend.location) + + memorized_result = memorized_func.call_and_shelve(42, 42) + + memorized_result_repr = ('MemorizedResult(location="{location}", ' + 'func="{func}", args_id="{args_id}")') + + assert str(memorized_result) == memorized_result_repr.format( + location=memory.store_backend.location, + func=memorized_result.func_id, + args_id=memorized_result.args_id) + + assert str(memory) == 'Memory(location={location})'.format( + location=memory.store_backend.location) + + +def test_memorized_result_pickle(tmpdir): + # Verify a MemoryResult object can be pickled/depickled. 
Non regression + # test introduced following issue + # https://github.com/joblib/joblib/issues/747 + + memory = Memory(location=tmpdir.strpath) + + @memory.cache + def g(x): + return x**2 + + memorized_result = g.call_and_shelve(4) + memorized_result_pickle = pickle.dumps(memorized_result) + memorized_result_loads = pickle.loads(memorized_result_pickle) + + assert memorized_result.store_backend.location == \ + memorized_result_loads.store_backend.location + assert memorized_result.func == memorized_result_loads.func + assert memorized_result.args_id == memorized_result_loads.args_id + assert str(memorized_result) == str(memorized_result_loads) + + +def compare(left, right, ignored_attrs=None): + if ignored_attrs is None: + ignored_attrs = [] + + left_vars = vars(left) + right_vars = vars(right) + assert set(left_vars.keys()) == set(right_vars.keys()) + for attr in left_vars.keys(): + if attr in ignored_attrs: + continue + assert left_vars[attr] == right_vars[attr] + + +@pytest.mark.parametrize('memory_kwargs', + [{'compress': 3, 'verbose': 2}, + {'mmap_mode': 'r', 'verbose': 5, + 'backend_options': {'parameter': 'unused'}}]) +def test_memory_pickle_dump_load(tmpdir, memory_kwargs): + memory = Memory(location=tmpdir.strpath, **memory_kwargs) + + memory_reloaded = pickle.loads(pickle.dumps(memory)) + + # Compare Memory instance before and after pickle roundtrip + compare(memory.store_backend, memory_reloaded.store_backend) + compare(memory, memory_reloaded, + ignored_attrs=set(['store_backend', 'timestamp', '_func_code_id'])) + assert hash(memory) == hash(memory_reloaded) + + func_cached = memory.cache(f) + + func_cached_reloaded = pickle.loads(pickle.dumps(func_cached)) + + # Compare MemorizedFunc instance before/after pickle roundtrip + compare(func_cached.store_backend, func_cached_reloaded.store_backend) + compare(func_cached, func_cached_reloaded, + ignored_attrs=set(['store_backend', 'timestamp', '_func_code_id'])) + assert hash(func_cached) == hash(func_cached_reloaded) + + # Compare MemorizedResult instance before/after pickle roundtrip + memorized_result = func_cached.call_and_shelve(1) + memorized_result_reloaded = pickle.loads(pickle.dumps(memorized_result)) + + compare(memorized_result.store_backend, + memorized_result_reloaded.store_backend) + compare(memorized_result, memorized_result_reloaded, + ignored_attrs=set(['store_backend', 'timestamp', '_func_code_id'])) + assert hash(memorized_result) == hash(memorized_result_reloaded) + + +def test_info_log(tmpdir, caplog): + caplog.set_level(logging.INFO) + x = 3 + + memory = Memory(location=tmpdir.strpath, verbose=20) + + @memory.cache + def f(x): + return x ** 2 + + _ = f(x) + assert "Querying" in caplog.text + caplog.clear() + + memory = Memory(location=tmpdir.strpath, verbose=0) + + @memory.cache + def f(x): + return x ** 2 + + _ = f(x) + assert "Querying" not in caplog.text + caplog.clear() + + +def test_deprecated_bytes_limit(tmpdir): + from joblib import __version__ + if __version__ >= "1.5": + raise DeprecationWarning( + "Bytes limit is deprecated and should be removed by 1.4" + ) + with pytest.warns(DeprecationWarning, match="bytes_limit"): + _ = Memory(location=tmpdir.strpath, bytes_limit='1K') + + +class TestCacheValidationCallback: + "Tests on parameter `cache_validation_callback`" + + def foo(self, x, d, delay=None): + d["run"] = True + if delay is not None: + time.sleep(delay) + return x * 2 + + def test_invalid_cache_validation_callback(self, memory): + "Test invalid values for `cache_validation_callback" + match = 
"cache_validation_callback needs to be callable. Got True." + with pytest.raises(ValueError, match=match): + memory.cache(cache_validation_callback=True) + + @pytest.mark.parametrize("consider_cache_valid", [True, False]) + def test_constant_cache_validation_callback( + self, memory, consider_cache_valid + ): + "Test expiry of old results" + f = memory.cache( + self.foo, cache_validation_callback=lambda _: consider_cache_valid, + ignore=["d"] + ) + + d1, d2 = {"run": False}, {"run": False} + assert f(2, d1) == 4 + assert f(2, d2) == 4 + + assert d1["run"] + assert d2["run"] != consider_cache_valid + + def test_memory_only_cache_long_run(self, memory): + "Test cache validity based on run duration." + + def cache_validation_callback(metadata): + duration = metadata['duration'] + if duration > 0.1: + return True + + f = memory.cache( + self.foo, cache_validation_callback=cache_validation_callback, + ignore=["d"] + ) + + # Short run are not cached + d1, d2 = {"run": False}, {"run": False} + assert f(2, d1, delay=0) == 4 + assert f(2, d2, delay=0) == 4 + assert d1["run"] + assert d2["run"] + + # Longer run are cached + d1, d2 = {"run": False}, {"run": False} + assert f(2, d1, delay=0.2) == 4 + assert f(2, d2, delay=0.2) == 4 + assert d1["run"] + assert not d2["run"] + + def test_memory_expires_after(self, memory): + "Test expiry of old cached results" + + f = memory.cache( + self.foo, cache_validation_callback=expires_after(seconds=.3), + ignore=["d"] + ) + + d1, d2, d3 = {"run": False}, {"run": False}, {"run": False} + assert f(2, d1) == 4 + assert f(2, d2) == 4 + time.sleep(.5) + assert f(2, d3) == 4 + + assert d1["run"] + assert not d2["run"] + assert d3["run"] + + +class TestMemorizedFunc: + "Tests for the MemorizedFunc and NotMemorizedFunc classes" + + @staticmethod + def f(x, counter): + counter[x] = counter.get(x, 0) + 1 + return counter[x] + + def test_call_method_memorized(self, memory): + "Test calling the function" + + f = memory.cache(self.f, ignore=['counter']) + + counter = {} + assert f(2, counter) == 1 + assert f(2, counter) == 1 + + x, meta = f.call(2, counter) + assert x == 2, "f has not been called properly" + assert isinstance(meta, dict), ( + "Metadata are not returned by MemorizedFunc.call." + ) + + def test_call_method_not_memorized(self, memory): + "Test calling the function" + + f = NotMemorizedFunc(self.f) + + counter = {} + assert f(2, counter) == 1 + assert f(2, counter) == 2 + + x, meta = f.call(2, counter) + assert x == 3, "f has not been called properly" + assert isinstance(meta, dict), ( + "Metadata are not returned by MemorizedFunc.call." 
+ ) diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_memory_async.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_memory_async.py new file mode 100644 index 0000000000000000000000000000000000000000..53ddeffda4a3e75590d0d02a3628d0d1e81ab6a2 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_memory_async.py @@ -0,0 +1,170 @@ +import asyncio +import gc +import shutil + +import pytest + +from joblib.memory import (AsyncMemorizedFunc, AsyncNotMemorizedFunc, + MemorizedResult, Memory, NotMemorizedResult) +from joblib.test.common import np, with_numpy +from joblib.testing import raises + +from .test_memory import (corrupt_single_cache_item, + monkeypatch_cached_func_warn) + + +async def check_identity_lazy_async(func, accumulator, location): + """ Similar to check_identity_lazy_async for coroutine functions""" + memory = Memory(location=location, verbose=0) + func = memory.cache(func) + for i in range(3): + for _ in range(2): + value = await func(i) + assert value == i + assert len(accumulator) == i + 1 + + +@pytest.mark.asyncio +async def test_memory_integration_async(tmpdir): + accumulator = list() + + async def f(n): + await asyncio.sleep(0.1) + accumulator.append(1) + return n + + await check_identity_lazy_async(f, accumulator, tmpdir.strpath) + + # Now test clearing + for compress in (False, True): + for mmap_mode in ('r', None): + memory = Memory(location=tmpdir.strpath, verbose=10, + mmap_mode=mmap_mode, compress=compress) + # First clear the cache directory, to check that our code can + # handle that + # NOTE: this line would raise an exception, as the database + # file is still open; we ignore the error since we want to + # test what happens if the directory disappears + shutil.rmtree(tmpdir.strpath, ignore_errors=True) + g = memory.cache(f) + await g(1) + g.clear(warn=False) + current_accumulator = len(accumulator) + out = await g(1) + + assert len(accumulator) == current_accumulator + 1 + # Also, check that Memory.eval works similarly + evaled = await memory.eval(f, 1) + assert evaled == out + assert len(accumulator) == current_accumulator + 1 + + # Now do a smoke test with a function defined in __main__, as the name + # mangling rules are more complex + f.__module__ = '__main__' + memory = Memory(location=tmpdir.strpath, verbose=0) + await memory.cache(f)(1) + + +@pytest.mark.asyncio +async def test_no_memory_async(): + accumulator = list() + + async def ff(x): + await asyncio.sleep(0.1) + accumulator.append(1) + return x + + memory = Memory(location=None, verbose=0) + gg = memory.cache(ff) + for _ in range(4): + current_accumulator = len(accumulator) + await gg(1) + assert len(accumulator) == current_accumulator + 1 + + +@with_numpy +@pytest.mark.asyncio +async def test_memory_numpy_check_mmap_mode_async(tmpdir, monkeypatch): + """Check that mmap_mode is respected even at the first call""" + + memory = Memory(location=tmpdir.strpath, mmap_mode='r', verbose=0) + + @memory.cache() + async def twice(a): + return a * 2 + + a = np.ones(3) + b = await twice(a) + c = await twice(a) + + assert isinstance(c, np.memmap) + assert c.mode == 'r' + + assert isinstance(b, np.memmap) + assert b.mode == 'r' + + # Corrupts the file, Deleting b and c mmaps + # is necessary to be able edit the file + del b + del c + gc.collect() + corrupt_single_cache_item(memory) + + # Make sure that corrupting the file causes recomputation and that + # a warning is issued. 
+ recorded_warnings = monkeypatch_cached_func_warn(twice, monkeypatch) + d = await twice(a) + assert len(recorded_warnings) == 1 + exception_msg = 'Exception while loading results' + assert exception_msg in recorded_warnings[0] + # Asserts that the recomputation returns a mmap + assert isinstance(d, np.memmap) + assert d.mode == 'r' + + +@pytest.mark.asyncio +async def test_call_and_shelve_async(tmpdir): + async def f(x, y=1): + await asyncio.sleep(0.1) + return x ** 2 + y + + # Test MemorizedFunc outputting a reference to cache. + for func, Result in zip((AsyncMemorizedFunc(f, tmpdir.strpath), + AsyncNotMemorizedFunc(f), + Memory(location=tmpdir.strpath, + verbose=0).cache(f), + Memory(location=None).cache(f), + ), + (MemorizedResult, NotMemorizedResult, + MemorizedResult, NotMemorizedResult, + )): + for _ in range(2): + result = await func.call_and_shelve(2) + assert isinstance(result, Result) + assert result.get() == 5 + + result.clear() + with raises(KeyError): + result.get() + result.clear() # Do nothing if there is no cache. + + +@pytest.mark.asyncio +async def test_memorized_func_call_async(memory): + + async def ff(x, counter): + await asyncio.sleep(0.1) + counter[x] = counter.get(x, 0) + 1 + return counter[x] + + gg = memory.cache(ff, ignore=['counter']) + + counter = {} + assert await gg(2, counter) == 1 + assert await gg(2, counter) == 1 + + x, meta = await gg.call(2, counter) + assert x == 2, "f has not been called properly" + assert isinstance(meta, dict), ( + "Metadata are not returned by MemorizedFunc.call." + ) diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..251925ced5208b4aaf09d9aab305eb44c7102818 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py @@ -0,0 +1,32 @@ +""" +Pyodide and other single-threaded Python builds will be missing the +_multiprocessing module. Test that joblib still works in this environment. +""" + +import os +import subprocess +import sys + + +def test_missing_multiprocessing(tmp_path): + """ + Test that import joblib works even if _multiprocessing is missing. + + pytest has already imported everything from joblib. The most reasonable way + to test importing joblib with modified environment is to invoke a separate + Python process. This also ensures that we don't break other tests by + importing a bad `_multiprocessing` module. + """ + (tmp_path / "_multiprocessing.py").write_text( + 'raise ImportError("No _multiprocessing module!")' + ) + env = dict(os.environ) + # For subprocess, use current sys.path with our custom version of + # multiprocessing inserted. 
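+    # (Comment-only note: prepending tmp_path to PYTHONPATH makes the stub
+    # _multiprocessing.py above shadow CPython's extension module in the
+    # child interpreter, emulating a single-threaded build such as
+    # Pyodide.)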
+ env["PYTHONPATH"] = ":".join([str(tmp_path)] + sys.path) + subprocess.check_call( + [sys.executable, "-c", + "import joblib, math; " + "joblib.Parallel(n_jobs=1)(" + "joblib.delayed(math.sqrt)(i**2) for i in range(10))" + ], env=env) diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_module.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_module.py new file mode 100644 index 0000000000000000000000000000000000000000..a2257a4142d79996f3d299cb820927ae48a05810 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_module.py @@ -0,0 +1,53 @@ +import sys +import joblib +from joblib.testing import check_subprocess_call +from joblib.test.common import with_multiprocessing + + +def test_version(): + assert hasattr(joblib, '__version__'), ( + "There are no __version__ argument on the joblib module") + + +@with_multiprocessing +def test_no_start_method_side_effect_on_import(): + # check that importing joblib does not implicitly set the global + # start_method for multiprocessing. + code = """if True: + import joblib + import multiprocessing as mp + # The following line would raise RuntimeError if the + # start_method is already set. + mp.set_start_method("loky") + """ + check_subprocess_call([sys.executable, '-c', code]) + + +@with_multiprocessing +def test_no_semaphore_tracker_on_import(): + # check that importing joblib does not implicitly spawn a resource tracker + # or a semaphore tracker + code = """if True: + import joblib + from multiprocessing import semaphore_tracker + # The following line would raise RuntimeError if the + # start_method is already set. + msg = "multiprocessing.semaphore_tracker has been spawned on import" + assert semaphore_tracker._semaphore_tracker._fd is None, msg""" + if sys.version_info >= (3, 8): + # semaphore_tracker was renamed in Python 3.8: + code = code.replace("semaphore_tracker", "resource_tracker") + check_subprocess_call([sys.executable, '-c', code]) + + +@with_multiprocessing +def test_no_resource_tracker_on_import(): + code = """if True: + import joblib + from joblib.externals.loky.backend import resource_tracker + # The following line would raise RuntimeError if the + # start_method is already set. + msg = "loky.resource_tracker has been spawned on import" + assert resource_tracker._resource_tracker._fd is None, msg + """ + check_subprocess_call([sys.executable, '-c', code]) diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_numpy_pickle.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_numpy_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..9fee585c79ad219d3a9f8cdc6a55655b50099c09 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_numpy_pickle.py @@ -0,0 +1,1159 @@ +"""Test the numpy pickler as a replacement of the standard pickler.""" + +import copy +import os +import random +import re +import io +import sys +import warnings +import gzip +import zlib +import bz2 +import pickle +import socket +from contextlib import closing +import mmap +from pathlib import Path + +try: + import lzma +except ImportError: + lzma = None + +import pytest + +from joblib.test.common import np, with_numpy, with_lz4, without_lz4 +from joblib.test.common import with_memory_profiler, memory_used +from joblib.testing import parametrize, raises, warns + +# numpy_pickle is not a drop-in replacement of pickle, as it takes +# filenames instead of open files as arguments. 
+from joblib import numpy_pickle, register_compressor +from joblib.test import data + +from joblib.numpy_pickle_utils import _IO_BUFFER_SIZE +from joblib.numpy_pickle_utils import _detect_compressor +from joblib.numpy_pickle_utils import _is_numpy_array_byte_order_mismatch +from joblib.numpy_pickle_utils import _ensure_native_byte_order +from joblib.compressor import (_COMPRESSORS, _LZ4_PREFIX, CompressorWrapper, + LZ4_NOT_INSTALLED_ERROR, BinaryZlibFile) + + +############################################################################### +# Define a list of standard types. +# Borrowed from dill, initial author: Micheal McKerns: +# http://dev.danse.us/trac/pathos/browser/dill/dill_test2.py + +typelist = [] + +# testing types +_none = None +typelist.append(_none) +_type = type +typelist.append(_type) +_bool = bool(1) +typelist.append(_bool) +_int = int(1) +typelist.append(_int) +_float = float(1) +typelist.append(_float) +_complex = complex(1) +typelist.append(_complex) +_string = str(1) +typelist.append(_string) +_tuple = () +typelist.append(_tuple) +_list = [] +typelist.append(_list) +_dict = {} +typelist.append(_dict) +_builtin = len +typelist.append(_builtin) + + +def _function(x): + yield x + + +class _class: + def _method(self): + pass + + +class _newclass(object): + def _method(self): + pass + + +typelist.append(_function) +typelist.append(_class) +typelist.append(_newclass) # +_instance = _class() +typelist.append(_instance) +_object = _newclass() +typelist.append(_object) # + + +############################################################################### +# Tests + +@parametrize('compress', [0, 1]) +@parametrize('member', typelist) +def test_standard_types(tmpdir, compress, member): + # Test pickling and saving with standard types. + filename = tmpdir.join('test.pkl').strpath + numpy_pickle.dump(member, filename, compress=compress) + _member = numpy_pickle.load(filename) + # We compare the pickled instance to the reloaded one only if it + # can be compared to a copied one + if member == copy.deepcopy(member): + assert member == _member + + +def test_value_error(): + # Test inverting the input arguments to dump + with raises(ValueError): + numpy_pickle.dump('foo', dict()) + + +@parametrize('wrong_compress', [-1, 10, dict()]) +def test_compress_level_error(wrong_compress): + # Verify that passing an invalid compress argument raises an error. + exception_msg = ('Non valid compress level given: ' + '"{0}"'.format(wrong_compress)) + with raises(ValueError) as excinfo: + numpy_pickle.dump('dummy', 'foo', compress=wrong_compress) + excinfo.match(exception_msg) + + +@with_numpy +@parametrize('compress', [False, True, 0, 3, 'zlib']) +def test_numpy_persistence(tmpdir, compress): + filename = tmpdir.join('test.pkl').strpath + rnd = np.random.RandomState(0) + a = rnd.random_sample((10, 2)) + # We use 'a.T' to have a non C-contiguous array. + for index, obj in enumerate(((a,), (a.T,), (a, a), [a, a, a])): + filenames = numpy_pickle.dump(obj, filename, compress=compress) + + # All is cached in one file + assert len(filenames) == 1 + # Check that only one file was created + assert filenames[0] == filename + # Check that this file does exist + assert os.path.exists(filenames[0]) + + # Unpickle the object + obj_ = numpy_pickle.load(filename) + # Check that the items are indeed arrays + for item in obj_: + assert isinstance(item, np.ndarray) + # And finally, check that all the values are equal. 
+ np.testing.assert_array_equal(np.array(obj), np.array(obj_)) + + # Now test with an array subclass + obj = np.memmap(filename + 'mmap', mode='w+', shape=4, dtype=np.float64) + filenames = numpy_pickle.dump(obj, filename, compress=compress) + # All is cached in one file + assert len(filenames) == 1 + + obj_ = numpy_pickle.load(filename) + if (type(obj) is not np.memmap and + hasattr(obj, '__array_prepare__')): + # We don't reconstruct memmaps + assert isinstance(obj_, type(obj)) + + np.testing.assert_array_equal(obj_, obj) + + # Test with an object containing multiple numpy arrays + obj = ComplexTestObject() + filenames = numpy_pickle.dump(obj, filename, compress=compress) + # All is cached in one file + assert len(filenames) == 1 + + obj_loaded = numpy_pickle.load(filename) + assert isinstance(obj_loaded, type(obj)) + np.testing.assert_array_equal(obj_loaded.array_float, obj.array_float) + np.testing.assert_array_equal(obj_loaded.array_int, obj.array_int) + np.testing.assert_array_equal(obj_loaded.array_obj, obj.array_obj) + + +@with_numpy +def test_numpy_persistence_bufferred_array_compression(tmpdir): + big_array = np.ones((_IO_BUFFER_SIZE + 100), dtype=np.uint8) + filename = tmpdir.join('test.pkl').strpath + numpy_pickle.dump(big_array, filename, compress=True) + arr_reloaded = numpy_pickle.load(filename) + + np.testing.assert_array_equal(big_array, arr_reloaded) + + +@with_numpy +def test_memmap_persistence(tmpdir): + rnd = np.random.RandomState(0) + a = rnd.random_sample(10) + filename = tmpdir.join('test1.pkl').strpath + numpy_pickle.dump(a, filename) + b = numpy_pickle.load(filename, mmap_mode='r') + + assert isinstance(b, np.memmap) + + # Test with an object containing multiple numpy arrays + filename = tmpdir.join('test2.pkl').strpath + obj = ComplexTestObject() + numpy_pickle.dump(obj, filename) + obj_loaded = numpy_pickle.load(filename, mmap_mode='r') + assert isinstance(obj_loaded, type(obj)) + assert isinstance(obj_loaded.array_float, np.memmap) + assert not obj_loaded.array_float.flags.writeable + assert isinstance(obj_loaded.array_int, np.memmap) + assert not obj_loaded.array_int.flags.writeable + # Memory map not allowed for numpy object arrays + assert not isinstance(obj_loaded.array_obj, np.memmap) + np.testing.assert_array_equal(obj_loaded.array_float, + obj.array_float) + np.testing.assert_array_equal(obj_loaded.array_int, + obj.array_int) + np.testing.assert_array_equal(obj_loaded.array_obj, + obj.array_obj) + + # Test we can write in memmapped arrays + obj_loaded = numpy_pickle.load(filename, mmap_mode='r+') + assert obj_loaded.array_float.flags.writeable + obj_loaded.array_float[0:10] = 10.0 + assert obj_loaded.array_int.flags.writeable + obj_loaded.array_int[0:10] = 10 + + obj_reloaded = numpy_pickle.load(filename, mmap_mode='r') + np.testing.assert_array_equal(obj_reloaded.array_float, + obj_loaded.array_float) + np.testing.assert_array_equal(obj_reloaded.array_int, + obj_loaded.array_int) + + # Test w+ mode is caught and the mode has switched to r+ + numpy_pickle.load(filename, mmap_mode='w+') + assert obj_loaded.array_int.flags.writeable + assert obj_loaded.array_int.mode == 'r+' + assert obj_loaded.array_float.flags.writeable + assert obj_loaded.array_float.mode == 'r+' + + +@with_numpy +def test_memmap_persistence_mixed_dtypes(tmpdir): + # loading datastructures that have sub-arrays with dtype=object + # should not prevent memmapping on fixed size dtype sub-arrays. 
+ rnd = np.random.RandomState(0) + a = rnd.random_sample(10) + b = np.array([1, 'b'], dtype=object) + construct = (a, b) + filename = tmpdir.join('test.pkl').strpath + numpy_pickle.dump(construct, filename) + a_clone, b_clone = numpy_pickle.load(filename, mmap_mode='r') + + # the floating point array has been memory mapped + assert isinstance(a_clone, np.memmap) + + # the object-dtype array has been loaded in memory + assert not isinstance(b_clone, np.memmap) + + +@with_numpy +def test_masked_array_persistence(tmpdir): + # The special-case picker fails, because saving masked_array + # not implemented, but it just delegates to the standard pickler. + rnd = np.random.RandomState(0) + a = rnd.random_sample(10) + a = np.ma.masked_greater(a, 0.5) + filename = tmpdir.join('test.pkl').strpath + numpy_pickle.dump(a, filename) + b = numpy_pickle.load(filename, mmap_mode='r') + assert isinstance(b, np.ma.masked_array) + + +@with_numpy +def test_compress_mmap_mode_warning(tmpdir): + # Test the warning in case of compress + mmap_mode + rnd = np.random.RandomState(0) + a = rnd.random_sample(10) + this_filename = tmpdir.join('test.pkl').strpath + numpy_pickle.dump(a, this_filename, compress=1) + with warns(UserWarning) as warninfo: + numpy_pickle.load(this_filename, mmap_mode='r+') + debug_msg = "\n".join([str(w) for w in warninfo]) + warninfo = [w.message for w in warninfo] + assert len(warninfo) == 1, debug_msg + assert ( + str(warninfo[0]) == + 'mmap_mode "r+" is not compatible with compressed ' + f'file {this_filename}. "r+" flag will be ignored.' + ) + + +@with_numpy +@parametrize('cache_size', [None, 0, 10]) +def test_cache_size_warning(tmpdir, cache_size): + # Check deprecation warning raised when cache size is not None + filename = tmpdir.join('test.pkl').strpath + rnd = np.random.RandomState(0) + a = rnd.random_sample((10, 2)) + + warnings.simplefilter("always") + with warnings.catch_warnings(record=True) as warninfo: + numpy_pickle.dump(a, filename, cache_size=cache_size) + expected_nb_warnings = 1 if cache_size is not None else 0 + assert len(warninfo) == expected_nb_warnings + for w in warninfo: + assert w.category == DeprecationWarning + assert (str(w.message) == + "Please do not set 'cache_size' in joblib.dump, this " + "parameter has no effect and will be removed. You " + "used 'cache_size={0}'".format(cache_size)) + + +@with_numpy +@with_memory_profiler +@parametrize('compress', [True, False]) +def test_memory_usage(tmpdir, compress): + # Verify memory stays within expected bounds. + filename = tmpdir.join('test.pkl').strpath + small_array = np.ones((10, 10)) + big_array = np.ones(shape=100 * int(1e6), dtype=np.uint8) + + for obj in (small_array, big_array): + size = obj.nbytes / 1e6 + obj_filename = filename + str(np.random.randint(0, 1000)) + mem_used = memory_used(numpy_pickle.dump, + obj, obj_filename, compress=compress) + + # The memory used to dump the object shouldn't exceed the buffer + # size used to write array chunks (16MB). + write_buf_size = _IO_BUFFER_SIZE + 16 * 1024 ** 2 / 1e6 + assert mem_used <= write_buf_size + + mem_used = memory_used(numpy_pickle.load, obj_filename) + # memory used should be less than array size + buffer size used to + # read the array chunk by chunk. 
+ read_buf_size = 32 + _IO_BUFFER_SIZE # MiB + assert mem_used < size + read_buf_size + + +@with_numpy +def test_compressed_pickle_dump_and_load(tmpdir): + expected_list = [np.arange(5, dtype=np.dtype('i8')), + np.arange(5, dtype=np.dtype('f8')), + np.array([1, 'abc', {'a': 1, 'b': 2}], dtype='O'), + np.arange(256, dtype=np.uint8).tobytes(), + u"C'est l'\xe9t\xe9 !"] + + fname = tmpdir.join('temp.pkl.gz').strpath + + dumped_filenames = numpy_pickle.dump(expected_list, fname, compress=1) + assert len(dumped_filenames) == 1 + result_list = numpy_pickle.load(fname) + for result, expected in zip(result_list, expected_list): + if isinstance(expected, np.ndarray): + expected = _ensure_native_byte_order(expected) + assert result.dtype == expected.dtype + np.testing.assert_equal(result, expected) + else: + assert result == expected + + +def _check_pickle(filename, expected_list, mmap_mode=None): + """Helper function to test joblib pickle content. + + Note: currently only pickles containing an iterable are supported + by this function. + """ + version_match = re.match(r'.+py(\d)(\d).+', filename) + py_version_used_for_writing = int(version_match.group(1)) + + py_version_to_default_pickle_protocol = {2: 2, 3: 3} + pickle_reading_protocol = py_version_to_default_pickle_protocol.get(3, 4) + pickle_writing_protocol = py_version_to_default_pickle_protocol.get( + py_version_used_for_writing, 4) + if pickle_reading_protocol >= pickle_writing_protocol: + try: + with warnings.catch_warnings(record=True) as warninfo: + warnings.simplefilter('always') + warnings.filterwarnings( + 'ignore', module='numpy', + message='The compiler package is deprecated') + result_list = numpy_pickle.load(filename, mmap_mode=mmap_mode) + filename_base = os.path.basename(filename) + expected_nb_deprecation_warnings = 1 if ( + "_0.9" in filename_base or "_0.8.4" in filename_base) else 0 + + expected_nb_user_warnings = 3 if ( + re.search("_0.1.+.pkl$", filename_base) and + mmap_mode is not None) else 0 + expected_nb_warnings = \ + expected_nb_deprecation_warnings + expected_nb_user_warnings + assert len(warninfo) == expected_nb_warnings + + deprecation_warnings = [ + w for w in warninfo if issubclass( + w.category, DeprecationWarning)] + user_warnings = [ + w for w in warninfo if issubclass( + w.category, UserWarning)] + for w in deprecation_warnings: + assert (str(w.message) == + "The file '{0}' has been generated with a joblib " + "version less than 0.10. Please regenerate this " + "pickle file.".format(filename)) + + for w in user_warnings: + escaped_filename = re.escape(filename) + assert re.search( + f"memmapped.+{escaped_filename}.+segmentation fault", + str(w.message)) + + for result, expected in zip(result_list, expected_list): + if isinstance(expected, np.ndarray): + expected = _ensure_native_byte_order(expected) + assert result.dtype == expected.dtype + np.testing.assert_equal(result, expected) + else: + assert result == expected + except Exception as exc: + # When trying to read with python 3 a pickle generated + # with python 2 we expect a user-friendly error + if py_version_used_for_writing == 2: + assert isinstance(exc, ValueError) + message = ('You may be trying to read with ' + 'python 3 a joblib pickle generated with python 2.') + assert message in str(exc) + elif filename.endswith('.lz4') and with_lz4.args[0]: + assert isinstance(exc, ValueError) + assert LZ4_NOT_INSTALLED_ERROR in str(exc) + else: + raise + else: + # Pickle protocol used for writing is too high. 
We expect a
+        # "unsupported pickle protocol" error message
+        try:
+            numpy_pickle.load(filename)
+            raise AssertionError('Numpy pickle loading should '
+                                 'have raised a ValueError exception')
+        except ValueError as e:
+            message = 'unsupported pickle protocol: {0}'.format(
+                pickle_writing_protocol)
+            assert message in str(e.args)
+
+
+@with_numpy
+def test_joblib_pickle_across_python_versions():
+    # We need to be specific about dtypes, in particular endianness,
+    # because the pickles can be generated on one architecture and
+    # the tests run on another one. See
+    # https://github.com/joblib/joblib/issues/279.
+    expected_list = [np.arange(5, dtype=np.dtype('<i8')),
+                     np.arange(5, dtype=np.dtype('<f8')),
+                     np.array([1, 'abc', {'a': 1, 'b': 2}], dtype='O'),
+                     np.arange(256, dtype=np.uint8).tobytes(),
+                     u"C'est l'\xe9t\xe9 !"]
+
+    # Check the compressed and non compressed pickles shipped in
+    # joblib/test/data, which were generated across python versions.
+    test_data_dir = os.path.dirname(os.path.abspath(data.__file__))
+    pickle_filenames = [
+        os.path.join(test_data_dir, fn)
+        for fn in os.listdir(test_data_dir)
+        if any(fn.endswith(ext) for ext in
+               ('.pkl', '.gz', '.gzip', '.bz2', '.xz', '.lzma', '.lz4'))]
+    for fname in pickle_filenames:
+        _check_pickle(fname, expected_list)
+
+
+@with_numpy
+def test_numpy_array_byte_order_mismatch(tmpdir):
+    # List of numpy arrays with big endian byteorder.
+    be_arrays = [np.array([(1, 2.0), (3, 4.0)],
+                          dtype=[('', '>i8'), ('', '>f8')]),
+                 np.arange(3, dtype=np.dtype('>i8')),
+                 np.arange(3, dtype=np.dtype('>f8'))]
+
+    # Verify the byteorder mismatch is correctly detected.
+    for array in be_arrays:
+        if sys.byteorder == 'big':
+            assert not _is_numpy_array_byte_order_mismatch(array)
+        else:
+            assert _is_numpy_array_byte_order_mismatch(array)
+        converted = _ensure_native_byte_order(array)
+        if converted.dtype.fields:
+            for f in converted.dtype.fields.values():
+                assert f[0].byteorder == '='
+        else:
+            assert converted.dtype.byteorder == "="
+
+    # List of numpy arrays with little endian byteorder.
+    le_arrays = [np.array([(1, 2.0), (3, 4.0)],
+                          dtype=[('', '<i8'), ('', '<f8')]),
+                 np.arange(3, dtype=np.dtype('<i8')),
+                 np.arange(3, dtype=np.dtype('<f8'))]
+
+    # Verify the byteorder mismatch is correctly detected.
+    for array in le_arrays:
+        if sys.byteorder == 'little':
+            assert not _is_numpy_array_byte_order_mismatch(array)
+        else:
+            assert _is_numpy_array_byte_order_mismatch(array)
+        converted = _ensure_native_byte_order(array)
+        if converted.dtype.fields:
+            for f in converted.dtype.fields.values():
+                assert f[0].byteorder == '='
+        else:
+            assert converted.dtype.byteorder == "="
+
+
+@with_numpy
+def test_load_memmap_with_big_offset(tmpdir):
+    # Test that the numpy memmap offset is set correctly if greater than
+    # mmap.ALLOCATIONGRANULARITY, see
+    # https://github.com/joblib/joblib/issues/451 for more details.
+    fname = tmpdir.join('test.mmap').strpath
+    size = mmap.ALLOCATIONGRANULARITY
+    obj = [np.zeros(size, dtype='uint8'), np.ones(size, dtype='uint8')]
+    numpy_pickle.dump(obj, fname)
+    memmaps = numpy_pickle.load(fname, mmap_mode='r')
+    assert isinstance(memmaps[1], np.memmap)
+    assert memmaps[1].offset > size
+    np.testing.assert_array_equal(obj, memmaps)
+
+
+def test_register_compressor(tmpdir):
+    # Check that registering a compressor class works.
+    compressor_name = 'test-name'
+    compressor_prefix = 'test-prefix'
+
+    class BinaryCompressorTestFile(io.BufferedIOBase):
+        pass
+
+    class BinaryCompressorTestWrapper(CompressorWrapper):
+
+        def __init__(self):
+            CompressorWrapper.__init__(self, obj=BinaryCompressorTestFile,
+                                       prefix=compressor_prefix)
+
+    register_compressor(compressor_name, BinaryCompressorTestWrapper())
+
+    assert (_COMPRESSORS[compressor_name].fileobj_factory ==
+            BinaryCompressorTestFile)
+    assert _COMPRESSORS[compressor_name].prefix == compressor_prefix
+
+    # Remove this dummy compressor from the registered compressors,
+    # otherwise other tests might fail because of it
+    _COMPRESSORS.pop(compressor_name)
+
+
+@parametrize('invalid_name', [1, (), {}])
+def test_register_compressor_invalid_name(invalid_name):
+    # Test that registering an invalid compressor name is not allowed.
+    with raises(ValueError) as excinfo:
+        register_compressor(invalid_name, None)
+    excinfo.match("Compressor name should be a string")
+
+
+def test_register_compressor_invalid_fileobj():
+    # Test that registering an invalid file object is not allowed.
+
+    class InvalidFileObject():
+        pass
+
+    class InvalidFileObjectWrapper(CompressorWrapper):
+        def __init__(self):
+            CompressorWrapper.__init__(self, obj=InvalidFileObject,
+                                       prefix=b'prefix')
+
+    with raises(ValueError) as excinfo:
+        register_compressor('invalid', InvalidFileObjectWrapper())
+
+    excinfo.match("Compressor 'fileobj_factory' attribute should implement "
+                  "the file object interface")
+
+
+class AnotherZlibCompressorWrapper(CompressorWrapper):
+
+    def __init__(self):
+        CompressorWrapper.__init__(self, obj=BinaryZlibFile, prefix=b'prefix')
+
+
+class StandardLibGzipCompressorWrapper(CompressorWrapper):
+
+    def __init__(self):
+        CompressorWrapper.__init__(self, obj=gzip.GzipFile, prefix=b'prefix')
+
+
+def test_register_compressor_already_registered():
+    # Test registration of already registered compressor names.
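+    # (Comment-only note: register_compressor(name, wrapper) refuses to
+    # overwrite an existing entry unless force=True is passed, which is
+    # exactly what the calls below verify.)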
+    compressor_name = 'test-name'
+
+    # register a test compressor
+    register_compressor(compressor_name, AnotherZlibCompressorWrapper())
+
+    with raises(ValueError) as excinfo:
+        register_compressor(compressor_name,
+                            StandardLibGzipCompressorWrapper())
+    excinfo.match("Compressor '{}' already registered."
+                  .format(compressor_name))
+
+    register_compressor(compressor_name, StandardLibGzipCompressorWrapper(),
+                        force=True)
+
+    assert compressor_name in _COMPRESSORS
+    assert _COMPRESSORS[compressor_name].fileobj_factory == gzip.GzipFile
+
+    # Remove this dummy compressor from the extra compressors because other
+    # tests might fail because of it
+    _COMPRESSORS.pop(compressor_name)
+
+
+@with_lz4
+def test_lz4_compression(tmpdir):
+    # Check that lz4 can be used when the dependency is available.
+    import lz4.frame
+    compressor = 'lz4'
+    assert compressor in _COMPRESSORS
+    assert _COMPRESSORS[compressor].fileobj_factory == lz4.frame.LZ4FrameFile
+
+    fname = tmpdir.join('test.pkl').strpath
+    data = 'test data'
+    numpy_pickle.dump(data, fname, compress=compressor)
+
+    with open(fname, 'rb') as f:
+        assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX
+    assert numpy_pickle.load(fname) == data
+
+    # Test that LZ4 is applied based on the file extension: the checks must
+    # target the freshly written '.lz4' file, not the file dumped above.
+    numpy_pickle.dump(data, fname + '.lz4')
+    with open(fname + '.lz4', 'rb') as f:
+        assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX
+    assert numpy_pickle.load(fname + '.lz4') == data
+
+
+@without_lz4
+def test_lz4_compression_without_lz4(tmpdir):
+    # Check that lz4 cannot be used when the dependency is not available.
+    fname = tmpdir.join('test.nolz4').strpath
+    data = 'test data'
+    msg = LZ4_NOT_INSTALLED_ERROR
+    with raises(ValueError) as excinfo:
+        numpy_pickle.dump(data, fname, compress='lz4')
+    excinfo.match(msg)
+
+    with raises(ValueError) as excinfo:
+        numpy_pickle.dump(data, fname + '.lz4')
+    excinfo.match(msg)
+
+
+protocols = [pickle.DEFAULT_PROTOCOL]
+if pickle.HIGHEST_PROTOCOL != pickle.DEFAULT_PROTOCOL:
+    protocols.append(pickle.HIGHEST_PROTOCOL)
+
+
+@with_numpy
+@parametrize('protocol', protocols)
+def test_memmap_alignment_padding(tmpdir, protocol):
+    # Test that memmapped arrays returned by numpy_pickle.load are
+    # correctly aligned
+    fname = tmpdir.join('test.mmap').strpath
+
+    a = np.random.randn(2)
+    numpy_pickle.dump(a, fname, protocol=protocol)
+    memmap = numpy_pickle.load(fname, mmap_mode='r')
+    assert isinstance(memmap, np.memmap)
+    np.testing.assert_array_equal(a, memmap)
+    assert (
+        memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0)
+    assert memmap.flags.aligned
+
+    array_list = [
+        np.random.randn(2), np.random.randn(2),
+        np.random.randn(2), np.random.randn(2)
+    ]
+
+    # On Windows OSError 22 if reusing the same path for memmap ...
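+    # The alignment guarantee must also hold for every array inside a
+    # container, not only for a single top-level array.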
+ fname = tmpdir.join('test1.mmap').strpath + numpy_pickle.dump(array_list, fname, protocol=protocol) + l_reloaded = numpy_pickle.load(fname, mmap_mode='r') + + for idx, memmap in enumerate(l_reloaded): + assert isinstance(memmap, np.memmap) + np.testing.assert_array_equal(array_list[idx], memmap) + assert ( + memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0) + assert memmap.flags.aligned + + array_dict = { + 'a0': np.arange(2, dtype=np.uint8), + 'a1': np.arange(3, dtype=np.uint8), + 'a2': np.arange(5, dtype=np.uint8), + 'a3': np.arange(7, dtype=np.uint8), + 'a4': np.arange(11, dtype=np.uint8), + 'a5': np.arange(13, dtype=np.uint8), + 'a6': np.arange(17, dtype=np.uint8), + 'a7': np.arange(19, dtype=np.uint8), + 'a8': np.arange(23, dtype=np.uint8), + } + + # On Windows OSError 22 if reusing the same path for memmap ... + fname = tmpdir.join('test2.mmap').strpath + numpy_pickle.dump(array_dict, fname, protocol=protocol) + d_reloaded = numpy_pickle.load(fname, mmap_mode='r') + + for key, memmap in d_reloaded.items(): + assert isinstance(memmap, np.memmap) + np.testing.assert_array_equal(array_dict[key], memmap) + assert ( + memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0) + assert memmap.flags.aligned diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_compat.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..9e95393b19e979593c7037d0d4fb740e47131dde --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_compat.py @@ -0,0 +1,16 @@ +"""Test the old numpy pickler, compatibility version.""" + +# numpy_pickle is not a drop-in replacement of pickle, as it takes +# filenames instead of open files as arguments. +from joblib import numpy_pickle_compat + + +def test_z_file(tmpdir): + # Test saving and loading data with Zfiles. + filename = tmpdir.join('test.pkl').strpath + data = numpy_pickle_compat.asbytes('Foo, \n Bar, baz, \n\nfoobar') + with open(filename, 'wb') as f: + numpy_pickle_compat.write_zfile(f, data) + with open(filename, 'rb') as f: + data_read = numpy_pickle_compat.read_zfile(f) + assert data == data_read diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_utils.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5c414a2227cbf6094dc594f6712139d4fd397a9d --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_utils.py @@ -0,0 +1,9 @@ +from joblib.compressor import BinaryZlibFile +from joblib.testing import parametrize + + +@parametrize('filename', ['test', u'test']) # testing str and unicode names +def test_binary_zlib_file(tmpdir, filename): + """Testing creation of files depending on the type of the filenames.""" + binary_file = BinaryZlibFile(tmpdir.join(filename).strpath, mode='wb') + binary_file.close() diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_parallel.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..f171375a872bccafc1711f935ec5369737bc617c --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_parallel.py @@ -0,0 +1,2056 @@ +""" +Test the parallel module. 
+""" + +# Author: Gael Varoquaux +# Copyright (c) 2010-2011 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import os +import sys +import time +import mmap +import weakref +import warnings +import threading +from traceback import format_exception +from math import sqrt +from time import sleep +from pickle import PicklingError +from contextlib import nullcontext +from multiprocessing import TimeoutError +import pytest + +import joblib +from joblib import parallel +from joblib import dump, load + +from joblib._multiprocessing_helpers import mp + +from joblib.test.common import np, with_numpy +from joblib.test.common import with_multiprocessing +from joblib.test.common import IS_PYPY, force_gc_pypy +from joblib.testing import (parametrize, raises, check_subprocess_call, + skipif, warns) + +if mp is not None: + # Loky is not available if multiprocessing is not + from joblib.externals.loky import get_reusable_executor + +from queue import Queue + +try: + import posix +except ImportError: + posix = None + +try: + from ._openmp_test_helper.parallel_sum import parallel_sum +except ImportError: + parallel_sum = None + +try: + import distributed +except ImportError: + distributed = None + +from joblib._parallel_backends import SequentialBackend +from joblib._parallel_backends import ThreadingBackend +from joblib._parallel_backends import MultiprocessingBackend +from joblib._parallel_backends import ParallelBackendBase +from joblib._parallel_backends import LokyBackend + +from joblib.parallel import Parallel, delayed +from joblib.parallel import parallel_config +from joblib.parallel import parallel_backend +from joblib.parallel import register_parallel_backend +from joblib.parallel import effective_n_jobs, cpu_count + +from joblib.parallel import mp, BACKENDS, DEFAULT_BACKEND + + +RETURN_GENERATOR_BACKENDS = BACKENDS.copy() +RETURN_GENERATOR_BACKENDS.pop("multiprocessing", None) + +ALL_VALID_BACKENDS = [None] + sorted(BACKENDS.keys()) +# Add instances of backend classes deriving from ParallelBackendBase +ALL_VALID_BACKENDS += [BACKENDS[backend_str]() for backend_str in BACKENDS] +if mp is None: + PROCESS_BACKENDS = [] +else: + PROCESS_BACKENDS = ['multiprocessing', 'loky'] +PARALLEL_BACKENDS = PROCESS_BACKENDS + ['threading'] + +if hasattr(mp, 'get_context'): + # Custom multiprocessing context in Python 3.4+ + ALL_VALID_BACKENDS.append(mp.get_context('spawn')) + +DefaultBackend = BACKENDS[DEFAULT_BACKEND] + + +def get_workers(backend): + return getattr(backend, '_pool', getattr(backend, '_workers', None)) + + +def division(x, y): + return x / y + + +def square(x): + return x ** 2 + + +class MyExceptionWithFinickyInit(Exception): + """An exception class with non trivial __init__ + """ + def __init__(self, a, b, c, d): + pass + + +def exception_raiser(x, custom_exception=False): + if x == 7: + raise (MyExceptionWithFinickyInit('a', 'b', 'c', 'd') + if custom_exception else ValueError) + return x + + +def interrupt_raiser(x): + time.sleep(.05) + raise KeyboardInterrupt + + +def f(x, y=0, z=0): + """ A module-level function so that it can be spawn with + multiprocessing. 
+ """ + return x ** 2 + y + z + + +def _active_backend_type(): + return type(parallel.get_active_backend()[0]) + + +def parallel_func(inner_n_jobs, backend): + return Parallel(n_jobs=inner_n_jobs, backend=backend)( + delayed(square)(i) for i in range(3)) + + +############################################################################### +def test_cpu_count(): + assert cpu_count() > 0 + + +def test_effective_n_jobs(): + assert effective_n_jobs() > 0 + + +@parametrize("context", [parallel_config, parallel_backend]) +@pytest.mark.parametrize( + "backend_n_jobs, expected_n_jobs", + [(3, 3), (-1, effective_n_jobs(n_jobs=-1)), (None, 1)], + ids=["positive-int", "negative-int", "None"] +) +@with_multiprocessing +def test_effective_n_jobs_None(context, backend_n_jobs, expected_n_jobs): + # check the number of effective jobs when `n_jobs=None` + # non-regression test for https://github.com/joblib/joblib/issues/984 + with context("threading", n_jobs=backend_n_jobs): + # when using a backend, the default of number jobs will be the one set + # in the backend + assert effective_n_jobs(n_jobs=None) == expected_n_jobs + # without any backend, None will default to a single job + assert effective_n_jobs(n_jobs=None) == 1 + + +############################################################################### +# Test parallel + +@parametrize('backend', ALL_VALID_BACKENDS) +@parametrize('n_jobs', [1, 2, -1, -2]) +@parametrize('verbose', [2, 11, 100]) +def test_simple_parallel(backend, n_jobs, verbose): + assert ([square(x) for x in range(5)] == + Parallel(n_jobs=n_jobs, backend=backend, + verbose=verbose)( + delayed(square)(x) for x in range(5))) + + +@parametrize('backend', ALL_VALID_BACKENDS) +def test_main_thread_renamed_no_warning(backend, monkeypatch): + # Check that no default backend relies on the name of the main thread: + # https://github.com/joblib/joblib/issues/180#issuecomment-253266247 + # Some programs use a different name for the main thread. This is the case + # for uWSGI apps for instance. + monkeypatch.setattr(target=threading.current_thread(), name='name', + value='some_new_name_for_the_main_thread') + + with warnings.catch_warnings(record=True) as warninfo: + results = Parallel(n_jobs=2, backend=backend)( + delayed(square)(x) for x in range(3)) + assert results == [0, 1, 4] + + # Due to the default parameters of LokyBackend, there is a chance that + # warninfo catches Warnings from worker timeouts. We remove it if it exists + warninfo = [w for w in warninfo if "worker timeout" not in str(w.message)] + + # The multiprocessing backend will raise a warning when detecting that is + # started from the non-main thread. Let's check that there is no false + # positive because of the name change. 
+    assert len(warninfo) == 0
+
+
+def _assert_warning_nested(backend, inner_n_jobs, expected):
+    with warnings.catch_warnings(record=True) as warninfo:
+        warnings.simplefilter("always")
+        parallel_func(backend=backend, inner_n_jobs=inner_n_jobs)
+
+    warninfo = [w.message for w in warninfo]
+    if expected:
+        if warninfo:
+            warnings_are_correct = all(
+                'backed parallel loops cannot' in each.args[0]
+                for each in warninfo
+            )
+            # With Python nogil, when the outer backend is threading, we
+            # might see more than one warning
+            warnings_have_the_right_length = (
+                len(warninfo) >= 1 if getattr(sys.flags, 'nogil', False)
+                else len(warninfo) == 1)
+            return warnings_are_correct and warnings_have_the_right_length
+
+        return False
+    else:
+        assert not warninfo
+        return True
+
+
+@with_multiprocessing
+@parametrize('parent_backend,child_backend,expected', [
+    ('loky', 'multiprocessing', True),
+    ('loky', 'loky', False),
+    ('multiprocessing', 'multiprocessing', True),
+    ('multiprocessing', 'loky', True),
+    ('threading', 'multiprocessing', True),
+    ('threading', 'loky', True),
+])
+def test_nested_parallel_warnings(parent_backend, child_backend, expected):
+
+    # no warnings if inner_n_jobs=1
+    Parallel(n_jobs=2, backend=parent_backend)(
+        delayed(_assert_warning_nested)(
+            backend=child_backend, inner_n_jobs=1,
+            expected=False)
+        for _ in range(5))
+
+    # warnings if inner_n_jobs != 1 and expected
+    res = Parallel(n_jobs=2, backend=parent_backend)(
+        delayed(_assert_warning_nested)(
+            backend=child_backend, inner_n_jobs=2,
+            expected=expected)
+        for _ in range(5))
+
+    # Warning handling is not thread safe: one thread might see multiple
+    # warnings or no warning at all.
+    if parent_backend == "threading":
+        if IS_PYPY and not any(res):
+            # Related to joblib#1426, should be removed once it is solved.
+ pytest.xfail(reason="This test often fails in PyPy.") + assert any(res) + else: + assert all(res) + + +@with_multiprocessing +@parametrize('backend', ['loky', 'multiprocessing', 'threading']) +def test_background_thread_parallelism(backend): + is_run_parallel = [False] + + def background_thread(is_run_parallel): + with warnings.catch_warnings(record=True) as warninfo: + Parallel(n_jobs=2)( + delayed(sleep)(.1) for _ in range(4)) + print(len(warninfo)) + is_run_parallel[0] = len(warninfo) == 0 + + t = threading.Thread(target=background_thread, args=(is_run_parallel,)) + t.start() + t.join() + assert is_run_parallel[0] + + +def nested_loop(backend): + Parallel(n_jobs=2, backend=backend)( + delayed(square)(.01) for _ in range(2)) + + +@parametrize('child_backend', BACKENDS) +@parametrize('parent_backend', BACKENDS) +def test_nested_loop(parent_backend, child_backend): + Parallel(n_jobs=2, backend=parent_backend)( + delayed(nested_loop)(child_backend) for _ in range(2)) + + +def raise_exception(backend): + raise ValueError + + +@with_multiprocessing +def test_nested_loop_with_exception_with_loky(): + with raises(ValueError): + with Parallel(n_jobs=2, backend="loky") as parallel: + parallel([delayed(nested_loop)("loky"), + delayed(raise_exception)("loky")]) + + +def test_mutate_input_with_threads(): + """Input is mutable when using the threading backend""" + q = Queue(maxsize=5) + Parallel(n_jobs=2, backend="threading")( + delayed(q.put)(1) for _ in range(5)) + assert q.full() + + +@parametrize('n_jobs', [1, 2, 3]) +def test_parallel_kwargs(n_jobs): + """Check the keyword argument processing of pmap.""" + lst = range(10) + assert ([f(x, y=1) for x in lst] == + Parallel(n_jobs=n_jobs)(delayed(f)(x, y=1) for x in lst)) + + +@parametrize('backend', PARALLEL_BACKENDS) +def test_parallel_as_context_manager(backend): + lst = range(10) + expected = [f(x, y=1) for x in lst] + + with Parallel(n_jobs=4, backend=backend) as p: + # Internally a pool instance has been eagerly created and is managed + # via the context manager protocol + managed_backend = p._backend + + # We make call with the managed parallel object several times inside + # the managed block: + assert expected == p(delayed(f)(x, y=1) for x in lst) + assert expected == p(delayed(f)(x, y=1) for x in lst) + + # Those calls have all used the same pool instance: + if mp is not None: + assert get_workers(managed_backend) is get_workers(p._backend) + + # As soon as we exit the context manager block, the pool is terminated and + # no longer referenced from the parallel object: + if mp is not None: + assert get_workers(p._backend) is None + + # It's still possible to use the parallel instance in non-managed mode: + assert expected == p(delayed(f)(x, y=1) for x in lst) + if mp is not None: + assert get_workers(p._backend) is None + + +@with_multiprocessing +def test_parallel_pickling(): + """ Check that pmap captures the errors when it is passed an object + that cannot be pickled. 
+ """ + class UnpicklableObject(object): + def __reduce__(self): + raise RuntimeError('123') + + with raises(PicklingError, match=r"the task to send"): + Parallel(n_jobs=2, backend='loky')(delayed(id)( + UnpicklableObject()) for _ in range(10)) + + +@with_numpy +@with_multiprocessing +@parametrize('byteorder', ['<', '>', '=']) +def test_parallel_byteorder_corruption(byteorder): + + def inspect_byteorder(x): + return x, x.dtype.byteorder + + x = np.arange(6).reshape((2, 3)).view(f'{byteorder}i4') + + initial_np_byteorder = x.dtype.byteorder + + result = Parallel(n_jobs=2, backend='loky')( + delayed(inspect_byteorder)(x) for _ in range(3) + ) + + for x_returned, byteorder_in_worker in result: + assert byteorder_in_worker == initial_np_byteorder + assert byteorder_in_worker == x_returned.dtype.byteorder + np.testing.assert_array_equal(x, x_returned) + + +@parametrize('backend', PARALLEL_BACKENDS) +def test_parallel_timeout_success(backend): + # Check that timeout isn't thrown when function is fast enough + assert len(Parallel(n_jobs=2, backend=backend, timeout=30)( + delayed(sleep)(0.001) for x in range(10))) == 10 + + +@with_multiprocessing +@parametrize('backend', PARALLEL_BACKENDS) +def test_parallel_timeout_fail(backend): + # Check that timeout properly fails when function is too slow + with raises(TimeoutError): + Parallel(n_jobs=2, backend=backend, timeout=0.01)( + delayed(sleep)(10) for x in range(10)) + + +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_error_capture(backend): + # Check that error are captured, and that correct exceptions + # are raised. + if mp is not None: + with raises(ZeroDivisionError): + Parallel(n_jobs=2, backend=backend)( + [delayed(division)(x, y) + for x, y in zip((0, 1), (1, 0))]) + + with raises(KeyboardInterrupt): + Parallel(n_jobs=2, backend=backend)( + [delayed(interrupt_raiser)(x) for x in (1, 0)]) + + # Try again with the context manager API + with Parallel(n_jobs=2, backend=backend) as parallel: + assert get_workers(parallel._backend) is not None + original_workers = get_workers(parallel._backend) + + with raises(ZeroDivisionError): + parallel([delayed(division)(x, y) + for x, y in zip((0, 1), (1, 0))]) + + # The managed pool should still be available and be in a working + # state despite the previously raised (and caught) exception + assert get_workers(parallel._backend) is not None + + # The pool should have been interrupted and restarted: + assert get_workers(parallel._backend) is not original_workers + + assert ([f(x, y=1) for x in range(10)] == + parallel(delayed(f)(x, y=1) for x in range(10))) + + original_workers = get_workers(parallel._backend) + with raises(KeyboardInterrupt): + parallel([delayed(interrupt_raiser)(x) for x in (1, 0)]) + + # The pool should still be available despite the exception + assert get_workers(parallel._backend) is not None + + # The pool should have been interrupted and restarted: + assert get_workers(parallel._backend) is not original_workers + + assert ([f(x, y=1) for x in range(10)] == + parallel(delayed(f)(x, y=1) for x in range(10))), ( + parallel._iterating, parallel.n_completed_tasks, + parallel.n_dispatched_tasks, parallel._aborting + ) + + # Check that the inner pool has been terminated when exiting the + # context manager + assert get_workers(parallel._backend) is None + else: + with raises(KeyboardInterrupt): + Parallel(n_jobs=2)( + [delayed(interrupt_raiser)(x) for x in (1, 0)]) + + # wrapped exceptions should inherit from the class of the original + # exception to make it easy 
to catch them + with raises(ZeroDivisionError): + Parallel(n_jobs=2)( + [delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))]) + + with raises(MyExceptionWithFinickyInit): + Parallel(n_jobs=2, verbose=0)( + (delayed(exception_raiser)(i, custom_exception=True) + for i in range(30))) + + +@with_multiprocessing +@parametrize('backend', BACKENDS) +def test_error_in_task_iterator(backend): + + def my_generator(raise_at=0): + for i in range(20): + if i == raise_at: + raise ValueError("Iterator Raising Error") + yield i + + with Parallel(n_jobs=2, backend=backend) as p: + # The error is raised in the pre-dispatch phase + with raises(ValueError, match="Iterator Raising Error"): + p(delayed(square)(i) for i in my_generator(raise_at=0)) + + # The error is raised when dispatching a new task after the + # pre-dispatch (likely to happen in a different thread) + with raises(ValueError, match="Iterator Raising Error"): + p(delayed(square)(i) for i in my_generator(raise_at=5)) + + # Same, but raises long after the pre-dispatch phase + with raises(ValueError, match="Iterator Raising Error"): + p(delayed(square)(i) for i in my_generator(raise_at=19)) + + +def consumer(queue, item): + queue.append('Consumed %s' % item) + + +@parametrize('backend', BACKENDS) +@parametrize('batch_size, expected_queue', + [(1, ['Produced 0', 'Consumed 0', + 'Produced 1', 'Consumed 1', + 'Produced 2', 'Consumed 2', + 'Produced 3', 'Consumed 3', + 'Produced 4', 'Consumed 4', + 'Produced 5', 'Consumed 5']), + (4, [ # First Batch + 'Produced 0', 'Produced 1', 'Produced 2', 'Produced 3', + 'Consumed 0', 'Consumed 1', 'Consumed 2', 'Consumed 3', + # Second batch + 'Produced 4', 'Produced 5', 'Consumed 4', 'Consumed 5'])]) +def test_dispatch_one_job(backend, batch_size, expected_queue): + """ Test that with only one job, Parallel does act as a iterator. + """ + queue = list() + + def producer(): + for i in range(6): + queue.append('Produced %i' % i) + yield i + + Parallel(n_jobs=1, batch_size=batch_size, backend=backend)( + delayed(consumer)(queue, x) for x in producer()) + assert queue == expected_queue + assert len(queue) == 12 + + +@with_multiprocessing +@parametrize('backend', PARALLEL_BACKENDS) +def test_dispatch_multiprocessing(backend): + """ Check that using pre_dispatch Parallel does indeed dispatch items + lazily. + """ + manager = mp.Manager() + queue = manager.list() + + def producer(): + for i in range(6): + queue.append('Produced %i' % i) + yield i + + Parallel(n_jobs=2, batch_size=1, pre_dispatch=3, backend=backend)( + delayed(consumer)(queue, 'any') for _ in producer()) + + queue_contents = list(queue) + assert queue_contents[0] == 'Produced 0' + + # Only 3 tasks are pre-dispatched out of 6. The 4th task is dispatched only + # after any of the first 3 jobs have completed. + first_consumption_index = queue_contents[:4].index('Consumed any') + assert first_consumption_index > -1 + + produced_3_index = queue_contents.index('Produced 3') # 4th task produced + assert produced_3_index > first_consumption_index + + assert len(queue) == 12 + + +def test_batching_auto_threading(): + # batching='auto' with the threading backend leaves the effective batch + # size to 1 (no batching) as it has been found to never be beneficial with + # this low-overhead backend. 
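+    # (Threads share memory and dispatch overhead is tiny, so grouping
+    # tasks into batches would only delay the first results.)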
+ + with Parallel(n_jobs=2, batch_size='auto', backend='threading') as p: + p(delayed(id)(i) for i in range(5000)) # many very fast tasks + assert p._backend.compute_batch_size() == 1 + + +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_batching_auto_subprocesses(backend): + with Parallel(n_jobs=2, batch_size='auto', backend=backend) as p: + p(delayed(id)(i) for i in range(5000)) # many very fast tasks + + # It should be strictly larger than 1 but as we don't want heisen + # failures on clogged CI worker environment be safe and only check that + # it's a strictly positive number. + assert p._backend.compute_batch_size() > 0 + + +def test_exception_dispatch(): + """Make sure that exception raised during dispatch are indeed captured""" + with raises(ValueError): + Parallel(n_jobs=2, pre_dispatch=16, verbose=0)( + delayed(exception_raiser)(i) for i in range(30)) + + +def nested_function_inner(i): + Parallel(n_jobs=2)( + delayed(exception_raiser)(j) for j in range(30)) + + +def nested_function_outer(i): + Parallel(n_jobs=2)( + delayed(nested_function_inner)(j) for j in range(30)) + + +@with_multiprocessing +@parametrize('backend', PARALLEL_BACKENDS) +@pytest.mark.xfail(reason="https://github.com/joblib/loky/pull/255") +def test_nested_exception_dispatch(backend): + """Ensure errors for nested joblib cases gets propagated + + We rely on the Python 3 built-in __cause__ system that already + report this kind of information to the user. + """ + with raises(ValueError) as excinfo: + Parallel(n_jobs=2, backend=backend)( + delayed(nested_function_outer)(i) for i in range(30)) + + # Check that important information such as function names are visible + # in the final error message reported to the user + report_lines = format_exception(excinfo.type, excinfo.value, excinfo.tb) + report = "".join(report_lines) + assert 'nested_function_outer' in report + assert 'nested_function_inner' in report + assert 'exception_raiser' in report + + assert type(excinfo.value) is ValueError + + +class FakeParallelBackend(SequentialBackend): + """Pretends to run concurrently while running sequentially.""" + + def configure(self, n_jobs=1, parallel=None, **backend_args): + self.n_jobs = self.effective_n_jobs(n_jobs) + self.parallel = parallel + return n_jobs + + def effective_n_jobs(self, n_jobs=1): + if n_jobs < 0: + n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1) + return n_jobs + + +def test_invalid_backend(): + with raises(ValueError, match="Invalid backend:"): + Parallel(backend='unit-testing') + + with raises(ValueError, match="Invalid backend:"): + with parallel_config(backend='unit-testing'): + pass + + with raises(ValueError, match="Invalid backend:"): + with parallel_config(backend='unit-testing'): + pass + + +@parametrize('backend', ALL_VALID_BACKENDS) +def test_invalid_njobs(backend): + with raises(ValueError) as excinfo: + Parallel(n_jobs=0, backend=backend)._initialize_backend() + assert "n_jobs == 0 in Parallel has no meaning" in str(excinfo.value) + + with raises(ValueError) as excinfo: + Parallel(n_jobs=0.5, backend=backend)._initialize_backend() + assert "n_jobs == 0 in Parallel has no meaning" in str(excinfo.value) + + with raises(ValueError) as excinfo: + Parallel(n_jobs="2.3", backend=backend)._initialize_backend() + assert "n_jobs could not be converted to int" in str(excinfo.value) + + with raises(ValueError) as excinfo: + Parallel(n_jobs="invalid_str", backend=backend)._initialize_backend() + assert "n_jobs could not be converted to int" in str(excinfo.value) + + 
+@with_multiprocessing +@parametrize('backend', PARALLEL_BACKENDS) +@parametrize('n_jobs', ['2', 2.3, 2]) +def test_njobs_converted_to_int(backend, n_jobs): + p = Parallel(n_jobs=n_jobs, backend=backend) + assert p._effective_n_jobs() == 2 + + res = p(delayed(square)(i) for i in range(10)) + assert all(r == square(i) for i, r in enumerate(res)) + + +def test_register_parallel_backend(): + try: + register_parallel_backend("test_backend", FakeParallelBackend) + assert "test_backend" in BACKENDS + assert BACKENDS["test_backend"] == FakeParallelBackend + finally: + del BACKENDS["test_backend"] + + +def test_overwrite_default_backend(): + assert _active_backend_type() == DefaultBackend + try: + register_parallel_backend("threading", BACKENDS["threading"], + make_default=True) + assert _active_backend_type() == ThreadingBackend + finally: + # Restore the global default manually + parallel.DEFAULT_BACKEND = DEFAULT_BACKEND + assert _active_backend_type() == DefaultBackend + + +@skipif(mp is not None, reason="Only without multiprocessing") +def test_backend_no_multiprocessing(): + with warns(UserWarning, + match="joblib backend '.*' is not available on.*"): + Parallel(backend='loky')(delayed(square)(i) for i in range(3)) + + # The below should now work without problems + with parallel_config(backend='loky'): + Parallel()(delayed(square)(i) for i in range(3)) + + +def check_backend_context_manager(context, backend_name): + with context(backend_name, n_jobs=3): + active_backend, active_n_jobs = parallel.get_active_backend() + assert active_n_jobs == 3 + assert effective_n_jobs(3) == 3 + p = Parallel() + assert p.n_jobs == 3 + if backend_name == 'multiprocessing': + assert type(active_backend) is MultiprocessingBackend + assert type(p._backend) is MultiprocessingBackend + elif backend_name == 'loky': + assert type(active_backend) is LokyBackend + assert type(p._backend) is LokyBackend + elif backend_name == 'threading': + assert type(active_backend) is ThreadingBackend + assert type(p._backend) is ThreadingBackend + elif backend_name.startswith('test_'): + assert type(active_backend) is FakeParallelBackend + assert type(p._backend) is FakeParallelBackend + + +all_backends_for_context_manager = PARALLEL_BACKENDS[:] +all_backends_for_context_manager.extend( + ['test_backend_%d' % i for i in range(3)] +) + + +@with_multiprocessing +@parametrize('backend', all_backends_for_context_manager) +@parametrize('context', [parallel_backend, parallel_config]) +def test_backend_context_manager(monkeypatch, backend, context): + if backend not in BACKENDS: + monkeypatch.setitem(BACKENDS, backend, FakeParallelBackend) + + assert _active_backend_type() == DefaultBackend + # check that this possible to switch parallel backends sequentially + check_backend_context_manager(context, backend) + + # The default backend is restored + assert _active_backend_type() == DefaultBackend + + # Check that context manager switching is thread safe: + Parallel(n_jobs=2, backend='threading')( + delayed(check_backend_context_manager)(context, b) + for b in all_backends_for_context_manager if not b) + + # The default backend is again restored + assert _active_backend_type() == DefaultBackend + + +class ParameterizedParallelBackend(SequentialBackend): + """Pretends to run conncurrently while running sequentially.""" + + def __init__(self, param=None): + if param is None: + raise ValueError('param should not be None') + self.param = param + + +@parametrize("context", [parallel_config, parallel_backend]) +def 
test_parameterized_backend_context_manager(monkeypatch, context): + monkeypatch.setitem(BACKENDS, 'param_backend', + ParameterizedParallelBackend) + assert _active_backend_type() == DefaultBackend + + with context('param_backend', param=42, n_jobs=3): + active_backend, active_n_jobs = parallel.get_active_backend() + assert type(active_backend) is ParameterizedParallelBackend + assert active_backend.param == 42 + assert active_n_jobs == 3 + p = Parallel() + assert p.n_jobs == 3 + assert p._backend is active_backend + results = p(delayed(sqrt)(i) for i in range(5)) + assert results == [sqrt(i) for i in range(5)] + + # The default backend is again restored + assert _active_backend_type() == DefaultBackend + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_directly_parameterized_backend_context_manager(context): + assert _active_backend_type() == DefaultBackend + + # Check that it's possible to pass a backend instance directly, + # without registration + with context(ParameterizedParallelBackend(param=43), n_jobs=5): + active_backend, active_n_jobs = parallel.get_active_backend() + assert type(active_backend) is ParameterizedParallelBackend + assert active_backend.param == 43 + assert active_n_jobs == 5 + p = Parallel() + assert p.n_jobs == 5 + assert p._backend is active_backend + results = p(delayed(sqrt)(i) for i in range(5)) + assert results == [sqrt(i) for i in range(5)] + + # The default backend is again restored + assert _active_backend_type() == DefaultBackend + + +def sleep_and_return_pid(): + sleep(.1) + return os.getpid() + + +def get_nested_pids(): + assert _active_backend_type() == ThreadingBackend + # Assert that the nested backend does not change the default number of + # jobs used in Parallel + assert Parallel()._effective_n_jobs() == 1 + + # Assert that the tasks are running only on one process + return Parallel(n_jobs=2)(delayed(sleep_and_return_pid)() + for _ in range(2)) + + +class MyBackend(joblib._parallel_backends.LokyBackend): + """Backend to test backward compatibility with older backends""" + def get_nested_backend(self, ): + # Older backends only return a backend, without n_jobs indications. + return super(MyBackend, self).get_nested_backend()[0] + + +register_parallel_backend('back_compat_backend', MyBackend) + + +@with_multiprocessing +@parametrize('backend', ['threading', 'loky', 'multiprocessing', + 'back_compat_backend']) +@parametrize("context", [parallel_config, parallel_backend]) +def test_nested_backend_context_manager(context, backend): + # Check that by default, nested parallel calls will always use the + # ThreadingBackend + + with context(backend): + pid_groups = Parallel(n_jobs=2)( + delayed(get_nested_pids)() + for _ in range(10) + ) + for pid_group in pid_groups: + assert len(set(pid_group)) == 1 + + +@with_multiprocessing +@parametrize('n_jobs', [2, -1, None]) +@parametrize('backend', PARALLEL_BACKENDS) +@parametrize("context", [parallel_config, parallel_backend]) +def test_nested_backend_in_sequential(backend, n_jobs, context): + # Check that by default, nested parallel calls will always use the + # ThreadingBackend + + def check_nested_backend(expected_backend_type, expected_n_job): + # Assert that the sequential backend at top level, does not change the + # backend for nested calls. 
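+        # (SequentialBackend runs its tasks inline, so the backend
+        # installed by the surrounding context manager is still active
+        # here.)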
+ assert _active_backend_type() == BACKENDS[expected_backend_type] + + # Assert that the nested backend in SequentialBackend does not change + # the default number of jobs used in Parallel + expected_n_job = effective_n_jobs(expected_n_job) + assert Parallel()._effective_n_jobs() == expected_n_job + + Parallel(n_jobs=1)( + delayed(check_nested_backend)(DEFAULT_BACKEND, 1) + for _ in range(10) + ) + + with context(backend, n_jobs=n_jobs): + Parallel(n_jobs=1)( + delayed(check_nested_backend)(backend, n_jobs) + for _ in range(10) + ) + + +def check_nesting_level(context, inner_backend, expected_level): + with context(inner_backend) as ctx: + if context is parallel_config: + backend = ctx["backend"] + if context is parallel_backend: + backend = ctx[0] + assert backend.nesting_level == expected_level + + +@with_multiprocessing +@parametrize('outer_backend', PARALLEL_BACKENDS) +@parametrize('inner_backend', PARALLEL_BACKENDS) +@parametrize("context", [parallel_config, parallel_backend]) +def test_backend_nesting_level(context, outer_backend, inner_backend): + # Check that the nesting level for the backend is correctly set + check_nesting_level(context, outer_backend, 0) + + Parallel(n_jobs=2, backend=outer_backend)( + delayed(check_nesting_level)(context, inner_backend, 1) + for _ in range(10) + ) + + with context(inner_backend, n_jobs=2): + Parallel()(delayed(check_nesting_level)(context, inner_backend, 1) + for _ in range(10)) + + +@with_multiprocessing +@parametrize("context", [parallel_config, parallel_backend]) +@parametrize('with_retrieve_callback', [True, False]) +def test_retrieval_context(context, with_retrieve_callback): + import contextlib + + class MyBackend(ThreadingBackend): + i = 0 + supports_retrieve_callback = with_retrieve_callback + + @contextlib.contextmanager + def retrieval_context(self): + self.i += 1 + yield + + register_parallel_backend("retrieval", MyBackend) + + def nested_call(n): + return Parallel(n_jobs=2)(delayed(id)(i) for i in range(n)) + + with context("retrieval") as ctx: + Parallel(n_jobs=2)( + delayed(nested_call)(i) + for i in range(5) + ) + if context is parallel_config: + assert ctx["backend"].i == 1 + if context is parallel_backend: + assert ctx[0].i == 1 + + +############################################################################### +# Test helpers + +@parametrize('batch_size', [0, -1, 1.42]) +def test_invalid_batch_size(batch_size): + with raises(ValueError): + Parallel(batch_size=batch_size) + + +@parametrize('n_tasks, n_jobs, pre_dispatch, batch_size', + [(2, 2, 'all', 'auto'), + (2, 2, 'n_jobs', 'auto'), + (10, 2, 'n_jobs', 'auto'), + (517, 2, 'n_jobs', 'auto'), + (10, 2, 'n_jobs', 'auto'), + (10, 4, 'n_jobs', 'auto'), + (200, 12, 'n_jobs', 'auto'), + (25, 12, '2 * n_jobs', 1), + (250, 12, 'all', 1), + (250, 12, '2 * n_jobs', 7), + (200, 12, '2 * n_jobs', 'auto')]) +def test_dispatch_race_condition(n_tasks, n_jobs, pre_dispatch, batch_size): + # Check that using (async-)dispatch does not yield a race condition on the + # iterable generator that is not thread-safe natively. 
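+    # Tasks are dispatched from backend callback threads as batches
+    # complete, so several threads may pull from the shared generator at
+    # once.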
+ # This is a non-regression test for the "Pool seems closed" class of error + params = {'n_jobs': n_jobs, 'pre_dispatch': pre_dispatch, + 'batch_size': batch_size} + expected = [square(i) for i in range(n_tasks)] + results = Parallel(**params)(delayed(square)(i) for i in range(n_tasks)) + assert results == expected + + +@with_multiprocessing +def test_default_mp_context(): + mp_start_method = mp.get_start_method() + p = Parallel(n_jobs=2, backend='multiprocessing') + context = p._backend_args.get('context') + start_method = context.get_start_method() + assert start_method == mp_start_method + + +@with_numpy +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_no_blas_crash_or_freeze_with_subprocesses(backend): + if backend == 'multiprocessing': + # Use the spawn backend that is both robust and available on all + # platforms + backend = mp.get_context('spawn') + + # Check that on recent Python version, the 'spawn' start method can make + # it possible to use multiprocessing in conjunction of any BLAS + # implementation that happens to be used by numpy with causing a freeze or + # a crash + rng = np.random.RandomState(42) + + # call BLAS DGEMM to force the initialization of the internal thread-pool + # in the main process + a = rng.randn(1000, 1000) + np.dot(a, a.T) + + # check that the internal BLAS thread-pool is not in an inconsistent state + # in the worker processes managed by multiprocessing + Parallel(n_jobs=2, backend=backend)( + delayed(np.dot)(a, a.T) for i in range(2)) + + +UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_NO_MAIN = """\ +from joblib import Parallel, delayed + +def square(x): + return x ** 2 + +backend = "{}" +if backend == "spawn": + from multiprocessing import get_context + backend = get_context(backend) + +print(Parallel(n_jobs=2, backend=backend)( + delayed(square)(i) for i in range(5))) +""" + + +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_parallel_with_interactively_defined_functions(backend): + # When using the "-c" flag, interactive functions defined in __main__ + # should work with any backend. + if backend == "multiprocessing" and mp.get_start_method() != "fork": + pytest.skip("Require fork start method to use interactively defined " + "functions with multiprocessing.") + code = UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_NO_MAIN.format(backend) + check_subprocess_call( + [sys.executable, '-c', code], timeout=10, + stdout_regex=r'\[0, 1, 4, 9, 16\]') + + +UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_MAIN = """\ +import sys +# Make sure that joblib is importable in the subprocess launching this +# script. 
This is needed in case we run the tests from the joblib root +# folder without having installed joblib +sys.path.insert(0, {joblib_root_folder!r}) + +from joblib import Parallel, delayed + +def run(f, x): + return f(x) + +{define_func} + +if __name__ == "__main__": + backend = "{backend}" + if backend == "spawn": + from multiprocessing import get_context + backend = get_context(backend) + + callable_position = "{callable_position}" + if callable_position == "delayed": + print(Parallel(n_jobs=2, backend=backend)( + delayed(square)(i) for i in range(5))) + elif callable_position == "args": + print(Parallel(n_jobs=2, backend=backend)( + delayed(run)(square, i) for i in range(5))) + else: + print(Parallel(n_jobs=2, backend=backend)( + delayed(run)(f=square, x=i) for i in range(5))) +""" + +SQUARE_MAIN = """\ +def square(x): + return x ** 2 +""" +SQUARE_LOCAL = """\ +def gen_square(): + def square(x): + return x ** 2 + return square +square = gen_square() +""" +SQUARE_LAMBDA = """\ +square = lambda x: x ** 2 +""" + + +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS + ([] if mp is None else ['spawn'])) +@parametrize('define_func', [SQUARE_MAIN, SQUARE_LOCAL, SQUARE_LAMBDA]) +@parametrize('callable_position', ['delayed', 'args', 'kwargs']) +def test_parallel_with_unpicklable_functions_in_args( + backend, define_func, callable_position, tmpdir): + if backend in ['multiprocessing', 'spawn'] and ( + define_func != SQUARE_MAIN or sys.platform == "win32"): + pytest.skip("Not picklable with pickle") + code = UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_MAIN.format( + define_func=define_func, backend=backend, + callable_position=callable_position, + joblib_root_folder=os.path.dirname(os.path.dirname(joblib.__file__))) + code_file = tmpdir.join("unpicklable_func_script.py") + code_file.write(code) + check_subprocess_call( + [sys.executable, code_file.strpath], timeout=10, + stdout_regex=r'\[0, 1, 4, 9, 16\]') + + +INTERACTIVE_DEFINED_FUNCTION_AND_CLASS_SCRIPT_CONTENT = """\ +import sys +import faulthandler +# Make sure that joblib is importable in the subprocess launching this +# script. This is needed in case we run the tests from the joblib root +# folder without having installed joblib +sys.path.insert(0, {joblib_root_folder!r}) + +from joblib import Parallel, delayed +from functools import partial + +class MyClass: + '''Class defined in the __main__ namespace''' + def __init__(self, value): + self.value = value + + +def square(x, ignored=None, ignored2=None): + '''Function defined in the __main__ namespace''' + return x.value ** 2 + + +square2 = partial(square, ignored2='something') + +# Here, we do not need the `if __name__ == "__main__":` safeguard when +# using the default `loky` backend (even on Windows). + +# To make debugging easier +faulthandler.dump_traceback_later(30, exit=True) + +# The following baroque function call is meant to check that joblib +# introspection rightfully uses cloudpickle instead of the (faster) pickle +# module of the standard library when necessary. In particular cloudpickle is +# necessary for functions and instances of classes interactively defined in the +# __main__ module. 
+ +print(Parallel(backend="loky", n_jobs=2)( + delayed(square2)(MyClass(i), ignored=[dict(a=MyClass(1))]) + for i in range(5) +)) +""".format(joblib_root_folder=os.path.dirname( + os.path.dirname(joblib.__file__))) + + +@with_multiprocessing +def test_parallel_with_interactively_defined_functions_loky(tmpdir): + # loky accepts interactive functions defined in __main__ and does not + # require if __name__ == '__main__' even when the __main__ module is + # defined by the result of the execution of a filesystem script. + script = tmpdir.join('joblib_interactively_defined_function.py') + script.write(INTERACTIVE_DEFINED_FUNCTION_AND_CLASS_SCRIPT_CONTENT) + check_subprocess_call( + [sys.executable, script.strpath], + stdout_regex=r'\[0, 1, 4, 9, 16\]', + timeout=None, # rely on faulthandler to kill the process + ) + + +INTERACTIVELY_DEFINED_SUBCLASS_WITH_METHOD_SCRIPT_CONTENT = """\ +import sys +# Make sure that joblib is importable in the subprocess launching this +# script. This is needed in case we run the tests from the joblib root +# folder without having installed joblib +sys.path.insert(0, {joblib_root_folder!r}) + +from joblib import Parallel, delayed, hash +import multiprocessing as mp +mp.util.log_to_stderr(5) + +class MyList(list): + '''MyList is interactively defined by MyList.append is a built-in''' + def __hash__(self): + # XXX: workaround limitation in cloudpickle + return hash(self).__hash__() + +l = MyList() + +print(Parallel(backend="loky", n_jobs=2)( + delayed(l.append)(i) for i in range(3) +)) +""".format(joblib_root_folder=os.path.dirname( + os.path.dirname(joblib.__file__))) + + +@with_multiprocessing +def test_parallel_with_interactively_defined_bound_method_loky(tmpdir): + script = tmpdir.join('joblib_interactive_bound_method_script.py') + script.write(INTERACTIVELY_DEFINED_SUBCLASS_WITH_METHOD_SCRIPT_CONTENT) + check_subprocess_call([sys.executable, script.strpath], + stdout_regex=r'\[None, None, None\]', + stderr_regex=r'LokyProcess', + timeout=15) + + +def test_parallel_with_exhausted_iterator(): + exhausted_iterator = iter([]) + assert Parallel(n_jobs=2)(exhausted_iterator) == [] + + +def _cleanup_worker(): + """Helper function to force gc in each worker.""" + force_gc_pypy() + time.sleep(.1) + + +def check_memmap(a): + if not isinstance(a, np.memmap): + raise TypeError('Expected np.memmap instance, got %r', + type(a)) + return a.copy() # return a regular array instead of a memmap + + +@with_numpy +@with_multiprocessing +@parametrize('backend', PROCESS_BACKENDS) +def test_auto_memmap_on_arrays_from_generator(backend): + # Non-regression test for a problem with a bad interaction between the + # GC collecting arrays recently created during iteration inside the + # parallel dispatch loop and the auto-memmap feature of Parallel. + # See: https://github.com/joblib/joblib/pull/294 + def generate_arrays(n): + for i in range(n): + yield np.ones(10, dtype=np.float32) * i + # Use max_nbytes=1 to force the use of memory-mapping even for small + # arrays + results = Parallel(n_jobs=2, max_nbytes=1, backend=backend)( + delayed(check_memmap)(a) for a in generate_arrays(100)) + for result, expected in zip(results, generate_arrays(len(results))): + np.testing.assert_array_equal(expected, result) + + # Second call to force loky to adapt the executor by growing the number + # of worker processes. This is a non-regression test for: + # https://github.com/joblib/joblib/issues/629. 
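+    # Growing n_jobs from 2 to 4 makes loky resize its reusable executor
+    # while the memmapped temporary files from the first call may still
+    # be around.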
+ results = Parallel(n_jobs=4, max_nbytes=1, backend=backend)( + delayed(check_memmap)(a) for a in generate_arrays(100)) + for result, expected in zip(results, generate_arrays(len(results))): + np.testing.assert_array_equal(expected, result) + + +def identity(arg): + return arg + + +@with_numpy +@with_multiprocessing +def test_memmap_with_big_offset(tmpdir): + fname = tmpdir.join('test.mmap').strpath + size = mmap.ALLOCATIONGRANULARITY + obj = [np.zeros(size, dtype='uint8'), np.ones(size, dtype='uint8')] + dump(obj, fname) + memmap = load(fname, mmap_mode='r') + result, = Parallel(n_jobs=2)(delayed(identity)(memmap) for _ in [0]) + assert isinstance(memmap[1], np.memmap) + assert memmap[1].offset > size + np.testing.assert_array_equal(obj, result) + + +def test_warning_about_timeout_not_supported_by_backend(): + with warnings.catch_warnings(record=True) as warninfo: + Parallel(n_jobs=1, timeout=1)(delayed(square)(i) for i in range(50)) + assert len(warninfo) == 1 + w = warninfo[0] + assert isinstance(w.message, UserWarning) + assert str(w.message) == ( + "The backend class 'SequentialBackend' does not support timeout. " + "You have set 'timeout=1' in Parallel but the 'timeout' parameter " + "will not be used.") + + +def set_list_value(input_list, index, value): + input_list[index] = value + return value + + +@pytest.mark.parametrize('n_jobs', [1, 2, 4]) +def test_parallel_return_order_with_return_as_generator_parameter(n_jobs): + # This test inserts values in a list in some expected order + # in sequential computing, and then checks that this order has been + # respected by Parallel output generator. + input_list = [0] * 5 + result = Parallel(n_jobs=n_jobs, return_as="generator", + backend='threading')( + delayed(set_list_value)(input_list, i, i) for i in range(5)) + + # Ensure that all the tasks are completed before checking the result + result = list(result) + + assert all(v == r for v, r in zip(input_list, result)) + + +def _sqrt_with_delay(e, delay): + if delay: + sleep(30) + return sqrt(e) + + +def _test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs): + # This test submits 10 tasks, but the second task is super slow. This test + # checks that the 9 other tasks return before the slow task is done, when + # `return_as` parameter is set to `'generator_unordered'` + result = Parallel(n_jobs=n_jobs, return_as="generator_unordered", + backend=backend)( + delayed(_sqrt_with_delay)(i**2, (i == 1)) for i in range(10)) + + quickly_returned = sorted(next(result) for _ in range(9)) + + expected_quickly_returned = [0] + list(range(2, 10)) + + assert all( + v == r for v, r in zip(expected_quickly_returned, quickly_returned) + ) + + del result + force_gc_pypy() + + +@pytest.mark.parametrize('n_jobs', [2, 4]) +# NB: for this test to work, the backend must be allowed to process tasks +# concurrently, so at least two jobs with a non-sequential backend are +# mandatory. 
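+# The sequential backend is therefore excluded from the parametrization
+# below.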
+@with_multiprocessing +@parametrize('backend', set(RETURN_GENERATOR_BACKENDS) - {"sequential"}) +def test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs): + _test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs) + + +@pytest.mark.parametrize('n_jobs', [2, -1]) +@parametrize("context", [parallel_config, parallel_backend]) +@skipif(distributed is None, reason='This test requires dask') +def test_parallel_unordered_generator_returns_fastest_first_with_dask( + n_jobs, context +): + with distributed.Client( + n_workers=2, threads_per_worker=2 + ), context("dask"): + _test_parallel_unordered_generator_returns_fastest_first(None, n_jobs) + + +@parametrize('backend', ALL_VALID_BACKENDS) +@parametrize('n_jobs', [1, 2, -2, -1]) +def test_abort_backend(n_jobs, backend): + delays = ["a"] + [10] * 100 + with raises(TypeError): + t_start = time.time() + Parallel(n_jobs=n_jobs, backend=backend)( + delayed(time.sleep)(i) for i in delays) + dt = time.time() - t_start + assert dt < 20 + + +def get_large_object(arg): + result = np.ones(int(5 * 1e5), dtype=bool) + result[0] = False + return result + + +def _test_deadlock_with_generator(backend, return_as, n_jobs): + # Non-regression test for a race condition in the backends when the pickler + # is delayed by a large object. + with Parallel(n_jobs=n_jobs, backend=backend, + return_as=return_as) as parallel: + result = parallel(delayed(get_large_object)(i) for i in range(10)) + next(result) + next(result) + del result + # The gc in pypy can be delayed. Force it to make sure this test does + # not cause timeout on the CI. + force_gc_pypy() + + +@with_numpy +@parametrize('backend', RETURN_GENERATOR_BACKENDS) +@parametrize('return_as', ["generator", "generator_unordered"]) +@parametrize('n_jobs', [1, 2, -2, -1]) +def test_deadlock_with_generator(backend, return_as, n_jobs): + _test_deadlock_with_generator(backend, return_as, n_jobs) + + +@with_numpy +@pytest.mark.parametrize('n_jobs', [2, -1]) +@parametrize('return_as', ["generator", "generator_unordered"]) +@parametrize("context", [parallel_config, parallel_backend]) +@skipif(distributed is None, reason='This test requires dask') +def test_deadlock_with_generator_and_dask(context, return_as, n_jobs): + with distributed.Client( + n_workers=2, threads_per_worker=2 + ), context("dask"): + _test_deadlock_with_generator(None, return_as, n_jobs) + + +@parametrize('backend', RETURN_GENERATOR_BACKENDS) +@parametrize('return_as', ["generator", "generator_unordered"]) +@parametrize('n_jobs', [1, 2, -2, -1]) +def test_multiple_generator_call(backend, return_as, n_jobs): + # Non-regression test that ensures the dispatch of the tasks starts + # immediately when Parallel.__call__ is called. This test relies on the + # assumption that only one generator can be submitted at a time. + with raises(RuntimeError, + match="This Parallel instance is already running"): + parallel = Parallel(n_jobs, backend=backend, return_as=return_as) + g = parallel(delayed(sleep)(1) for _ in range(10)) # noqa: F841 + t_start = time.time() + gen2 = parallel(delayed(id)(i) for i in range(100)) # noqa: F841 + + # Make sure that the error is raised quickly + assert time.time() - t_start < 2, ( + "The error should be raised immediatly when submitting a new task " + "but it took more than 2s." + ) + + del g + # The gc in pypy can be delayed. Force it to make sure this test does not + # cause timeout on the CI. 
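+    # (On CPython the `del g` above already dropped the last reference.)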
+ force_gc_pypy() + + +@parametrize('backend', RETURN_GENERATOR_BACKENDS) +@parametrize('return_as', ["generator", "generator_unordered"]) +@parametrize('n_jobs', [1, 2, -2, -1]) +def test_multiple_generator_call_managed(backend, return_as, n_jobs): + # Non-regression test that ensures the dispatch of the tasks starts + # immediately when Parallel.__call__ is called. This test relies on the + # assumption that only one generator can be submitted at a time. + with Parallel(n_jobs, backend=backend, + return_as=return_as) as parallel: + g = parallel(delayed(sleep)(10) for _ in range(10)) # noqa: F841 + t_start = time.time() + with raises(RuntimeError, + match="This Parallel instance is already running"): + g2 = parallel(delayed(id)(i) for i in range(100)) # noqa: F841 + + # Make sure that the error is raised quickly + assert time.time() - t_start < 2, ( + "The error should be raised immediatly when submitting a new task " + "but it took more than 2s." + ) + + # The gc in pypy can be delayed. Force it to make sure this test does not + # cause timeout on the CI. + del g + force_gc_pypy() + + +@parametrize('backend', RETURN_GENERATOR_BACKENDS) +@parametrize('return_as_1', ["generator", "generator_unordered"]) +@parametrize('return_as_2', ["generator", "generator_unordered"]) +@parametrize('n_jobs', [1, 2, -2, -1]) +def test_multiple_generator_call_separated( + backend, return_as_1, return_as_2, n_jobs +): + # Check that for separated Parallel, both tasks are correctly returned. + g = Parallel(n_jobs, backend=backend, return_as=return_as_1)( + delayed(sqrt)(i ** 2) for i in range(10) + ) + g2 = Parallel(n_jobs, backend=backend, return_as=return_as_2)( + delayed(sqrt)(i ** 2) for i in range(10, 20) + ) + + if return_as_1 == "generator_unordered": + g = sorted(g) + + if return_as_2 == "generator_unordered": + g2 = sorted(g2) + + assert all(res == i for res, i in zip(g, range(10))) + assert all(res == i for res, i in zip(g2, range(10, 20))) + + +@parametrize('backend, error', [ + ('loky', True), + ('threading', False), + ('sequential', False), +]) +@parametrize('return_as_1', ["generator", "generator_unordered"]) +@parametrize('return_as_2', ["generator", "generator_unordered"]) +def test_multiple_generator_call_separated_gc( + backend, return_as_1, return_as_2, error +): + + if (backend == 'loky') and (mp is None): + pytest.skip("Requires multiprocessing") + + # Check that in loky, only one call can be run at a time with + # a single executor. + parallel = Parallel(2, backend=backend, return_as=return_as_1) + g = parallel(delayed(sleep)(10) for i in range(10)) + g_wr = weakref.finalize(g, lambda: print("Generator collected")) + ctx = ( + raises(RuntimeError, match="The executor underlying Parallel") + if error else nullcontext() + ) + with ctx: + # For loky, this call will raise an error as the gc of the previous + # generator will shutdown the shared executor. + # For the other backends, as the worker pools are not shared between + # the two calls, this should proceed correctly. + t_start = time.time() + g = Parallel(2, backend=backend, return_as=return_as_2)( + delayed(sqrt)(i ** 2) for i in range(10, 20) + ) + + # The gc in pypy can be delayed. Force it to test the behavior when it + # will eventually be collected. 
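+        # Collecting the first generator is what shuts down the shared
+        # loky executor and triggers the RuntimeError guarded above.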
+        force_gc_pypy()
+
+        if return_as_2 == "generator_unordered":
+            g = sorted(g)
+
+        assert all(res == i for res, i in zip(g, range(10, 20)))
+
+        assert time.time() - t_start < 5
+
+    # Make sure that the computations are stopped for the gc'ed generator
+    retry = 0
+    while g_wr.alive and retry < 3:
+        retry += 1
+        time.sleep(.5)
+    assert time.time() - t_start < 5
+
+    if parallel._effective_n_jobs() != 1:
+        # check that the first parallel object is aborting (the final
+        # _aborted state might be delayed).
+        assert parallel._aborting
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize('backend', PROCESS_BACKENDS)
+def test_memmapping_leaks(backend, tmpdir):
+    # Non-regression test for memmapping backends. Ensure that the data
+    # does not stay too long in memory
+    tmpdir = tmpdir.strpath
+
+    # Use max_nbytes=1 to force the use of memory-mapping even for small
+    # arrays
+    with Parallel(n_jobs=2, max_nbytes=1, backend=backend,
+                  temp_folder=tmpdir) as p:
+        p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
+
+        # The memmap folder should not be cleaned while we are still
+        # inside the context scope
+        assert len(os.listdir(tmpdir)) > 0
+
+        # Cleaning of the memmap folder is triggered by the garbage
+        # collection. With pypy the garbage collection has been observed
+        # to be delayed, sometimes up until the shutdown of the
+        # interpreter. This cleanup job executed in the worker ensures
+        # that it's triggered immediately.
+        p(delayed(_cleanup_worker)() for _ in range(2))
+
+    # Make sure that the shared memory is cleaned at the end when we exit
+    # the context
+    for _ in range(100):
+        if not os.listdir(tmpdir):
+            break
+        sleep(.1)
+    else:
+        raise AssertionError('temporary directory of Parallel was not removed')
+
+    # Make sure that the shared memory is cleaned at the end of a call
+    p = Parallel(n_jobs=2, max_nbytes=1, backend=backend)
+    p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
+    p(delayed(_cleanup_worker)() for _ in range(2))
+
+    for _ in range(100):
+        if not os.listdir(tmpdir):
+            break
+        sleep(.1)
+    else:
+        raise AssertionError('temporary directory of Parallel was not removed')
+
+
+@parametrize('backend',
+             ([None, 'threading'] if mp is None
+              else [None, 'loky', 'threading'])
+             )
+def test_lambda_expression(backend):
+    # cloudpickle is used to pickle delayed callables
+    results = Parallel(n_jobs=2, backend=backend)(
+        delayed(lambda x: x ** 2)(i) for i in range(10))
+    assert results == [i ** 2 for i in range(10)]
+
+
+@with_multiprocessing
+@parametrize('backend', PROCESS_BACKENDS)
+def test_backend_batch_statistics_reset(backend):
+    """Test that a parallel backend correctly resets its batch statistics."""
+    n_jobs = 2
+    n_inputs = 500
+    task_time = 2. / n_inputs
+
+    p = Parallel(verbose=10, n_jobs=n_jobs, backend=backend)
+    p(delayed(time.sleep)(task_time) for i in range(n_inputs))
+    assert (p._backend._effective_batch_size ==
+            p._backend._DEFAULT_EFFECTIVE_BATCH_SIZE)
+    assert (p._backend._smoothed_batch_duration ==
+            p._backend._DEFAULT_SMOOTHED_BATCH_DURATION)
+
+    p(delayed(time.sleep)(task_time) for i in range(n_inputs))
+    assert (p._backend._effective_batch_size ==
+            p._backend._DEFAULT_EFFECTIVE_BATCH_SIZE)
+    assert (p._backend._smoothed_batch_duration ==
+            p._backend._DEFAULT_SMOOTHED_BATCH_DURATION)
+
+
+@with_multiprocessing
+@parametrize("context", [parallel_config, parallel_backend])
+def test_backend_hinting_and_constraints(context):
+    for n_jobs in [1, 2, -1]:
+        assert type(Parallel(n_jobs=n_jobs)._backend) is DefaultBackend
+
+        p = Parallel(n_jobs=n_jobs, prefer='threads')
+        assert type(p._backend) is ThreadingBackend
+
+        p = Parallel(n_jobs=n_jobs, prefer='processes')
+        assert type(p._backend) is DefaultBackend
+
+        p = Parallel(n_jobs=n_jobs, require='sharedmem')
+        assert type(p._backend) is ThreadingBackend
+
+    # Explicit backend selection can override backend hinting although it
+    # is useless to pass a hint when selecting a backend.
+    p = Parallel(n_jobs=2, backend='loky', prefer='threads')
+    assert type(p._backend) is LokyBackend
+
+    with context('loky', n_jobs=2):
+        # Explicit backend selection by the user with the context manager
+        # should be respected when combined with backend hints only.
+        p = Parallel(prefer='threads')
+        assert type(p._backend) is LokyBackend
+        assert p.n_jobs == 2
+
+    with context('loky', n_jobs=2):
+        # Locally hard-coded n_jobs value is respected.
+        p = Parallel(n_jobs=3, prefer='threads')
+        assert type(p._backend) is LokyBackend
+        assert p.n_jobs == 3
+
+    with context('loky', n_jobs=2):
+        # Explicit backend selection by the user with the context manager
+        # should be ignored when the Parallel call has hard constraints.
+        # In this case, the default backend that supports shared mem is
+        # used and the default number of processes is used.
+ p = Parallel(require='sharedmem') + assert type(p._backend) is ThreadingBackend + assert p.n_jobs == 1 + + with context('loky', n_jobs=2): + p = Parallel(n_jobs=3, require='sharedmem') + assert type(p._backend) is ThreadingBackend + assert p.n_jobs == 3 + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_backend_hinting_and_constraints_with_custom_backends( + capsys, context +): + # Custom backends can declare that they use threads and have shared memory + # semantics: + class MyCustomThreadingBackend(ParallelBackendBase): + supports_sharedmem = True + use_threads = True + + def apply_async(self): + pass + + def effective_n_jobs(self, n_jobs): + return n_jobs + + with context(MyCustomThreadingBackend()): + p = Parallel(n_jobs=2, prefer='processes') # ignored + assert type(p._backend) is MyCustomThreadingBackend + + p = Parallel(n_jobs=2, require='sharedmem') + assert type(p._backend) is MyCustomThreadingBackend + + class MyCustomProcessingBackend(ParallelBackendBase): + supports_sharedmem = False + use_threads = False + + def apply_async(self): + pass + + def effective_n_jobs(self, n_jobs): + return n_jobs + + with context(MyCustomProcessingBackend()): + p = Parallel(n_jobs=2, prefer='processes') + assert type(p._backend) is MyCustomProcessingBackend + + out, err = capsys.readouterr() + assert out == "" + assert err == "" + + p = Parallel(n_jobs=2, require='sharedmem', verbose=10) + assert type(p._backend) is ThreadingBackend + + out, err = capsys.readouterr() + expected = ("Using ThreadingBackend as joblib backend " + "instead of MyCustomProcessingBackend as the latter " + "does not provide shared memory semantics.") + assert out.strip() == expected + assert err == "" + + with raises(ValueError): + Parallel(backend=MyCustomProcessingBackend(), require='sharedmem') + + +def test_invalid_backend_hinting_and_constraints(): + with raises(ValueError): + Parallel(prefer='invalid') + + with raises(ValueError): + Parallel(require='invalid') + + with raises(ValueError): + # It is inconsistent to prefer process-based parallelism while + # requiring shared memory semantics. + Parallel(prefer='processes', require='sharedmem') + + if mp is not None: + # It is inconsistent to ask explicitly for a process-based + # parallelism while requiring shared memory semantics. 
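+        # Both 'loky' and 'multiprocessing' are process-based backends, so
+        # combining them with require='sharedmem' must raise a ValueError.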
+        with raises(ValueError):
+            Parallel(backend='loky', require='sharedmem')
+        with raises(ValueError):
+            Parallel(backend='multiprocessing', require='sharedmem')
+
+
+def _recursive_backend_info(limit=3, **kwargs):
+    """Perform nested parallel calls and introspect the backend on the way"""
+
+    with Parallel(n_jobs=2) as p:
+        this_level = [(type(p._backend).__name__, p._backend.nesting_level)]
+        if limit == 0:
+            return this_level
+        results = p(delayed(_recursive_backend_info)(limit=limit - 1, **kwargs)
+                    for i in range(1))
+        return this_level + results[0]
+
+
+@with_multiprocessing
+@parametrize('backend', ['loky', 'threading'])
+@parametrize("context", [parallel_config, parallel_backend])
+def test_nested_parallelism_limit(context, backend):
+    with context(backend, n_jobs=2):
+        backend_types_and_levels = _recursive_backend_info()
+
+    if cpu_count() == 1:
+        second_level_backend_type = 'SequentialBackend'
+        max_level = 1
+    else:
+        second_level_backend_type = 'ThreadingBackend'
+        max_level = 2
+
+    top_level_backend_type = backend.title() + 'Backend'
+    expected_types_and_levels = [
+        (top_level_backend_type, 0),
+        (second_level_backend_type, 1),
+        ('SequentialBackend', max_level),
+        ('SequentialBackend', max_level)
+    ]
+    assert backend_types_and_levels == expected_types_and_levels
+
+
+@with_numpy
+@parametrize("context", [parallel_config, parallel_backend])
+@skipif(distributed is None, reason='This test requires dask')
+def test_nested_parallelism_with_dask(context):
+    with distributed.Client(n_workers=2, threads_per_worker=2):
+        # 10 MB of data as argument to trigger implicit scattering
+        data = np.ones(int(1e7), dtype=np.uint8)
+        for i in range(2):
+            with context('dask'):
+                backend_types_and_levels = _recursive_backend_info(data=data)
+            assert len(backend_types_and_levels) == 4
+            assert all(name == 'DaskDistributedBackend'
+                       for name, _ in backend_types_and_levels)
+
+        # No argument
+        with context('dask'):
+            backend_types_and_levels = _recursive_backend_info()
+        assert len(backend_types_and_levels) == 4
+        assert all(name == 'DaskDistributedBackend'
+                   for name, _ in backend_types_and_levels)
+
+
+def _recursive_parallel(nesting_limit=None):
+    """A horrible function that does recursive parallel calls"""
+    return Parallel()(delayed(_recursive_parallel)() for i in range(2))
+
+
+@pytest.mark.no_cover
+@parametrize("context", [parallel_config, parallel_backend])
+@parametrize(
+    'backend', (['threading'] if mp is None else ['loky', 'threading'])
+)
+def test_thread_bomb_mitigation(context, backend):
+    # Test that recursive parallelism raises a RecursionError rather than
+    # saturating the operating system resources by creating an unbounded
+    # number of threads.
+    with context(backend, n_jobs=2):
+        with raises(BaseException) as excinfo:
+            _recursive_parallel()
+    exc = excinfo.value
+    if backend == "loky":
+        # Local import because loky may not be importable for lack of
+        # multiprocessing
+        from joblib.externals.loky.process_executor import TerminatedWorkerError  # noqa
+        if isinstance(exc, (TerminatedWorkerError, PicklingError)):
+            # The recursion exception can itself cause an error when
+            # pickling it to be sent back to the parent process. In this
+            # case the worker crashes but the original traceback is still
+            # printed on stderr. This could be improved but does not seem
+            # simple to do and this is not critical for users (as long
+            # as there is no process or thread bomb happening).
+            pytest.xfail("Loky worker crash when serializing RecursionError")
+
+    assert isinstance(exc, RecursionError)
+
+
+def _run_parallel_sum():
+    env_vars = {}
+    for var in ['OMP_NUM_THREADS', 'OPENBLAS_NUM_THREADS', 'MKL_NUM_THREADS',
+                'VECLIB_MAXIMUM_THREADS', 'NUMEXPR_NUM_THREADS',
+                'NUMBA_NUM_THREADS', 'ENABLE_IPC']:
+        env_vars[var] = os.environ.get(var)
+    return env_vars, parallel_sum(100)
+
+
+@parametrize("backend", ([None, 'loky'] if mp is not None else [None]))
+@skipif(parallel_sum is None, reason="Need OpenMP helper compiled")
+def test_parallel_thread_limit(backend):
+    results = Parallel(n_jobs=2, backend=backend)(
+        delayed(_run_parallel_sum)() for _ in range(2)
+    )
+    expected_num_threads = max(cpu_count() // 2, 1)
+    for worker_env_vars, omp_num_threads in results:
+        assert omp_num_threads == expected_num_threads
+        for name, value in worker_env_vars.items():
+            if name.endswith("_THREADS"):
+                assert value == str(expected_num_threads)
+            else:
+                assert name == "ENABLE_IPC"
+                assert value == "1"
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+@skipif(distributed is not None, reason='This test requires dask')
+def test_dask_backend_when_dask_not_installed(context):
+    with raises(ValueError, match='Please install dask'):
+        context('dask')
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+def test_zero_worker_backend(context):
+    # joblib.Parallel should reject, with an explicit error message, parallel
+    # backends that have no active worker.
+    class ZeroWorkerBackend(ThreadingBackend):
+        def configure(self, *args, **kwargs):
+            return 0
+
+        def apply_async(self, func, callback=None):  # pragma: no cover
+            raise TimeoutError("No worker available")
+
+        def effective_n_jobs(self, n_jobs):  # pragma: no cover
+            return 0
+
+    expected_msg = "ZeroWorkerBackend has no active worker"
+    with context(ZeroWorkerBackend()):
+        with pytest.raises(RuntimeError, match=expected_msg):
+            Parallel(n_jobs=2)(delayed(id)(i) for i in range(2))
+
+
+def test_globals_update_at_each_parallel_call():
+    # This is a non-regression test related to joblib issues #836 and #833.
+    # Cloudpickle versions between 0.5.4 and 0.7 introduced a bug where
+    # changes to global variables in a parent process between two calls to
+    # joblib.Parallel would not be propagated into the workers.
+    global MY_GLOBAL_VARIABLE
+    MY_GLOBAL_VARIABLE = "original value"
+
+    def check_globals():
+        global MY_GLOBAL_VARIABLE
+        return MY_GLOBAL_VARIABLE
+
+    assert check_globals() == "original value"
+
+    workers_global_variable = Parallel(n_jobs=2)(
+        delayed(check_globals)() for i in range(2))
+    assert set(workers_global_variable) == {"original value"}
+
+    # Change the value of MY_GLOBAL_VARIABLE, and make sure this change gets
+    # propagated into the workers' environment.
+    MY_GLOBAL_VARIABLE = "changed value"
+    assert check_globals() == "changed value"
+
+    workers_global_variable = Parallel(n_jobs=2)(
+        delayed(check_globals)() for i in range(2))
+    assert set(workers_global_variable) == {"changed value"}
+
+
+##############################################################################
+# Test environment variables in the child env, in particular for limiting
+# the maximal number of threads in C-library threadpools.
+#
+
+def _check_numpy_threadpool_limits():
+    import numpy as np
+    # Let's call BLAS on a matrix-matrix multiplication with dimensions large
+    # enough to ensure that the threadpool managed by the underlying BLAS
+    # implementation is actually used so as to force its initialization.
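+    # (A 100x100 float64 product should be enough for common BLAS builds such
+    # as OpenBLAS or MKL to spin up their worker threadpools, so that the
+    # threadpool_info() call below reports them.)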
+    a = np.random.randn(100, 100)
+    np.dot(a, a)
+    from threadpoolctl import threadpool_info
+    return threadpool_info()
+
+
+def _parent_max_num_threads_for(child_module, parent_info):
+    for parent_module in parent_info:
+        if parent_module['filepath'] == child_module['filepath']:
+            return parent_module['num_threads']
+    raise ValueError("An unexpected module was loaded in child:\n{}"
+                     .format(child_module))
+
+
+def check_child_num_threads(workers_info, parent_info, num_threads):
+    # Check that the number of threads reported in workers_info is consistent
+    # with the expectation. We need to be careful to handle the cases where
+    # the requested number of threads is below max_num_threads for the
+    # library.
+    for child_threadpool_info in workers_info:
+        for child_module in child_threadpool_info:
+            parent_max_num_threads = _parent_max_num_threads_for(
+                child_module, parent_info)
+            expected = {min(num_threads, parent_max_num_threads), num_threads}
+            assert child_module['num_threads'] in expected
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize('n_jobs', [2, 4, -2, -1])
+def test_threadpool_limitation_in_child_loky(n_jobs):
+    # Check that the protection against oversubscription in workers is
+    # working, using the threadpoolctl functionality.
+
+    # Skip this test if numpy is not linked to a BLAS library
+    parent_info = _check_numpy_threadpool_limits()
+    if len(parent_info) == 0:
+        pytest.skip(reason="Need a version of numpy linked to BLAS")
+
+    workers_threadpool_infos = Parallel(backend="loky", n_jobs=n_jobs)(
+        delayed(_check_numpy_threadpool_limits)() for i in range(2))
+
+    n_jobs = effective_n_jobs(n_jobs)
+    expected_child_num_threads = max(cpu_count() // n_jobs, 1)
+
+    check_child_num_threads(workers_threadpool_infos, parent_info,
+                            expected_child_num_threads)
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize('inner_max_num_threads', [1, 2, 4, None])
+@parametrize('n_jobs', [2, -1])
+@parametrize("context", [parallel_config, parallel_backend])
+def test_threadpool_limitation_in_child_context(
+    context, n_jobs, inner_max_num_threads
+):
+    # Check that the protection against oversubscription in workers is
+    # working, using the threadpoolctl functionality.
+
+    # Skip this test if numpy is not linked to a BLAS library
+    parent_info = _check_numpy_threadpool_limits()
+    if len(parent_info) == 0:
+        pytest.skip(reason="Need a version of numpy linked to BLAS")
+
+    with context('loky', inner_max_num_threads=inner_max_num_threads):
+        workers_threadpool_infos = Parallel(n_jobs=n_jobs)(
+            delayed(_check_numpy_threadpool_limits)() for i in range(2))
+
+    n_jobs = effective_n_jobs(n_jobs)
+    if inner_max_num_threads is None:
+        expected_child_num_threads = max(cpu_count() // n_jobs, 1)
+    else:
+        expected_child_num_threads = inner_max_num_threads
+
+    check_child_num_threads(workers_threadpool_infos, parent_info,
+                            expected_child_num_threads)
+
+
+@with_multiprocessing
+@parametrize('n_jobs', [2, -1])
+@parametrize('var_name', ["OPENBLAS_NUM_THREADS",
+                          "MKL_NUM_THREADS",
+                          "OMP_NUM_THREADS"])
+@parametrize("context", [parallel_config, parallel_backend])
+def test_threadpool_limitation_in_child_override(context, n_jobs, var_name):
+    # Check that environment variables set by the user on the main process
+    # always take priority.
+
+    # Clean up the existing executor because we change the environment of the
+    # parent at runtime, which loky intentionally does not detect.
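+    # (Shutting down the shared executor forces the next Parallel call to
+    # spawn fresh workers that inherit the environment variable set below.)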
+    get_reusable_executor(reuse=True).shutdown()
+
+    def _get_env(var_name):
+        return os.environ.get(var_name)
+
+    original_var_value = os.environ.get(var_name)
+    try:
+        os.environ[var_name] = "4"
+        # Skip this test if numpy is not linked to a BLAS library
+        results = Parallel(n_jobs=n_jobs)(
+            delayed(_get_env)(var_name) for i in range(2))
+        assert results == ["4", "4"]
+
+        with context('loky', inner_max_num_threads=1):
+            results = Parallel(n_jobs=n_jobs)(
+                delayed(_get_env)(var_name) for i in range(2))
+        assert results == ["1", "1"]
+
+    finally:
+        if original_var_value is None:
+            del os.environ[var_name]
+        else:
+            os.environ[var_name] = original_var_value
+
+
+@with_multiprocessing
+@parametrize('n_jobs', [2, 4, -1])
+def test_loky_reuse_workers(n_jobs):
+    # Non-regression test for issue #967 where the workers are not reused when
+    # calling multiple Parallel loops.
+
+    def parallel_call(n_jobs):
+        x = range(10)
+        Parallel(n_jobs=n_jobs)(delayed(sum)(x) for i in range(10))
+
+    # Run a parallel loop and get the workers used for computations
+    parallel_call(n_jobs)
+    first_executor = get_reusable_executor(reuse=True)
+
+    # Ensure that the workers are reused for the next calls, as the executor
+    # is not restarted.
+    for _ in range(10):
+        parallel_call(n_jobs)
+        executor = get_reusable_executor(reuse=True)
+        assert executor == first_executor
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_store_backends.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_store_backends.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5db16757ea45fd6761e1b8cfd35e5f5a920752f
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_store_backends.py
@@ -0,0 +1,94 @@
+
+try:
+    # Python 2.7: use the C pickle to speed up
+    # test_concurrency_safe_write which pickles big python objects
+    import cPickle as cpickle
+except ImportError:
+    import pickle as cpickle
+import functools
+from pickle import PicklingError
+import time
+
+import pytest
+
+from joblib.testing import parametrize, timeout
+from joblib.test.common import with_multiprocessing
+from joblib.backports import concurrency_safe_rename
+from joblib import Parallel, delayed
+from joblib._store_backends import (
+    concurrency_safe_write,
+    FileSystemStoreBackend,
+    CacheWarning,
+)
+
+
+def write_func(output, filename):
+    with open(filename, 'wb') as f:
+        cpickle.dump(output, f)
+
+
+def load_func(expected, filename):
+    for i in range(10):
+        try:
+            with open(filename, 'rb') as f:
+                reloaded = cpickle.load(f)
+            break
+        except (OSError, IOError):
+            # On Windows you can have WindowsError ([Error 5] Access
+            # is denied or [Error 13] Permission denied) when reading the file,
+            # probably because a writer process has a lock on the file
+            time.sleep(0.1)
+    else:
+        raise
+    assert expected == reloaded
+
+
+def concurrency_safe_write_rename(to_write, filename, write_func):
+    temporary_filename = concurrency_safe_write(to_write,
+                                                filename, write_func)
+    concurrency_safe_rename(temporary_filename, filename)
+
+
+@timeout(0)  # No timeout as this test can be long
+@with_multiprocessing
+@parametrize('backend', ['multiprocessing', 'loky', 'threading'])
+def test_concurrency_safe_write(tmpdir, backend):
+    # Add one item to cache
+    filename = tmpdir.join('test.pkl').strpath
+
+    obj = {str(i): i for i in range(int(1e5))}
+    funcs = [functools.partial(concurrency_safe_write_rename,
+                               write_func=write_func)
+             if i % 3 != 2 else load_func for i in range(12)]
+    Parallel(n_jobs=2, backend=backend)(
+        delayed(func)(obj, filename) for func in funcs)
+
+
+def test_warning_on_dump_failure(tmpdir):
+    # Check that a warning is raised when the dump fails for any reason but
+    # a PicklingError.
+    class UnpicklableObject(object):
+        def __reduce__(self):
+            raise RuntimeError("some exception")
+
+    backend = FileSystemStoreBackend()
+    backend.location = tmpdir.join('test_warning_on_pickling_error').strpath
+    backend.compress = None
+
+    with pytest.warns(CacheWarning, match="some exception"):
+        backend.dump_item("testpath", UnpicklableObject())
+
+
+def test_warning_on_pickling_error(tmpdir):
+    # This is separate from test_warning_on_dump_failure because in the
+    # future we will turn this into an exception.
+    class UnpicklableObject(object):
+        def __reduce__(self):
+            raise PicklingError("not picklable")
+
+    backend = FileSystemStoreBackend()
+    backend.location = tmpdir.join('test_warning_on_pickling_error').strpath
+    backend.compress = None
+
+    with pytest.warns(FutureWarning, match="not picklable"):
+        backend.dump_item("testpath", UnpicklableObject())
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_testing.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8095aa67040ce868849b89927b325895b5d8e34
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_testing.py
@@ -0,0 +1,75 @@
+import sys
+import re
+
+from joblib.testing import raises, check_subprocess_call
+
+
+def test_check_subprocess_call():
+    code = '\n'.join(['result = 1 + 2 * 3',
+                      'print(result)',
+                      'my_list = [1, 2, 3]',
+                      'print(my_list)'])
+
+    check_subprocess_call([sys.executable, '-c', code])
+
+    # Now checking stdout with a regex
+    check_subprocess_call([sys.executable, '-c', code],
+                          # Regex needed for platform-specific line endings
+                          stdout_regex=r'7\s{1,2}\[1, 2, 3\]')
+
+
+def test_check_subprocess_call_non_matching_regex():
+    code = '42'
+    non_matching_pattern = '_no_way_this_matches_anything_'
+
+    with raises(ValueError) as excinfo:
+        check_subprocess_call([sys.executable, '-c', code],
+                              stdout_regex=non_matching_pattern)
+    excinfo.match('Unexpected stdout.+{}'.format(non_matching_pattern))
+
+
+def test_check_subprocess_call_wrong_command():
+    wrong_command = '_a_command_that_does_not_exist_'
+    with raises(OSError):
+        check_subprocess_call([wrong_command])
+
+
+def test_check_subprocess_call_non_zero_return_code():
+    code_with_non_zero_exit = '\n'.join([
+        'import sys',
+        'print("writing on stdout")',
+        'sys.stderr.write("writing on stderr")',
+        'sys.exit(123)'])
+
+    pattern = re.compile('Non-zero return code: 123.+'
+                         'Stdout:\nwriting on stdout.+'
+                         'Stderr:\nwriting on stderr', re.DOTALL)
+
+    with raises(ValueError) as excinfo:
+        check_subprocess_call([sys.executable, '-c', code_with_non_zero_exit])
+    excinfo.match(pattern)
+
+
+def test_check_subprocess_call_timeout():
+    code_timing_out = '\n'.join([
+        'import time',
+        'import sys',
+        'print("before sleep on stdout")',
+        'sys.stdout.flush()',
+        'sys.stderr.write("before sleep on stderr")',
+        'sys.stderr.flush()',
+        # We need to sleep for at least 2 * timeout seconds in case the SIGKILL
+        # is triggered.
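+        # (10 seconds comfortably exceeds twice the 1 second timeout passed
+        # to check_subprocess_call below.)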
+        'time.sleep(10)',
+        'print("process should have been killed before")',
+        'sys.stdout.flush()'])
+
+    pattern = re.compile('Non-zero return code:.+'
+                         'Stdout:\nbefore sleep on stdout\\s+'
+                         'Stderr:\nbefore sleep on stderr',
+                         re.DOTALL)
+
+    with raises(ValueError) as excinfo:
+        check_subprocess_call([sys.executable, '-c', code_timing_out],
+                              timeout=1)
+    excinfo.match(pattern)
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_utils.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4999a212c462bdb6c10e9e08fdfba74d03b05294
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_utils.py
@@ -0,0 +1,27 @@
+import pytest
+
+from joblib._utils import eval_expr
+
+
+@pytest.mark.parametrize(
+    "expr",
+    ["exec('import os')", "print(1)", "import os", "1+1; import os", "1^1"],
+)
+def test_eval_expr_invalid(expr):
+    with pytest.raises(
+        ValueError, match="is not a valid or supported arithmetic"
+    ):
+        eval_expr(expr)
+
+
+@pytest.mark.parametrize(
+    "expr, result",
+    [
+        ("2*6", 12),
+        ("2**6", 64),
+        ("1 + 2*3**(4) / (6 + -7)", -161.0),
+        ("(20 // 3) % 5", 1),
+    ],
+)
+def test_eval_expr_valid(expr, result):
+    assert eval_expr(expr) == result
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/testutils.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/testutils.py
new file mode 100644
index 0000000000000000000000000000000000000000..20ec8c1ba0a50da6be9fce3686ac1950b1a55ab5
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/testutils.py
@@ -0,0 +1,8 @@
+def return_slice_of_data(arr, start_idx, end_idx):
+    return arr[start_idx:end_idx]
+
+
+def print_filename_and_raise(arr):
+    from joblib._memmapping_reducer import _get_backing_memmap
+    print(_get_backing_memmap(arr).filename)
+    raise ValueError
diff --git a/evalkit_internvl/lib/python3.10/site-packages/nvidia_ml_py-12.570.86.dist-info/INSTALLER b/evalkit_internvl/lib/python3.10/site-packages/nvidia_ml_py-12.570.86.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/nvidia_ml_py-12.570.86.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/evalkit_internvl/lib/python3.10/site-packages/nvidia_ml_py-12.570.86.dist-info/METADATA b/evalkit_internvl/lib/python3.10/site-packages/nvidia_ml_py-12.570.86.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..f52e7cd82ebddad7c66174058b828db25fa87aa4
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/nvidia_ml_py-12.570.86.dist-info/METADATA
@@ -0,0 +1,261 @@
+Metadata-Version: 2.2
+Name: nvidia-ml-py
+Version: 12.570.86
+Summary: Python Bindings for the NVIDIA Management Library
+Home-page: https://forums.developer.nvidia.com
+Author: NVIDIA Corporation
+Author-email: nvml-bindings@nvidia.com
+License: BSD
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Programming Language :: Python
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: System :: Hardware
+Classifier: Topic :: System :: Systems Administration
+Description-Content-Type: text/markdown
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: description
+Dynamic: description-content-type
+Dynamic: home-page
+Dynamic: license
+Dynamic: summary
+
+pyNVML
+======
+
+Python bindings to the NVIDIA Management Library
+------------------------------------------------
+
+Provides a Python interface to GPU management and monitoring functions.
+
+This is a wrapper around the NVML library.
+For information about the NVML library, see the NVML developer page
+http://developer.nvidia.com/nvidia-management-library-nvml
+
+Download the latest package from:
+http://pypi.python.org/pypi/nvidia-ml-py/
+
+Note this file can be run with 'python -m doctest -v README.txt',
+although the results are system dependent.
+
+The nvml header file contains function documentation that is relevant
+to this wrapper. The header file is distributed with the GPU Deployment Kit:
+https://developer.nvidia.com/gpu-deployment-kit
+
+The main difference is that this library handles allocating structs and
+passing pointers to the functions, before returning the desired value.
+Non-success return codes are raised as exceptions as described in the
+section below.
+
+REQUIRES
+--------
+Python 2.5, or an earlier version with the ctypes module.
+
+INSTALLATION
+------------
+
+Pip Installation with python3:
+- `python3 -m pip install nvidia-ml-py`
+
+Manual Installation:
+```
+$ tar -xzf nvidia-ml-py-$major-$minor-$patch.tar.gz
+$ cd nvidia-ml-py-$major-$minor-$patch
+$ sudo python setup.py install
+```
+
+USAGE
+-----
+```
+>>> from pynvml import *
+>>> nvmlInit()
+>>> print(f"Driver Version: {nvmlSystemGetDriverVersion()}")
+Driver Version: 11.515.48
+>>> deviceCount = nvmlDeviceGetCount()
+>>> for i in range(deviceCount):
+...     handle = nvmlDeviceGetHandleByIndex(i)
+...     print(f"Device {i} : {nvmlDeviceGetName(handle)}")
+...
+Device 0 : Tesla K40c
+
+>>> nvmlShutdown()
+```
+
+FUNCTIONS
+---------
+Python methods wrap NVML functions, implemented in a C shared library.
+Each function's use is the same with the following exceptions:
+
+- Instead of returning error codes, failing error codes are raised as Python exceptions.
+
+```
+>>> try:
+...     nvmlDeviceGetCount()
+... except NVMLError as error:
+...     print(error)
+...
+Uninitialized
+```
+
+- C function output parameters are returned from the corresponding Python function left to right.
+```
+nvmlReturn_t nvmlDeviceGetEccMode(nvmlDevice_t device,
+                                  nvmlEnableState_t *current,
+                                  nvmlEnableState_t *pending);
+
+>>> nvmlInit()
+>>> handle = nvmlDeviceGetHandleByIndex(0)
+>>> (current, pending) = nvmlDeviceGetEccMode(handle)
+```
+- C structs are converted into Python classes.
+
+```
+// C Function and typedef struct
+nvmlReturn_t DECLDIR nvmlDeviceGetMemoryInfo(nvmlDevice_t device,
+                                             nvmlMemory_t *memory);
+typedef struct nvmlMemory_st {
+    unsigned long long total;
+    unsigned long long free;
+    unsigned long long used;
+} nvmlMemory_t;
+
+
+# Python call to function and accessing members of ctype struct
+>>> info = nvmlDeviceGetMemoryInfo(handle)
+>>> print(f"Total memory: {info.total}")
+Total memory: 5636292608
+>>> print(f"Free memory: {info.free}")
+Free memory: 5578420224
+>>> print(f"Used memory: {info.used}")
+Used memory: 57872384
+```
+
+- Python handles string buffer creation.
+
+```
+// C Function that needs character array and length
+nvmlReturn_t nvmlSystemGetDriverVersion(char* version,
+                                        unsigned int length);
+
+# Python function handles memory
+>>> version = nvmlSystemGetDriverVersion()
+>>> print(version)
+... 11.520.75
+>>> nvmlShutdown()
+```
+
+For usage information see the NVML documentation.
+
+VARIABLES
+---------
+All meaningful NVML constants and enums are exposed in Python.
+
+The NVML_VALUE_NOT_AVAILABLE constant is not used. Instead, None is mapped to the field.
+
+EXCEPTIONS
+----------
+Since the C library uses return codes and Python prefers exception handling, the
+library converts all return codes to various exceptions. The exceptions are generated
+automatically via a function at run time instead of being defined manually.
+
+The list of exceptions can be found in the NVMLError base class.
+
+The example seen above in the FUNCTIONS section:
+
+```
+>>> try:
+...     nvmlDeviceGetCount()
+... except NVMLError as error:
+...     print(error)
+...
+Uninitialized
+```
+
+Can be more accurately caught like this:
+
+```
+>>> try:
+...     nvmlDeviceGetCount()
+... except NVMLError_Uninitialized as error:
+...     print(error)
+...
+Uninitialized
+```
+
+The conversion from name to exception is like this for all exceptions:
+* `NVML_ERROR_UNINITIALIZED` => `NVMLError_Uninitialized`
+* `NVML_ERROR_LIBRARY_NOT_FOUND` => `NVMLError_LibraryNotFound`
+* `NVML_ERROR_ALREADY_INITIALIZED` => `NVMLError_AlreadyInitialized`
+
+RELEASE NOTES
+-------------
+Version 2.285.0
+- Added new functions for NVML 2.285. See NVML documentation for more information.
+- Ported to support Python 3.0 and Python 2.0 syntax.
+- Added nvidia_smi.py tool as a sample app.
+
+Version 3.295.0
+- Added new functions for NVML 3.295. See NVML documentation for more information.
+- Updated nvidia_smi.py tool
+- Includes additional error handling
+
+Version 4.304.0
+- Added new functions for NVML 4.304. See NVML documentation for more information.
+- Updated nvidia_smi.py tool
+
+Version 4.304.3
+- Fixing nvmlUnitGetDeviceCount bug
+
+Version 5.319.0
+- Added new functions for NVML 5.319. See NVML documentation for more information.
+
+Version 6.340.0
+- Added new functions for NVML 6.340. See NVML documentation for more information.
+
+Version 7.346.0
+- Added new functions for NVML 7.346. See NVML documentation for more information.
+
+Version 7.352.0
+- Added new functions for NVML 7.352. See NVML documentation for more information.
+
+Version 10.418
+- Added new functions for NVML 10.418. See NVML documentation for more information.
+- Fixed issues with using the bindings with Python 3.x
+- Replaced sample app nvidia_smi.py with example.py
+
+Version 11.515.48
+- Python3 support added
+- Updated API to add functions new to NVML, bringing pynvml up to date with NVML
+- Added auto-version to handle byte and string conversion automatically for both structs and functions
+- Minor bug fixes
+- Added README.txt correctly in long_description for pypi.org
+
+Version 11.520
+- Updated Long Description to be actual markdown
+- Added new functions for NVML 11.520
+
+Version 11.525
+- Added new functions for NVML 11.525
+
+COPYRIGHT
+---------
+Copyright (c) 2011-2023, NVIDIA Corporation. All rights reserved.
+
+LICENSE
+-------
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+- Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+- Neither the name of the NVIDIA Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/evalkit_internvl/lib/python3.10/site-packages/nvidia_ml_py-12.570.86.dist-info/REQUESTED b/evalkit_internvl/lib/python3.10/site-packages/nvidia_ml_py-12.570.86.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/evalkit_internvl/lib/python3.10/site-packages/nvidia_ml_py-12.570.86.dist-info/top_level.txt b/evalkit_internvl/lib/python3.10/site-packages/nvidia_ml_py-12.570.86.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..32cd7dc2238f6f95fdc3a286f712d402cdda37aa
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/nvidia_ml_py-12.570.86.dist-info/top_level.txt
@@ -0,0 +1,2 @@
+example
+pynvml
diff --git a/evalkit_internvl/lib/python3.10/site-packages/pyasn1_modules-0.4.1.dist-info/INSTALLER b/evalkit_internvl/lib/python3.10/site-packages/pyasn1_modules-0.4.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/pyasn1_modules-0.4.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/evalkit_internvl/lib/python3.10/site-packages/pyasn1_modules-0.4.1.dist-info/LICENSE.txt b/evalkit_internvl/lib/python3.10/site-packages/pyasn1_modules-0.4.1.dist-info/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..598b8430eff95ffcc7152feb2c9668d716f1c8eb
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/pyasn1_modules-0.4.1.dist-info/LICENSE.txt
@@ -0,0 +1,24 @@
+Copyright (c) 2005-2020, Ilya Etingof
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+  * Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+  * Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/evalkit_internvl/lib/python3.10/site-packages/pyasn1_modules-0.4.1.dist-info/METADATA b/evalkit_internvl/lib/python3.10/site-packages/pyasn1_modules-0.4.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..40fc8b89b3076db413112bcf87a5e8094eae7d00
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/pyasn1_modules-0.4.1.dist-info/METADATA
@@ -0,0 +1,72 @@
+Metadata-Version: 2.1
+Name: pyasn1_modules
+Version: 0.4.1
+Summary: A collection of ASN.1-based protocol modules
+Home-page: https://github.com/pyasn1/pyasn1-modules
+Author: Ilya Etingof
+Author-email: etingof@gmail.com
+Maintainer: pyasn1 maintenance organization
+Maintainer-email: Christian Heimes
+License: BSD
+Project-URL: Source, https://github.com/pyasn1/pyasn1-modules
+Project-URL: Issues, https://github.com/pyasn1/pyasn1-modules/issues
+Project-URL: Changelog, https://github.com/pyasn1/pyasn1-modules/blob/master/CHANGES.txt
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Information Technology
+Classifier: Intended Audience :: System Administrators
+Classifier: Intended Audience :: Telecommunications Industry
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Natural Language :: English
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Communications
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+License-File: LICENSE.txt
+Requires-Dist: pyasn1 <0.7.0,>=0.4.6
+
+
+ASN.1 modules for Python
+------------------------
+[![PyPI](https://img.shields.io/pypi/v/pyasn1-modules.svg?maxAge=2592000)](https://pypi.org/project/pyasn1-modules)
+[![Python Versions](https://img.shields.io/pypi/pyversions/pyasn1-modules.svg)](https://pypi.org/project/pyasn1-modules/)
+[![Build status](https://github.com/pyasn1/pyasn1-modules/actions/workflows/main.yml/badge.svg)](https://github.com/pyasn1/pyasn1-modules/actions/workflows/main.yml)
+[![Coverage Status](https://img.shields.io/codecov/c/github/pyasn1/pyasn1-modules.svg)](https://codecov.io/github/pyasn1/pyasn1-modules)
+[![GitHub license](https://img.shields.io/badge/license-BSD-blue.svg)](https://raw.githubusercontent.com/pyasn1/pyasn1-modules/master/LICENSE.txt)
+
+The `pyasn1-modules` package contains a collection of
+[ASN.1](https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-X.208-198811-W!!PDF-E&type=items)
+data structures expressed as Python classes based on the
+[pyasn1](https://github.com/pyasn1/pyasn1) data model.
+
+If the ASN.1 module you need is not present in this collection, try the
+[Asn1ate](https://github.com/kimgr/asn1ate) tool, which compiles ASN.1
+documents into pyasn1 code.
+
+**NOTE:** The package is now maintained by *Christian Heimes* and
+*Simon Pichugin* in project https://github.com/pyasn1/pyasn1-modules.
+
+Feedback
+--------
+
+If something does not work as expected,
+[open an issue](https://github.com/pyasn1/pyasn1-modules/issues) at GitHub
+or post your question [on Stack Overflow](https://stackoverflow.com/questions/ask).
+
+New module contributions are welcome via GitHub pull requests.
+
+Copyright (c) 2005-2020, [Ilya Etingof](mailto:etingof@gmail.com).
+All rights reserved.
diff --git a/evalkit_internvl/lib/python3.10/site-packages/pyasn1_modules-0.4.1.dist-info/RECORD b/evalkit_internvl/lib/python3.10/site-packages/pyasn1_modules-0.4.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..f64d49e9fdbf603511b15e8d43c4dc9e9e367c21
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/pyasn1_modules-0.4.1.dist-info/RECORD
@@ -0,0 +1,272 @@
+pyasn1_modules-0.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pyasn1_modules-0.4.1.dist-info/LICENSE.txt,sha256=Kq1fwA9wXEoa3bg-7RCmp10oajd58M-FGdh-YrxHNf0,1334
+pyasn1_modules-0.4.1.dist-info/METADATA,sha256=WSFUaKrfbedz6tktpYhCNYRJIYGA4HtUV_WUz37JbkY,3463
+pyasn1_modules-0.4.1.dist-info/RECORD,,
+pyasn1_modules-0.4.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pyasn1_modules-0.4.1.dist-info/WHEEL,sha256=cVxcB9AmuTcXqmwrtPhNK88dr7IR_b6qagTj0UvIEbY,91
+pyasn1_modules-0.4.1.dist-info/top_level.txt,sha256=e_AojfE1DNY4M8P9LAS7qh8Fx3eOmovobqkr7NEjlg4,15
+pyasn1_modules-0.4.1.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+pyasn1_modules/__init__.py,sha256=wyAeH6rFjPCa4DOpn5UHHyRBt_kXHnItSWwMdo6jFJ8,65
+pyasn1_modules/__pycache__/__init__.cpython-310.pyc,,
+pyasn1_modules/__pycache__/pem.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc1155.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc1157.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc1901.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc1902.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc1905.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc2251.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc2314.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc2315.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc2437.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc2459.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc2511.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc2560.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc2631.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc2634.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc2876.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc2985.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc2986.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc3058.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc3114.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc3125.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc3161.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc3274.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc3279.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc3280.cpython-310.pyc,,
+pyasn1_modules/__pycache__/rfc3281.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc3370.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc3412.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc3414.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc3447.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc3537.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc3560.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc3565.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc3657.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc3709.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc3739.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc3770.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc3779.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc3820.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc3852.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc4010.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc4043.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc4055.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc4073.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc4108.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc4210.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc4211.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc4334.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc4357.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc4387.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc4476.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc4490.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc4491.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc4683.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc4985.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5035.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5083.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5084.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5126.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5208.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5275.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5280.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5480.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5636.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5639.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5649.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5652.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5697.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5751.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5752.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5753.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5755.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5913.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5914.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5915.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5916.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5917.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5924.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5934.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5940.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5958.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc5990.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc6010.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc6019.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc6031.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc6032.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc6120.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc6170.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc6187.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc6210.cpython-310.pyc,, 
+pyasn1_modules/__pycache__/rfc6211.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc6402.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc6482.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc6486.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc6487.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc6664.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc6955.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc6960.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc7030.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc7191.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc7229.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc7292.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc7296.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc7508.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc7585.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc7633.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc7773.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc7894.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc7906.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc7914.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8017.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8018.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8103.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8209.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8226.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8358.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8360.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8398.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8410.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8418.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8419.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8479.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8494.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8520.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8619.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8649.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8692.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8696.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8702.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8708.cpython-310.pyc,, +pyasn1_modules/__pycache__/rfc8769.cpython-310.pyc,, +pyasn1_modules/pem.py,sha256=J9x8rEx_jhdXBKeM4X3JS5ucRNsdQyaNJrs4Dfog0MM,1821 +pyasn1_modules/rfc1155.py,sha256=yG3A0ClwC3diCdMxEywvLvdc8eBtL_fh9yIdImYij2E,2683 +pyasn1_modules/rfc1157.py,sha256=JBkbJD7LcyGH4vzxYIXgAw58X15Wx6RrL59NMBLqYRU,3554 +pyasn1_modules/rfc1901.py,sha256=qpv7P8cC0kcy33rZ5_Et0AXgJlOB6Rsv6gpMpWnZBGk,646 +pyasn1_modules/rfc1902.py,sha256=VfB6zThzmIzBPcAkosPVfNQH0D-MtKYarL7BvgMJS0U,3705 +pyasn1_modules/rfc1905.py,sha256=q---r52j2AyQ4HL3hRbWBcNGNLTahDsAGAJiCYzsW0Q,4831 +pyasn1_modules/rfc2251.py,sha256=jHH9A0FTDwNp4lJLCr24_tFSofi_5r2rq8BLEB-EfcI,26931 +pyasn1_modules/rfc2314.py,sha256=h7G-AdL89X9-3qxtjcQYAikVtvK-l4xmxD_KZySDH50,1313 +pyasn1_modules/rfc2315.py,sha256=_0tMHafjOM4lUYOg-Cc_-_Fd_BGlPT32erpYswntRLM,9666 +pyasn1_modules/rfc2437.py,sha256=1lie1MIMdOFBnlAETF2ThpLs0rz6aV8AJaSlwWMsRUY,2623 +pyasn1_modules/rfc2459.py,sha256=ZHKavZuowKbEbzTxpjtSNi3nBUS3gsigSZjwMoPx2E0,50002 +pyasn1_modules/rfc2511.py,sha256=IuJekkyDgOD5mYN9a2zHf4hpKSuhPBgnwbPx5EUOH0k,10350 +pyasn1_modules/rfc2560.py,sha256=ztdWLRPg99biGGOTmkfTIJGZrCEwXNGgUPS-9bOzaBQ,8406 +pyasn1_modules/rfc2631.py,sha256=Het4nHPVFj6oElpEANYkKQuincUa0ms5SOt94Ph8jhs,1219 +pyasn1_modules/rfc2634.py,sha256=7sTu3YysbHImknLk7CbdQIjJjt6cC849-XqkuEDgFPk,9425 +pyasn1_modules/rfc2876.py,sha256=yWx-0S_Lm-qGaEmJ4e3DZy7GKpvxFTf0rQVfnz_ke-8,1438 
+pyasn1_modules/rfc2985.py,sha256=8GL8jkWGpN1t7sVaEtyhVgfCM80XhlYOUEi9jhcAX0E,14359 +pyasn1_modules/rfc2986.py,sha256=fLxMpQEBGC2e68ASrQWCUvRzpndfy1uOiSvf1pa0V-M,1896 +pyasn1_modules/rfc3058.py,sha256=dKAjM1SUIk-Hoj5ps5MDzbDv8fHQma2J6B8SoZqlrb8,992 +pyasn1_modules/rfc3114.py,sha256=02eDCK2blUNybTaGX85vxGfCTnzHXXa9BP9IaVVocK8,1961 +pyasn1_modules/rfc3125.py,sha256=bd80G-Frzu3A_rfql04WLJcbKigQjsDY_6kFBinMFQk,16707 +pyasn1_modules/rfc3161.py,sha256=9kz_TvQ5_OpBPuHQDAh2WyqKeOThgxPq8E5iBB-sNp8,4260 +pyasn1_modules/rfc3274.py,sha256=ZULbMN3wksvv_fWvT_C1vskxuh_IzRCAD9QD1hdk-lo,1670 +pyasn1_modules/rfc3279.py,sha256=uRaWfvIw4WXBoJN9gcAhsW8MTDymGoa-FrrC2k033TI,6807 +pyasn1_modules/rfc3280.py,sha256=JxIDKgqrhEkRmr54yNmoNsFCRIK8Lz8X85yLHiMTXcg,46620 +pyasn1_modules/rfc3281.py,sha256=IBiEVBhKoWlb0KJwVwqAPW91PPW-v8Dla8p52n2GgXY,9866 +pyasn1_modules/rfc3370.py,sha256=q3dToJOoYvaxFO89Zqa_nYi9l3rQvGwCfLF7qFfhbfs,2811 +pyasn1_modules/rfc3412.py,sha256=MqrXwY35FPEZqXNdey4LZ44wBO6nrITqD5-thE_tLd8,1956 +pyasn1_modules/rfc3414.py,sha256=NmGZ3dWZY3e_Ovv0PyHDdK8hOqdbaar2AuUSi7YKkDc,1167 +pyasn1_modules/rfc3447.py,sha256=IBxHcG0Xg8kXomMB6hFv-CMa_Y9XQQa-b1bs7L0bhsM,1605 +pyasn1_modules/rfc3537.py,sha256=I0LVSnsyyMOTNJyLda-8T98JShYtDk13yPd_bmrZ3OQ,796 +pyasn1_modules/rfc3560.py,sha256=3Ud7sY7OAV_4KGNn_hg5xZblEkxE_ILH1kP2TI-KbZw,1818 +pyasn1_modules/rfc3565.py,sha256=nRephcXY7ioG5I4iaT6mSQYGwaouRQXoMnp2kFQQOE0,1438 +pyasn1_modules/rfc3657.py,sha256=H484bVE7VJ05rVbk6BdlbIbAWCd7CFqt5e5BptNs3wY,1746 +pyasn1_modules/rfc3709.py,sha256=KAaG7SKTT9Ef-Kza5Zn_qXkZppul8Wt8MPSkzS4qs5o,6469 +pyasn1_modules/rfc3739.py,sha256=p-D897qRYt6QR0fWmgoPVmB6VXnz_1mMY1HRJnH9eeE,5122 +pyasn1_modules/rfc3770.py,sha256=ue0Qaiys8J86M-8EtLNrcfuXm87Mr2GQ4f30lSs0vXE,1743 +pyasn1_modules/rfc3779.py,sha256=x8HYKGCaGO3BohCREHQUEa1oYGArWIC2J0PftxiPrjI,3260 +pyasn1_modules/rfc3820.py,sha256=hV70UpibmcSzH4_TncHPPHsw4OrtFjMbepS4EvieQLk,1478 +pyasn1_modules/rfc3852.py,sha256=VcWcbTAAYtZ0FDYcuofDxVVSMdKgithdTBixQ898Szs,20101 +pyasn1_modules/rfc4010.py,sha256=J2FptJQv03o8N-mo7vJ9q5_A9vrwEk0crpvAkXmVH74,1228 +pyasn1_modules/rfc4043.py,sha256=OWPgVzfK3Hs5sNQJSqUBkInhgikv-x15-xLSg30xwNE,1067 +pyasn1_modules/rfc4055.py,sha256=f2rlyaBeNhl287b_qLLsNpjgwxYRVzBgbOH28UnJZwQ,10392 +pyasn1_modules/rfc4073.py,sha256=bHVssQE3yXwetes1TPWAT30UhOEinHj8vEBaYjWC24g,1636 +pyasn1_modules/rfc4108.py,sha256=-I63Z0crn_Elvr85nSa9BqAlRx7cIJfEb9ItPDkq8JY,10598 +pyasn1_modules/rfc4210.py,sha256=3ndhsJ5yFx3ZUvUP8EDeJhUilSFAdeL8baT5tKPyIi0,28469 +pyasn1_modules/rfc4211.py,sha256=THqr9n4PGg0jVMmj8k9bhKzb5j7IapwtBinhWQAnL9k,12110 +pyasn1_modules/rfc4334.py,sha256=Q-fcYksrunAo1t07HE2jm5WlQgFAf5o39utpel0ZjcI,1586 +pyasn1_modules/rfc4357.py,sha256=tpZZ-6xDt_u8aYr4qbwe5UJxT-JVGOihuxPaemB1AXM,15036 +pyasn1_modules/rfc4387.py,sha256=CylvEQRpV_U9vVivzTJ4PVDjZEBAKS-6TrVVoepRk2E,441 +pyasn1_modules/rfc4476.py,sha256=klWmMFZg_aMqmVGuYEjXQa3v3iKlopkhMumBoLdmYT4,1960 +pyasn1_modules/rfc4490.py,sha256=Z9nkntSSc9npWHBjV9c86QGW_itvGYQ4l5jJO0a2GCA,3401 +pyasn1_modules/rfc4491.py,sha256=Lpej17T5MfF25FnfWo1CFqmBYWQTJS4WQSGki5hCgsM,1054 +pyasn1_modules/rfc4683.py,sha256=X0P6ln34ZsYkCepgGJH8AxUhoG85TuLof55jIIDMNeI,1839 +pyasn1_modules/rfc4985.py,sha256=oWCBG3tknFLUJOeG4aKF7JrkA4qMjPyJFGTnf7xmPd8,961 +pyasn1_modules/rfc5035.py,sha256=xgw9ztAM_bJKlIUCzni2zcE_z3ErEuXpWRPJpXI1KEw,4523 +pyasn1_modules/rfc5083.py,sha256=ENXIEL0CYrTqvf_iwpvAkBBJpi2pOFNBDFEYc37yqF8,1888 +pyasn1_modules/rfc5084.py,sha256=i9sFdUklbdTQodTya4BNFnpeFxGIB2uS1aNkfFdZpu4,2855 
+pyasn1_modules/rfc5126.py,sha256=ape8y-hcslojU0MRD6-JCoQlJxsaS0h_0sWs2FlUGqI,15780 +pyasn1_modules/rfc5208.py,sha256=adwiwa639VdhaNMRTKXfEPl-AkbB5grrLJMJN6FZVsg,1432 +pyasn1_modules/rfc5275.py,sha256=mgirSEvl3OJn1C40dHkNTH04Sm4aEvT8c682FE_cOQ8,11605 +pyasn1_modules/rfc5280.py,sha256=GFwSclsvpTUc2rjEF2fwAK48wHI3PkXxHIbVMkfD4sQ,51236 +pyasn1_modules/rfc5480.py,sha256=GzBTgKQ68V-L-Qy0SBrCQMgqR5mGF7U73uXlBzfV2Jk,4834 +pyasn1_modules/rfc5636.py,sha256=2z0NoxI2uMlTHHGA10Q0W_329PsmZOjSFnciL2CiywE,2324 +pyasn1_modules/rfc5639.py,sha256=28YlbU49j4lr_Blwfjn2-WJyzKQhpJPPZMNW0lWXlSk,1025 +pyasn1_modules/rfc5649.py,sha256=3A--LQL7iw8DGXSDyiSUeh6wwFPKQQGyVY94mNzY0Ek,830 +pyasn1_modules/rfc5652.py,sha256=65A_djYGSuEhuFj08mn7y2kvVrbr4ArxcW0bW5JVzrE,21451 +pyasn1_modules/rfc5697.py,sha256=aWqi-QBZrEr6I5bIppRet4dREqFF8E1tO2BvaZBGOOE,1702 +pyasn1_modules/rfc5751.py,sha256=M8kTLARhdqh3UqmlZv_FWJfuJb-ph7P6MVGxSP7Q4wQ,3198 +pyasn1_modules/rfc5752.py,sha256=Sjijqbi5C_deVnJJMBtOHz7wBWFM7PCTwDWZ2lomWT0,1431 +pyasn1_modules/rfc5753.py,sha256=2Nwr8GsV2TgjTDLtE5QiIunPOA617F3CXB_tPYe7BHc,4534 +pyasn1_modules/rfc5755.py,sha256=RZ28NeCnEAGr2pLRSNFw0BRb_b_eulmxag-lRTmUeTo,12081 +pyasn1_modules/rfc5913.py,sha256=OayMmpi29ZlQI1EszIxXaU8Mhwi41BrH5esoyS80efQ,1161 +pyasn1_modules/rfc5914.py,sha256=nXOb4SvESbEFYI8h0nEYkRArNZ9w5Zqxva_4uAdMXNY,3714 +pyasn1_modules/rfc5915.py,sha256=VqMRd_Ksm0LFvE5XX4_MO6BdFG7Ch7NdQcwT_DMWAK4,1056 +pyasn1_modules/rfc5916.py,sha256=gHrFO9lX21h6Wa3JnEqyjuqXQlcTE0loUIu913Sit0E,800 +pyasn1_modules/rfc5917.py,sha256=nM08rGm9D3O8uqSbmshvp7_fHl2dYaTdhUGVJQHe0xc,1511 +pyasn1_modules/rfc5924.py,sha256=_8TqEJ9Q7cFSd2u3Za6rzlNPqGLl7IA4oHtYVpoJhdA,425 +pyasn1_modules/rfc5934.py,sha256=77z96SeP4iM2R6Rl5-Vx7OaENA8ZQvzrfhDVZRy9lqk,23798 +pyasn1_modules/rfc5940.py,sha256=66rMmgyKBhay-RZsWaKz7PUGwp0bqEAVULPb4Edk1vk,1613 +pyasn1_modules/rfc5958.py,sha256=NZPx-7FvjzgErz2lTURiRq8m3XCZ7D9QbGDhtIF-zCE,2650 +pyasn1_modules/rfc5990.py,sha256=-b0St64ba3LVRGSeNmbGoMIbkU8c8FDpo4zFWF0PCFM,5505 +pyasn1_modules/rfc6010.py,sha256=F43AYVFUwu-2_xjJE2Wmw1Wdt0K7l3vg0_fCa_QHqBU,2347 +pyasn1_modules/rfc6019.py,sha256=vzj5tfG4694-ucpErpAtE1DVOE4-v0dkN894Zr9xm4o,1086 +pyasn1_modules/rfc6031.py,sha256=X2cjNyVnrX3G2zG7kD4Rq__kF6-ftmmnqHlCQJDCuMU,12137 +pyasn1_modules/rfc6032.py,sha256=uNAu5zLHg0b583xxzFNUZxCnJaCzMw1iobzREuejMoM,1950 +pyasn1_modules/rfc6120.py,sha256=JehGZD8Y0Bdhr_ojpMSjHgnRHEdUXauZxqLxRwns6Cc,818 +pyasn1_modules/rfc6170.py,sha256=sL2yPZzO--MI4ToeAwlFEP-x6I0-etuJxT2mgAPjEO4,409 +pyasn1_modules/rfc6187.py,sha256=jOMiIhw4HAUn7hj37gKImNU_hK8TamAfd0V0Jrwh_YU,489 +pyasn1_modules/rfc6210.py,sha256=wLifK_EShv1a4TOhGJ-k9zA1kVVYVDNjS-Rh0ohmCh0,1052 +pyasn1_modules/rfc6211.py,sha256=XotTBQVseK7y0nJB4Fx-npdhRHeH53IM84kGupWIprk,2257 +pyasn1_modules/rfc6402.py,sha256=ksg6YsacS9vJAzObqFOPRCoe8mpyvRDb43Z0v-QKhnM,17148 +pyasn1_modules/rfc6482.py,sha256=10_Xyb2TaPFx72IUCZtu81aH5rmYihhdL0P-PVby1ys,2085 +pyasn1_modules/rfc6486.py,sha256=a3_5OJvkz2G7xWOC0dqbNqJQDsHQAOU62AWin107c4k,1916 +pyasn1_modules/rfc6487.py,sha256=gTUVkFYJyUcr1E4uoeN2cXPNaXyjYbixupbBKFQA4jQ,472 +pyasn1_modules/rfc6664.py,sha256=nq8F5wDeO49FoBGVQDx8ivvg_GsubdWa1bpZM_40Tms,4270 +pyasn1_modules/rfc6955.py,sha256=FBVb8LpHKMZjR3wOJtm-BPbi5EMiRoGuUWh41r1soCU,2814 +pyasn1_modules/rfc6960.py,sha256=BhEDCLLrae4RaCpMuKJc0kw1bGs56V0_F-NxiO9ctuw,7913 +pyasn1_modules/rfc7030.py,sha256=t-s2BDyX3Zk2sy_jMQl-P2I2NXFOn7huu0wFcM-2sqs,1441 +pyasn1_modules/rfc7191.py,sha256=uMsBzJ9167wxsiPYDQUnZQFVFNfgUxnCwRNeKnXxNGM,7062 
+pyasn1_modules/rfc7229.py,sha256=GSiUz4QkYODfnIvLRXKiabyno9Gmd6CX0zWR7HoIpCk,743
+pyasn1_modules/rfc7292.py,sha256=wORjDGD_aqHoujB2wu6nNrEjYTw3VO_xDp-Qx0VWLbc,8478
+pyasn1_modules/rfc7296.py,sha256=eAZpZ2dgUhxbJrLLGtDff4UspauG7Tr5dj8WELYHnUM,885
+pyasn1_modules/rfc7508.py,sha256=ZmJFbQO934Fs8wxcpO0gg5fU0d8yEFlkkFD3KMUQbAE,2182
+pyasn1_modules/rfc7585.py,sha256=T0-sdzPJoop1jbB2RJ-wzUnf6t6CeD2eMMXpcz55JEg,1076
+pyasn1_modules/rfc7633.py,sha256=8P_fBWkoGk3rsk7SEAm6QZcPjoRGTRGQuasWMLOrLKY,841
+pyasn1_modules/rfc7773.py,sha256=6UGPWyVYuicKe6snZCnD1wuAu1MOVgzPoSALL2uvTrI,1315
+pyasn1_modules/rfc7894.py,sha256=HLaSBoOUB-_cSE5935TXAnuFBVpZBv6jBnLOPp_-LNk,2769
+pyasn1_modules/rfc7906.py,sha256=mDf1pWwVNlCcEQfswUhtQDStAnwS-5xbZtjMlfnWLdI,18921
+pyasn1_modules/rfc7914.py,sha256=JxWGnXV-V13xzOn7c7-_3vxDNpkPtdZIYU4KF2kFXR4,1493
+pyasn1_modules/rfc8017.py,sha256=pwPRSchvMtXuatcCLULHuvSL8kAPEqkC4aIJjd5vEAo,4178
+pyasn1_modules/rfc8018.py,sha256=8_49xA3vEOdlGUhasw2xTUv4TpHBvjRuoonMT_k1TTk,6166
+pyasn1_modules/rfc8103.py,sha256=pNYAFfKCNrg9ZmRKsNNwr2ooptEABF3gMaPbqCroRnQ,1017
+pyasn1_modules/rfc8209.py,sha256=9EQ077rjD9uoTZWIOGmeOaHLDDq0IRXh3Rt0eYB-Ysc,393
+pyasn1_modules/rfc8226.py,sha256=mudlVgrsJ6XeHnFmxBNW_NgcYcFsHUvK04_MTr3UkRM,4291
+pyasn1_modules/rfc8358.py,sha256=aiHaXQAaaP-q5c90x_uZHSpQRTB-yekwhe6V9-EtrFg,1136
+pyasn1_modules/rfc8360.py,sha256=T4sY6o2VLVPnZ9s4yJ8PzfVA8Y60ne-1KcVNtw5yt-s,1075
+pyasn1_modules/rfc8398.py,sha256=i3lwgf__9oJzOaaHJKWmDAx3d_deKNCCuvIDWqQWiJ4,1192
+pyasn1_modules/rfc8410.py,sha256=nteKyTKcIwVlgh1qUl-8kE63kKG-KgWtLrfF92TWyyQ,971
+pyasn1_modules/rfc8418.py,sha256=eTCPTOm6t-RyHd6PlowLogDzUO72lRddESYLiSiOpC0,1109
+pyasn1_modules/rfc8419.py,sha256=qcvBlXxqvsCvG_F6AKKjqBderqbWwBy8zjZOjAPdYU4,1704
+pyasn1_modules/rfc8479.py,sha256=rDKzrp-MmEF0t3E7lqKXhgwcggvx8NoWVbtJHGLxDYM,1142
+pyasn1_modules/rfc8494.py,sha256=GMht1RdAbjHLtSqHdJ2cLO8HXRz6SLIPE254T4oy0S4,2363
+pyasn1_modules/rfc8520.py,sha256=_o00lv2MYciOqo0UKjlZBQNY_MzzgQt1SV9VXCI0T9A,1496
+pyasn1_modules/rfc8619.py,sha256=qSYiBefLSFukLg6VIgR6dnhX-uBwJMItxqHjNXnBgM0,1136
+pyasn1_modules/rfc8649.py,sha256=oHCQK7g4vKs1B0IO9GgiidTyPOk4pz5bYkXSRmBOAHo,982
+pyasn1_modules/rfc8692.py,sha256=eLLbpx7CdcCSL3MgogE17WvPoV3VW-K1bo6dq4mRoeg,2098
+pyasn1_modules/rfc8696.py,sha256=iqGh7uhZ9wO2UU6Tk2tIBTw_uwOutogkksnDdg_-Qjg,3479
+pyasn1_modules/rfc8702.py,sha256=t7J9gvKZ3n02UCUjTHeJZWdcC8moBe2QmA_P6K8LVww,2739
+pyasn1_modules/rfc8708.py,sha256=iHNsHkk1G419l7KOQAZwq25yL9m6wyACmM0lS4f5XGw,978
+pyasn1_modules/rfc8769.py,sha256=vwRJCfdmar4FdQ-IhCHJRyyn4fVm_82WsVMuqBeDy5o,441
diff --git a/evalkit_internvl/lib/python3.10/site-packages/pyasn1_modules-0.4.1.dist-info/WHEEL b/evalkit_internvl/lib/python3.10/site-packages/pyasn1_modules-0.4.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..0fde4dd96cac9c2431a08860c658f1a5789618f6
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/pyasn1_modules-0.4.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: setuptools (74.1.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/logic/__pycache__/boolalg.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/logic/__pycache__/boolalg.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..966be9402b04c3b8fe5625cc462ed81b41994029
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/logic/__pycache__/boolalg.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:144baeb12c2535b1f89a6c90fc86c808c98d803195ed8d2d399ded498cf2066a
+size 100326
diff --git a/evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libjpeg.ceea7512.so.62 b/evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libjpeg.ceea7512.so.62
new file mode 100644
index 0000000000000000000000000000000000000000..77c99d55bc623eba835b47acfb233cfc5abc7b65
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libjpeg.ceea7512.so.62
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43436dd54ee4bf238f387dfba3d1321fdeb004415c807d4d3490c368bd5e5dec
+size 285328
diff --git a/evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libz.37eba27a.so.1 b/evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libz.37eba27a.so.1
new file mode 100644
index 0000000000000000000000000000000000000000..64cd6a309bad00dc38040a577191f6681e18bf89
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libz.37eba27a.so.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b0e682a9dc7fd4895a6783288f851b793dc89633f28714027974fa4d66f3914
+size 124744