ZTWHHH committed on
Commit bb135fc · verified · 1 Parent(s): 79a8e06

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. .gitattributes +5 -0
  2. evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/LICENSE +20 -0
  3. evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/RECORD +83 -0
  4. evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/REQUESTED +0 -0
  5. evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/WHEEL +5 -0
  6. evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/top_level.txt +1 -0
  7. evalkit_internvl/lib/python3.10/site-packages/contourpy-1.3.0.dist-info/INSTALLER +1 -0
  8. evalkit_internvl/lib/python3.10/site-packages/contourpy-1.3.0.dist-info/LICENSE +29 -0
  9. evalkit_internvl/lib/python3.10/site-packages/contourpy-1.3.0.dist-info/WHEEL +6 -0
  10. evalkit_internvl/lib/python3.10/site-packages/dateutil/__pycache__/easter.cpython-310.pyc +0 -0
  11. evalkit_internvl/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so +3 -0
  12. evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg-0.5.1.dist-info/LICENSE +25 -0
  13. evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg-0.5.1.dist-info/METADATA +41 -0
  14. evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_definitions.cpython-310.pyc +0 -0
  15. evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_parsing.cpython-310.pyc +0 -0
  16. evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_utils.cpython-310.pyc +0 -0
  17. evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/binaries/ffmpeg-linux64-v4.2.2 +3 -0
  18. evalkit_internvl/lib/python3.10/site-packages/joblib/_utils.py +83 -0
  19. evalkit_internvl/lib/python3.10/site-packages/joblib/compressor.py +570 -0
  20. evalkit_internvl/lib/python3.10/site-packages/joblib/disk.py +136 -0
  21. evalkit_internvl/lib/python3.10/site-packages/joblib/pool.py +354 -0
  22. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__init__.py +0 -0
  23. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/common.cpython-310.pyc +0 -0
  24. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_backports.cpython-310.pyc +0 -0
  25. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_cloudpickle_wrapper.cpython-310.pyc +0 -0
  26. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc +0 -0
  27. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc +0 -0
  28. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_disk.cpython-310.pyc +0 -0
  29. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc +0 -0
  30. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_hashing.cpython-310.pyc +0 -0
  31. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_init.cpython-310.pyc +0 -0
  32. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc +0 -0
  33. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_memmapping.cpython-310.pyc +0 -0
  34. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory.cpython-310.pyc +0 -0
  35. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc +0 -0
  36. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_missing_multiprocessing.cpython-310.pyc +0 -0
  37. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc +0 -0
  38. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc +0 -0
  39. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc +0 -0
  40. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc +0 -0
  41. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc +0 -0
  42. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc +0 -0
  43. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_testing.cpython-310.pyc +0 -0
  44. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc +0 -0
  45. evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/testutils.cpython-310.pyc +0 -0
  46. evalkit_internvl/lib/python3.10/site-packages/joblib/test/common.py +84 -0
  47. evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_backports.py +35 -0
  48. evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_cloudpickle_wrapper.py +27 -0
  49. evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_config.py +151 -0
  50. evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_dask.py +499 -0
.gitattributes CHANGED
@@ -1646,3 +1646,8 @@ evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/sing
  evalkit_internvl/lib/python3.10/site-packages/sympy/polys/benchmarks/__pycache__/bench_solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
  evalkit_internvl/lib/python3.10/site-packages/tiktoken/_tiktoken.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+ evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libjpeg.ceea7512.so.62 filter=lfs diff=lfs merge=lfs -text
+ evalkit_internvl/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ evalkit_internvl/lib/python3.10/site-packages/sympy/logic/__pycache__/boolalg.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+ evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libz.37eba27a.so.1 filter=lfs diff=lfs merge=lfs -text
+ evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/binaries/ffmpeg-linux64-v4.2.2 filter=lfs diff=lfs merge=lfs -text
evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/LICENSE ADDED
@@ -0,0 +1,20 @@
+ The MIT License (MIT)
+
+ Copyright (c) 2018 Alex Grönholm
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
+ this software and associated documentation files (the "Software"), to deal in
+ the Software without restriction, including without limitation the rights to
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ the Software, and to permit persons to whom the Software is furnished to do so,
+ subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/RECORD ADDED
@@ -0,0 +1,83 @@
+ anyio-3.7.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ anyio-3.7.1.dist-info/LICENSE,sha256=U2GsncWPLvX9LpsJxoKXwX8ElQkJu8gCO9uC6s8iwrA,1081
+ anyio-3.7.1.dist-info/METADATA,sha256=mOhfXPB7qKVQh3dUtp2NgLysa10jHWeDBNnRg-93A_c,4708
+ anyio-3.7.1.dist-info/RECORD,,
+ anyio-3.7.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ anyio-3.7.1.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+ anyio-3.7.1.dist-info/entry_points.txt,sha256=_d6Yu6uiaZmNe0CydowirE9Cmg7zUL2g08tQpoS3Qvc,39
+ anyio-3.7.1.dist-info/top_level.txt,sha256=QglSMiWX8_5dpoVAEIHdEYzvqFMdSYWmCj6tYw2ITkQ,6
+ anyio/__init__.py,sha256=Pq9lO03Zm5ynIPlhkquaOuIc1dTTeLGNUQ5HT5qwYMI,4073
+ anyio/__pycache__/__init__.cpython-310.pyc,,
+ anyio/__pycache__/from_thread.cpython-310.pyc,,
+ anyio/__pycache__/lowlevel.cpython-310.pyc,,
+ anyio/__pycache__/pytest_plugin.cpython-310.pyc,,
+ anyio/__pycache__/to_process.cpython-310.pyc,,
+ anyio/__pycache__/to_thread.cpython-310.pyc,,
+ anyio/_backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ anyio/_backends/__pycache__/__init__.cpython-310.pyc,,
+ anyio/_backends/__pycache__/_asyncio.cpython-310.pyc,,
+ anyio/_backends/__pycache__/_trio.cpython-310.pyc,,
+ anyio/_backends/_asyncio.py,sha256=fgwZmYnGOxT_pX0OZTPPgRdFqKLjnKvQUk7tsfuNmfM,67056
+ anyio/_backends/_trio.py,sha256=EJAj0tNi0JRM2y3QWP7oS4ct7wnjMSYDG8IZUWMta-E,30035
+ anyio/_core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ anyio/_core/__pycache__/__init__.cpython-310.pyc,,
+ anyio/_core/__pycache__/_compat.cpython-310.pyc,,
+ anyio/_core/__pycache__/_eventloop.cpython-310.pyc,,
+ anyio/_core/__pycache__/_exceptions.cpython-310.pyc,,
+ anyio/_core/__pycache__/_fileio.cpython-310.pyc,,
+ anyio/_core/__pycache__/_resources.cpython-310.pyc,,
+ anyio/_core/__pycache__/_signals.cpython-310.pyc,,
+ anyio/_core/__pycache__/_sockets.cpython-310.pyc,,
+ anyio/_core/__pycache__/_streams.cpython-310.pyc,,
+ anyio/_core/__pycache__/_subprocesses.cpython-310.pyc,,
+ anyio/_core/__pycache__/_synchronization.cpython-310.pyc,,
+ anyio/_core/__pycache__/_tasks.cpython-310.pyc,,
+ anyio/_core/__pycache__/_testing.cpython-310.pyc,,
+ anyio/_core/__pycache__/_typedattr.cpython-310.pyc,,
+ anyio/_core/_compat.py,sha256=XZfBUInEt7jaiTBI2Qbul7EpJdngbwTtG4Qj26un1YE,5726
+ anyio/_core/_eventloop.py,sha256=xJ8KflV1bJ9GAuQRr4o1ojv8wWya4nt_XARta8uLPwc,4083
+ anyio/_core/_exceptions.py,sha256=uOrN5l98o6UrOU6O3kPf0VCDl_zPP-kgZs4IyaLVgwU,2916
+ anyio/_core/_fileio.py,sha256=DWuIul5izCocmJpgqDDNKc_GhMUwayHKdM5R-sbT_A8,18026
+ anyio/_core/_resources.py,sha256=NbmU5O5UX3xEyACnkmYX28Fmwdl-f-ny0tHym26e0w0,435
+ anyio/_core/_signals.py,sha256=KKkZAYL08auydjZnK9S4FQsxx555jT4gXAMcTXdNaok,863
+ anyio/_core/_sockets.py,sha256=szcPd7kKBmlHnx8g_KJWZo2k6syouRNF2614ZrtqiV0,20667
+ anyio/_core/_streams.py,sha256=5gryxQiUisED8uFUAHje5O44RL9wyndNMANzzQWUn1U,1518
+ anyio/_core/_subprocesses.py,sha256=OSAcLAsjfCplXlRyTjWonfS1xU8d5MaZblXYqqY-BM4,4977
+ anyio/_core/_synchronization.py,sha256=Uquo_52vZ7iZzDDoaN_j-N7jeyAlefzOZ8Pxt9mU6gY,16747
+ anyio/_core/_tasks.py,sha256=1wZZWlpDkr6w3kMD629vzJDkPselDvx4XVElgTCVwyM,5316
+ anyio/_core/_testing.py,sha256=7Yll-DOI0uIlIF5VHLUpGGyDPWtDEjFZ85-6ZniwIJU,2217
+ anyio/_core/_typedattr.py,sha256=8o0gwQYSl04zlO9uHqcHu1T6hOw7peY9NW1mOX5DKnY,2551
+ anyio/abc/__init__.py,sha256=UkC-KDbyIoKeDUDhJciwANSoyzz_qaFh4Fb7_AvwjZc,2159
+ anyio/abc/__pycache__/__init__.cpython-310.pyc,,
+ anyio/abc/__pycache__/_resources.cpython-310.pyc,,
+ anyio/abc/__pycache__/_sockets.cpython-310.pyc,,
+ anyio/abc/__pycache__/_streams.cpython-310.pyc,,
+ anyio/abc/__pycache__/_subprocesses.cpython-310.pyc,,
+ anyio/abc/__pycache__/_tasks.cpython-310.pyc,,
+ anyio/abc/__pycache__/_testing.cpython-310.pyc,,
+ anyio/abc/_resources.py,sha256=h1rkzr3E0MFqdXLh9aLLXe-A5W7k_Jc-5XzNr6SJ4w4,763
+ anyio/abc/_sockets.py,sha256=WWYJ6HndKCEuvobAPDkmX0tjwN2FOxf3eTGb1DB7wHE,5243
+ anyio/abc/_streams.py,sha256=yGhOmlVI3W9whmzPuewwYQ2BrKhrUFuWZ4zpVLWOK84,6584
+ anyio/abc/_subprocesses.py,sha256=r-totaRbFX6kKV-4WTeuswz8n01aap8cvkYVQCRKN0M,2067
+ anyio/abc/_tasks.py,sha256=a_5DLyiCbp0K57LJPOyF-PZyXmUcv_p9VRXPFj_K03M,3413
+ anyio/abc/_testing.py,sha256=Eub7gXJ0tVPo_WN5iJAw10FrvC7C1uaL3b2neGr_pfs,1924
+ anyio/from_thread.py,sha256=aUVKXctPgZ5wK3p5VTyrtjDj9tSQSrH6xCjBuo-hv3A,16563
+ anyio/lowlevel.py,sha256=cOTncxRW5KeswqYQQdp0pfAw6OFWXius1SPhCYwHZL4,4647
+ anyio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ anyio/pytest_plugin.py,sha256=_Txgl0-I3kO1rk_KATXmIUV57C34hajcJCGcgV26CU0,5022
+ anyio/streams/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ anyio/streams/__pycache__/__init__.cpython-310.pyc,,
+ anyio/streams/__pycache__/buffered.cpython-310.pyc,,
+ anyio/streams/__pycache__/file.cpython-310.pyc,,
+ anyio/streams/__pycache__/memory.cpython-310.pyc,,
+ anyio/streams/__pycache__/stapled.cpython-310.pyc,,
+ anyio/streams/__pycache__/text.cpython-310.pyc,,
+ anyio/streams/__pycache__/tls.cpython-310.pyc,,
+ anyio/streams/buffered.py,sha256=2ifplNLwT73d1UKBxrkFdlC9wTAze9LhPL7pt_7cYgY,4473
+ anyio/streams/file.py,sha256=-NP6jMcUd2f1VJwgcxgiRHdEsNnhE0lANl0ov_i7FrE,4356
+ anyio/streams/memory.py,sha256=QZhc5qdomBpGCgrUVWAaqEBxI0oklVxK_62atW6tnNk,9274
+ anyio/streams/stapled.py,sha256=9u2GxpiOPsGtgO1qsj2tVoW4b8bgiwp5rSDs1BFKkLM,4275
+ anyio/streams/text.py,sha256=1K4ZCLKl2b7yywrW6wKEeMu3xyQHE_T0aU5_oC9GPTE,5043
+ anyio/streams/tls.py,sha256=TbdCz1KtfEnp3mxHvkROXRefhE6S1LHiwgWiJX8zYaU,12099
+ anyio/to_process.py,sha256=_RSsG8UME2nGxeFEdg3OEfv9XshSQwrMU7DAbwWGx9U,9242
+ anyio/to_thread.py,sha256=HVpTvBei2sSXgJJeNKdwhJwQaW76LDbb1htQ-Mc6zDs,2146
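Each RECORD row above follows the `path,hash,size` layout from the wheel/installed-distributions specs: the hash is a urlsafe-base64-encoded SHA-256 digest with the `=` padding stripped. A minimal sketch of computing such a hash (note the empty-file value matches the one that recurs in this RECORD):

```python
import base64
import hashlib

def record_hash(data: bytes) -> str:
    """RECORD-style hash: 'sha256=' + urlsafe base64 of the digest, padding stripped."""
    digest = hashlib.sha256(data).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# An empty file (e.g. anyio/py.typed, recorded with size 0 above) hashes to the
# value that appears several times in this RECORD:
print(record_hash(b""))  # sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU
```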
evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/REQUESTED ADDED
File without changes
evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: bdist_wheel (0.40.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
evalkit_internvl/lib/python3.10/site-packages/anyio-3.7.1.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ anyio
evalkit_internvl/lib/python3.10/site-packages/contourpy-1.3.0.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
evalkit_internvl/lib/python3.10/site-packages/contourpy-1.3.0.dist-info/LICENSE ADDED
@@ -0,0 +1,29 @@
+ BSD 3-Clause License
+
+ Copyright (c) 2021-2024, ContourPy Developers.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice, this
+    list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
evalkit_internvl/lib/python3.10/site-packages/contourpy-1.3.0.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
+ Wheel-Version: 1.0
+ Generator: meson
+ Root-Is-Purelib: false
+ Tag: cp310-cp310-manylinux_2_17_x86_64
+ Tag: cp310-cp310-manylinux2014_x86_64
+
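The `Tag:` lines above are standard wheel compatibility tags (interpreter-ABI-platform), which is why this wheel only installs on a CPython 3.10 manylinux x86_64 interpreter. A hedged sketch of checking such a tag against the running interpreter, assuming the third-party `packaging` library is available:

```python
from packaging.tags import Tag, sys_tags

# Tag taken from the contourpy WHEEL metadata above.
wheel_tag = Tag("cp310", "cp310", "manylinux2014_x86_64")

# sys_tags() yields every tag the current interpreter supports, most specific first.
print("installable here:", wheel_tag in set(sys_tags()))
```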
evalkit_internvl/lib/python3.10/site-packages/dateutil/__pycache__/easter.cpython-310.pyc ADDED
Binary file (2.2 kB).
evalkit_internvl/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6074265dd34a9e3432470e05c02d1670d530f8201a34419216e7b6eb84757dfb
+ size 771776
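What Git actually stores for this shared object is not the binary itself but a Git LFS pointer: a small text stub carrying the spec version, the SHA-256 of the real blob, and its byte size. A minimal sketch of parsing such a pointer (fields follow the stub shown above):

```python
def parse_lfs_pointer(text: str) -> dict:
    """Split a Git LFS pointer file into its key/value fields."""
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:6074265dd34a9e3432470e05c02d1670d530f8201a34419216e7b6eb84757dfb
size 771776"""

info = parse_lfs_pointer(pointer)
print(info["oid"], int(info["size"]))  # sha256:6074...dfb 771776
```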
evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg-0.5.1.dist-info/LICENSE ADDED
@@ -0,0 +1,25 @@
+ BSD 2-Clause License
+
+ Copyright (c) 2019, imageio
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg-0.5.1.dist-info/METADATA ADDED
@@ -0,0 +1,41 @@
+ Metadata-Version: 2.1
+ Name: imageio-ffmpeg
+ Version: 0.5.1
+ Summary: FFMPEG wrapper for Python
+ Home-page: https://github.com/imageio/imageio-ffmpeg
+ Download-URL: http://pypi.python.org/pypi/imageio-ffmpeg
+ Author: imageio contributors
+ Author-email: almar.klein@gmail.com
+ License: BSD-2-Clause
+ Keywords: video ffmpeg
+ Platform: any
+ Classifier: Development Status :: 5 - Production/Stable
+ Classifier: Intended Audience :: Science/Research
+ Classifier: Intended Audience :: Education
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: BSD License
+ Classifier: Operating System :: MacOS :: MacOS X
+ Classifier: Operating System :: Microsoft :: Windows
+ Classifier: Operating System :: POSIX
+ Classifier: Programming Language :: Python
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.7
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Provides: imageio_ffmpeg
+ Requires-Python: >=3.5
+ License-File: LICENSE
+ Requires-Dist: setuptools
+
+ FFMPEG wrapper for Python.
+
+ Note that the platform-specific wheels contain the binary executable
+ of ffmpeg, which makes this package around 60 MiB in size.
+ I guess that's the cost for being able to read/write video files.
+
+ For Linux users: the above is not the case when installing via your
+ Linux package manager (if that is possible), because this package would
+ simply depend on ffmpeg in that case.
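Because the wheel ships the ffmpeg executable itself (the `binaries/ffmpeg-linux64-v4.2.2` file added later in this commit), downstream code typically just asks the package where that binary lives. A small usage sketch against imageio-ffmpeg's documented public API; the input filename is hypothetical:

```python
import imageio_ffmpeg

# Path to the bundled binary, e.g. .../imageio_ffmpeg/binaries/ffmpeg-linux64-v4.2.2
print(imageio_ffmpeg.get_ffmpeg_exe())
print(imageio_ffmpeg.get_ffmpeg_version())

# Stream frames out of a (hypothetical) video file as raw RGB bytes;
# the generator yields a metadata dict first, then one bytes object per frame.
reader = imageio_ffmpeg.read_frames("input.mp4")
meta = next(reader)
print(meta["size"], meta["fps"])
```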
evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_definitions.cpython-310.pyc ADDED
Binary file (1.2 kB).
evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_parsing.cpython-310.pyc ADDED
Binary file (5.42 kB).
evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (3.18 kB).
evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/binaries/ffmpeg-linux64-v4.2.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:700073daef5c23bbcb18c2eae60553a454a5221ec19b4a88c8c367a664671a7c
+ size 73807592
evalkit_internvl/lib/python3.10/site-packages/joblib/_utils.py ADDED
@@ -0,0 +1,83 @@
+ # Adapted from https://stackoverflow.com/a/9558001/2536294
+
+ import ast
+ from dataclasses import dataclass
+ import operator as op
+
+
+ from ._multiprocessing_helpers import mp
+
+ if mp is not None:
+     from .externals.loky.process_executor import _ExceptionWithTraceback
+
+
+ # supported operators
+ operators = {
+     ast.Add: op.add,
+     ast.Sub: op.sub,
+     ast.Mult: op.mul,
+     ast.Div: op.truediv,
+     ast.FloorDiv: op.floordiv,
+     ast.Mod: op.mod,
+     ast.Pow: op.pow,
+     ast.USub: op.neg,
+ }
+
+
+ def eval_expr(expr):
+     """
+     >>> eval_expr('2*6')
+     12
+     >>> eval_expr('2**6')
+     64
+     >>> eval_expr('1 + 2*3**(4) / (6 + -7)')
+     -161.0
+     """
+     try:
+         return eval_(ast.parse(expr, mode="eval").body)
+     except (TypeError, SyntaxError, KeyError) as e:
+         raise ValueError(
+             f"{expr!r} is not a valid or supported arithmetic expression."
+         ) from e
+
+
+ def eval_(node):
+     if isinstance(node, ast.Constant):  # <constant>
+         return node.value
+     elif isinstance(node, ast.BinOp):  # <left> <operator> <right>
+         return operators[type(node.op)](eval_(node.left), eval_(node.right))
+     elif isinstance(node, ast.UnaryOp):  # <operator> <operand> e.g., -1
+         return operators[type(node.op)](eval_(node.operand))
+     else:
+         raise TypeError(node)
+
+
+ @dataclass(frozen=True)
+ class _Sentinel:
+     """A sentinel to mark a parameter as not explicitly set"""
+     default_value: object
+
+     def __repr__(self):
+         return f"default({self.default_value!r})"
+
+
+ class _TracebackCapturingWrapper:
+     """Protect function call and return error with traceback."""
+
+     def __init__(self, func):
+         self.func = func
+
+     def __call__(self, **kwargs):
+         try:
+             return self.func(**kwargs)
+         except BaseException as e:
+             return _ExceptionWithTraceback(e)
+
+
+ def _retrieve_traceback_capturing_wrapped_call(out):
+     if isinstance(out, _ExceptionWithTraceback):
+         rebuild, args = out.__reduce__()
+         out = rebuild(*args)
+     if isinstance(out, BaseException):
+         raise out
+     return out
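The `eval_expr`/`eval_` pair above is the classic AST-whitelist pattern: parse the string, then walk the tree and apply only operators found in the `operators` dict, so arbitrary names and calls are rejected rather than executed. A standalone sketch of the same idea, independent of joblib's internals:

```python
import ast
import operator as op

SAFE_OPS = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,
            ast.Div: op.truediv, ast.Pow: op.pow, ast.USub: op.neg}

def safe_eval(expr: str):
    """Evaluate an arithmetic expression without the risks of eval()."""
    def walk(node):
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in SAFE_OPS:
            return SAFE_OPS[type(node.op)](walk(node.left), walk(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in SAFE_OPS:
            return SAFE_OPS[type(node.op)](walk(node.operand))
        raise ValueError(f"unsupported construct: {ast.dump(node)}")
    return walk(ast.parse(expr, mode="eval").body)

print(safe_eval("1 + 2*3**(4) / (6 + -7)"))  # -161.0, matching the doctest above
# safe_eval("__import__('os')") raises ValueError instead of importing anything.
```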
evalkit_internvl/lib/python3.10/site-packages/joblib/compressor.py ADDED
@@ -0,0 +1,570 @@
+ """Classes and functions for managing compressors."""
+
+ import io
+ import zlib
+ from joblib.backports import LooseVersion
+
+ try:
+     from threading import RLock
+ except ImportError:
+     from dummy_threading import RLock
+
+ try:
+     import bz2
+ except ImportError:
+     bz2 = None
+
+ try:
+     import lz4
+     from lz4.frame import LZ4FrameFile
+ except ImportError:
+     lz4 = None
+
+ try:
+     import lzma
+ except ImportError:
+     lzma = None
+
+
+ LZ4_NOT_INSTALLED_ERROR = ('LZ4 is not installed. Install it with pip: '
+                            'https://python-lz4.readthedocs.io/')
+
+ # Registered compressors
+ _COMPRESSORS = {}
+
+ # Magic numbers of supported compression file formats.
+ _ZFILE_PREFIX = b'ZF'  # used with pickle files created before 0.9.3.
+ _ZLIB_PREFIX = b'\x78'
+ _GZIP_PREFIX = b'\x1f\x8b'
+ _BZ2_PREFIX = b'BZ'
+ _XZ_PREFIX = b'\xfd\x37\x7a\x58\x5a'
+ _LZMA_PREFIX = b'\x5d\x00'
+ _LZ4_PREFIX = b'\x04\x22\x4D\x18'
+
+
+ def register_compressor(compressor_name, compressor,
+                         force=False):
+     """Register a new compressor.
+
+     Parameters
+     ----------
+     compressor_name: str.
+         The name of the compressor.
+     compressor: CompressorWrapper
+         An instance of a 'CompressorWrapper'.
+     """
+     global _COMPRESSORS
+     if not isinstance(compressor_name, str):
+         raise ValueError("Compressor name should be a string, "
+                          "'{}' given.".format(compressor_name))
+
+     if not isinstance(compressor, CompressorWrapper):
+         raise ValueError("Compressor should implement the CompressorWrapper "
+                          "interface, '{}' given.".format(compressor))
+
+     if (compressor.fileobj_factory is not None and
+             (not hasattr(compressor.fileobj_factory, 'read') or
+              not hasattr(compressor.fileobj_factory, 'write') or
+              not hasattr(compressor.fileobj_factory, 'seek') or
+              not hasattr(compressor.fileobj_factory, 'tell'))):
+         raise ValueError("Compressor 'fileobj_factory' attribute should "
+                          "implement the file object interface, '{}' given."
+                          .format(compressor.fileobj_factory))
+
+     if compressor_name in _COMPRESSORS and not force:
+         raise ValueError("Compressor '{}' already registered."
+                          .format(compressor_name))
+
+     _COMPRESSORS[compressor_name] = compressor
+
+
+ class CompressorWrapper():
+     """A wrapper around a compressor file object.
+
+     Attributes
+     ----------
+     obj: a file-like object
+         The object must implement the buffer interface and will be used
+         internally to compress/decompress the data.
+     prefix: bytestring
+         A bytestring corresponding to the magic number that identifies the
+         file format associated to the compressor.
+     extension: str
+         The file extension used to automatically select this compressor during
+         a dump to a file.
+     """
+
+     def __init__(self, obj, prefix=b'', extension=''):
+         self.fileobj_factory = obj
+         self.prefix = prefix
+         self.extension = extension
+
+     def compressor_file(self, fileobj, compresslevel=None):
+         """Returns an instance of a compressor file object."""
+         if compresslevel is None:
+             return self.fileobj_factory(fileobj, 'wb')
+         else:
+             return self.fileobj_factory(fileobj, 'wb',
+                                         compresslevel=compresslevel)
+
+     def decompressor_file(self, fileobj):
+         """Returns an instance of a decompressor file object."""
+         return self.fileobj_factory(fileobj, 'rb')
+
+
+ class BZ2CompressorWrapper(CompressorWrapper):
+
+     prefix = _BZ2_PREFIX
+     extension = '.bz2'
+
+     def __init__(self):
+         if bz2 is not None:
+             self.fileobj_factory = bz2.BZ2File
+         else:
+             self.fileobj_factory = None
+
+     def _check_versions(self):
+         if bz2 is None:
+             raise ValueError('bz2 module is not compiled on your python '
+                              'standard library.')
+
+     def compressor_file(self, fileobj, compresslevel=None):
+         """Returns an instance of a compressor file object."""
+         self._check_versions()
+         if compresslevel is None:
+             return self.fileobj_factory(fileobj, 'wb')
+         else:
+             return self.fileobj_factory(fileobj, 'wb',
+                                         compresslevel=compresslevel)
+
+     def decompressor_file(self, fileobj):
+         """Returns an instance of a decompressor file object."""
+         self._check_versions()
+         fileobj = self.fileobj_factory(fileobj, 'rb')
+         return fileobj
+
+
+ class LZMACompressorWrapper(CompressorWrapper):
+
+     prefix = _LZMA_PREFIX
+     extension = '.lzma'
+     _lzma_format_name = 'FORMAT_ALONE'
+
+     def __init__(self):
+         if lzma is not None:
+             self.fileobj_factory = lzma.LZMAFile
+             self._lzma_format = getattr(lzma, self._lzma_format_name)
+         else:
+             self.fileobj_factory = None
+
+     def _check_versions(self):
+         if lzma is None:
+             raise ValueError('lzma module is not compiled on your python '
+                              'standard library.')
+
+     def compressor_file(self, fileobj, compresslevel=None):
+         """Returns an instance of a compressor file object."""
+         if compresslevel is None:
+             return self.fileobj_factory(fileobj, 'wb',
+                                         format=self._lzma_format)
+         else:
+             return self.fileobj_factory(fileobj, 'wb',
+                                         format=self._lzma_format,
+                                         preset=compresslevel)
+
+     def decompressor_file(self, fileobj):
+         """Returns an instance of a decompressor file object."""
+         return lzma.LZMAFile(fileobj, 'rb')
+
+
+ class XZCompressorWrapper(LZMACompressorWrapper):
+
+     prefix = _XZ_PREFIX
+     extension = '.xz'
+     _lzma_format_name = 'FORMAT_XZ'
+
+
+ class LZ4CompressorWrapper(CompressorWrapper):
+
+     prefix = _LZ4_PREFIX
+     extension = '.lz4'
+
+     def __init__(self):
+         if lz4 is not None:
+             self.fileobj_factory = LZ4FrameFile
+         else:
+             self.fileobj_factory = None
+
+     def _check_versions(self):
+         if lz4 is None:
+             raise ValueError(LZ4_NOT_INSTALLED_ERROR)
+         lz4_version = lz4.__version__
+         if lz4_version.startswith("v"):
+             lz4_version = lz4_version[1:]
+         if LooseVersion(lz4_version) < LooseVersion('0.19'):
+             raise ValueError(LZ4_NOT_INSTALLED_ERROR)
+
+     def compressor_file(self, fileobj, compresslevel=None):
+         """Returns an instance of a compressor file object."""
+         self._check_versions()
+         if compresslevel is None:
+             return self.fileobj_factory(fileobj, 'wb')
+         else:
+             return self.fileobj_factory(fileobj, 'wb',
+                                         compression_level=compresslevel)
+
+     def decompressor_file(self, fileobj):
+         """Returns an instance of a decompressor file object."""
+         self._check_versions()
+         return self.fileobj_factory(fileobj, 'rb')
+
+
+ ###############################################################################
+ # base file compression/decompression object definition
+ _MODE_CLOSED = 0
+ _MODE_READ = 1
+ _MODE_READ_EOF = 2
+ _MODE_WRITE = 3
+ _BUFFER_SIZE = 8192
+
+
+ class BinaryZlibFile(io.BufferedIOBase):
+     """A file object providing transparent zlib (de)compression.
+
+     TODO python2_drop: is it still needed since we dropped Python 2 support A
+     BinaryZlibFile can act as a wrapper for an existing file object, or refer
+     directly to a named file on disk.
+
+     Note that BinaryZlibFile provides only a *binary* file interface: data read
+     is returned as bytes, and data to be written should be given as bytes.
+
+     This object is an adaptation of the BZ2File object and is compatible with
+     versions of python >= 2.7.
+
+     If filename is a str or bytes object, it gives the name
+     of the file to be opened. Otherwise, it should be a file object,
+     which will be used to read or write the compressed data.
+
+     mode can be 'rb' for reading (default) or 'wb' for (over)writing
+
+     If mode is 'wb', compresslevel can be a number between 1
+     and 9 specifying the level of compression: 1 produces the least
+     compression, and 9 produces the most compression. 3 is the default.
+     """
+
+     wbits = zlib.MAX_WBITS
+
+     def __init__(self, filename, mode="rb", compresslevel=3):
+         # This lock must be recursive, so that BufferedIOBase's
+         # readline(), readlines() and writelines() don't deadlock.
+         self._lock = RLock()
+         self._fp = None
+         self._closefp = False
+         self._mode = _MODE_CLOSED
+         self._pos = 0
+         self._size = -1
+         self.compresslevel = compresslevel
+
+         if not isinstance(compresslevel, int) or not (1 <= compresslevel <= 9):
+             raise ValueError("'compresslevel' must be an integer "
+                              "between 1 and 9. You provided 'compresslevel={}'"
+                              .format(compresslevel))
+
+         if mode == "rb":
+             self._mode = _MODE_READ
+             self._decompressor = zlib.decompressobj(self.wbits)
+             self._buffer = b""
+             self._buffer_offset = 0
+         elif mode == "wb":
+             self._mode = _MODE_WRITE
+             self._compressor = zlib.compressobj(self.compresslevel,
+                                                 zlib.DEFLATED, self.wbits,
+                                                 zlib.DEF_MEM_LEVEL, 0)
+         else:
+             raise ValueError("Invalid mode: %r" % (mode,))
+
+         if isinstance(filename, str):
+             self._fp = io.open(filename, mode)
+             self._closefp = True
+         elif hasattr(filename, "read") or hasattr(filename, "write"):
+             self._fp = filename
+         else:
+             raise TypeError("filename must be a str or bytes object, "
+                             "or a file")
+
+     def close(self):
+         """Flush and close the file.
+
+         May be called more than once without error. Once the file is
+         closed, any other operation on it will raise a ValueError.
+         """
+         with self._lock:
+             if self._mode == _MODE_CLOSED:
+                 return
+             try:
+                 if self._mode in (_MODE_READ, _MODE_READ_EOF):
+                     self._decompressor = None
+                 elif self._mode == _MODE_WRITE:
+                     self._fp.write(self._compressor.flush())
+                     self._compressor = None
+             finally:
+                 try:
+                     if self._closefp:
+                         self._fp.close()
+                 finally:
+                     self._fp = None
+                     self._closefp = False
+                     self._mode = _MODE_CLOSED
+                     self._buffer = b""
+                     self._buffer_offset = 0
+
+     @property
+     def closed(self):
+         """True if this file is closed."""
+         return self._mode == _MODE_CLOSED
+
+     def fileno(self):
+         """Return the file descriptor for the underlying file."""
+         self._check_not_closed()
+         return self._fp.fileno()
+
+     def seekable(self):
+         """Return whether the file supports seeking."""
+         return self.readable() and self._fp.seekable()
+
+     def readable(self):
+         """Return whether the file was opened for reading."""
+         self._check_not_closed()
+         return self._mode in (_MODE_READ, _MODE_READ_EOF)
+
+     def writable(self):
+         """Return whether the file was opened for writing."""
+         self._check_not_closed()
+         return self._mode == _MODE_WRITE
+
+     # Mode-checking helper functions.
+
+     def _check_not_closed(self):
+         if self.closed:
+             fname = getattr(self._fp, 'name', None)
+             msg = "I/O operation on closed file"
+             if fname is not None:
+                 msg += " {}".format(fname)
+             msg += "."
+             raise ValueError(msg)
+
+     def _check_can_read(self):
+         if self._mode not in (_MODE_READ, _MODE_READ_EOF):
+             self._check_not_closed()
+             raise io.UnsupportedOperation("File not open for reading")
+
+     def _check_can_write(self):
+         if self._mode != _MODE_WRITE:
+             self._check_not_closed()
+             raise io.UnsupportedOperation("File not open for writing")
+
+     def _check_can_seek(self):
+         if self._mode not in (_MODE_READ, _MODE_READ_EOF):
+             self._check_not_closed()
+             raise io.UnsupportedOperation("Seeking is only supported "
+                                           "on files open for reading")
+         if not self._fp.seekable():
+             raise io.UnsupportedOperation("The underlying file object "
+                                           "does not support seeking")
+
+     # Fill the readahead buffer if it is empty. Returns False on EOF.
+     def _fill_buffer(self):
+         if self._mode == _MODE_READ_EOF:
+             return False
+         # Depending on the input data, our call to the decompressor may not
+         # return any data. In this case, try again after reading another block.
+         while self._buffer_offset == len(self._buffer):
+             try:
+                 rawblock = (self._decompressor.unused_data or
+                             self._fp.read(_BUFFER_SIZE))
+                 if not rawblock:
+                     raise EOFError
+             except EOFError:
+                 # End-of-stream marker and end of file. We're good.
+                 self._mode = _MODE_READ_EOF
+                 self._size = self._pos
+                 return False
+             else:
+                 self._buffer = self._decompressor.decompress(rawblock)
+             self._buffer_offset = 0
+         return True
+
+     # Read data until EOF.
+     # If return_data is false, consume the data without returning it.
+     def _read_all(self, return_data=True):
+         # The loop assumes that _buffer_offset is 0. Ensure that this is true.
+         self._buffer = self._buffer[self._buffer_offset:]
+         self._buffer_offset = 0
+
+         blocks = []
+         while self._fill_buffer():
+             if return_data:
+                 blocks.append(self._buffer)
+             self._pos += len(self._buffer)
+             self._buffer = b""
+         if return_data:
+             return b"".join(blocks)
+
+     # Read a block of up to n bytes.
+     # If return_data is false, consume the data without returning it.
+     def _read_block(self, n_bytes, return_data=True):
+         # If we have enough data buffered, return immediately.
+         end = self._buffer_offset + n_bytes
+         if end <= len(self._buffer):
+             data = self._buffer[self._buffer_offset: end]
+             self._buffer_offset = end
+             self._pos += len(data)
+             return data if return_data else None
+
+         # The loop assumes that _buffer_offset is 0. Ensure that this is true.
+         self._buffer = self._buffer[self._buffer_offset:]
+         self._buffer_offset = 0
+
+         blocks = []
+         while n_bytes > 0 and self._fill_buffer():
+             if n_bytes < len(self._buffer):
+                 data = self._buffer[:n_bytes]
+                 self._buffer_offset = n_bytes
+             else:
+                 data = self._buffer
+                 self._buffer = b""
+             if return_data:
+                 blocks.append(data)
+             self._pos += len(data)
+             n_bytes -= len(data)
+         if return_data:
+             return b"".join(blocks)
+
+     def read(self, size=-1):
+         """Read up to size uncompressed bytes from the file.
+
+         If size is negative or omitted, read until EOF is reached.
+         Returns b'' if the file is already at EOF.
+         """
+         with self._lock:
+             self._check_can_read()
+             if size == 0:
+                 return b""
+             elif size < 0:
+                 return self._read_all()
+             else:
+                 return self._read_block(size)
+
+     def readinto(self, b):
+         """Read up to len(b) bytes into b.
+
+         Returns the number of bytes read (0 for EOF).
+         """
+         with self._lock:
+             return io.BufferedIOBase.readinto(self, b)
+
+     def write(self, data):
+         """Write a byte string to the file.
+
+         Returns the number of uncompressed bytes written, which is
+         always len(data). Note that due to buffering, the file on disk
+         may not reflect the data written until close() is called.
+         """
+         with self._lock:
+             self._check_can_write()
+             # Convert data type if called by io.BufferedWriter.
+             if isinstance(data, memoryview):
+                 data = data.tobytes()
+
+             compressed = self._compressor.compress(data)
+             self._fp.write(compressed)
+             self._pos += len(data)
+             return len(data)
+
+     # Rewind the file to the beginning of the data stream.
+     def _rewind(self):
+         self._fp.seek(0, 0)
+         self._mode = _MODE_READ
+         self._pos = 0
+         self._decompressor = zlib.decompressobj(self.wbits)
+         self._buffer = b""
+         self._buffer_offset = 0
+
+     def seek(self, offset, whence=0):
+         """Change the file position.
+
+         The new position is specified by offset, relative to the
+         position indicated by whence. Values for whence are:
+
+             0: start of stream (default); offset must not be negative
+             1: current stream position
+             2: end of stream; offset must not be positive
+
+         Returns the new file position.
+
+         Note that seeking is emulated, so depending on the parameters,
+         this operation may be extremely slow.
+         """
+         with self._lock:
+             self._check_can_seek()
+
+             # Recalculate offset as an absolute file position.
+             if whence == 0:
+                 pass
+             elif whence == 1:
+                 offset = self._pos + offset
+             elif whence == 2:
+                 # Seeking relative to EOF - we need to know the file's size.
+                 if self._size < 0:
+                     self._read_all(return_data=False)
+                 offset = self._size + offset
+             else:
+                 raise ValueError("Invalid value for whence: %s" % (whence,))
+
+             # Make it so that offset is the number of bytes to skip forward.
+             if offset < self._pos:
+                 self._rewind()
+             else:
+                 offset -= self._pos
+
+             # Read and discard data until we reach the desired position.
+             self._read_block(offset, return_data=False)
+
+             return self._pos
+
+     def tell(self):
+         """Return the current file position."""
+         with self._lock:
+             self._check_not_closed()
+             return self._pos
+
+
+ class ZlibCompressorWrapper(CompressorWrapper):
+
+     def __init__(self):
+         CompressorWrapper.__init__(self, obj=BinaryZlibFile,
+                                    prefix=_ZLIB_PREFIX, extension='.z')
+
+
+ class BinaryGzipFile(BinaryZlibFile):
+     """A file object providing transparent gzip (de)compression.
+
+     If filename is a str or bytes object, it gives the name
+     of the file to be opened. Otherwise, it should be a file object,
+     which will be used to read or write the compressed data.
+
+     mode can be 'rb' for reading (default) or 'wb' for (over)writing
+
+     If mode is 'wb', compresslevel can be a number between 1
+     and 9 specifying the level of compression: 1 produces the least
+     compression, and 9 produces the most compression. 3 is the default.
+     """
+
+     wbits = 31  # zlib compressor/decompressor wbits value for gzip format.
+
+
+ class GzipCompressorWrapper(CompressorWrapper):
+
+     def __init__(self):
+         CompressorWrapper.__init__(self, obj=BinaryGzipFile,
+                                    prefix=_GZIP_PREFIX, extension='.gz')
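The `_*_PREFIX` constants at the top of this module are the magic numbers joblib uses to sniff a file's compression format before dispatching to the matching wrapper. A minimal standalone sketch of that detection step, using the same prefixes with a simplified dispatch:

```python
# Magic-number prefixes copied from the module above. Insertion order matters:
# the one-byte zlib prefix is the weakest match, so it is checked last.
_PREFIXES = {
    b'\x1f\x8b': 'gzip',
    b'BZ': 'bz2',
    b'\xfd\x37\x7a\x58\x5a': 'xz',
    b'\x5d\x00': 'lzma',
    b'\x04\x22\x4d\x18': 'lz4',
    b'\x78': 'zlib',
}

def sniff_compressor(path: str) -> str:
    """Guess the compression format of a file from its first bytes."""
    with open(path, 'rb') as f:
        head = f.read(8)
    for prefix, name in _PREFIXES.items():
        if head.startswith(prefix):
            return name
    return 'not-compressed'
```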
evalkit_internvl/lib/python3.10/site-packages/joblib/disk.py ADDED
@@ -0,0 +1,136 @@
+ """
+ Disk management utilities.
+ """
+
+ # Authors: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
+ #          Lars Buitinck
+ # Copyright (c) 2010 Gael Varoquaux
+ # License: BSD Style, 3 clauses.
+
+
+ import os
+ import sys
+ import time
+ import errno
+ import shutil
+
+ from multiprocessing import util
+
+
+ try:
+     WindowsError
+ except NameError:
+     WindowsError = OSError
+
+
+ def disk_used(path):
+     """ Return the disk usage in a directory."""
+     size = 0
+     for file in os.listdir(path) + ['.']:
+         stat = os.stat(os.path.join(path, file))
+         if hasattr(stat, 'st_blocks'):
+             size += stat.st_blocks * 512
+         else:
+             # on some platform st_blocks is not available (e.g., Windows)
+             # approximate by rounding to next multiple of 512
+             size += (stat.st_size // 512 + 1) * 512
+     # We need to convert to int to avoid having longs on some systems (we
+     # don't want longs to avoid problems we SQLite)
+     return int(size / 1024.)
+
+
+ def memstr_to_bytes(text):
+     """ Convert a memory text to its value in bytes.
+     """
+     kilo = 1024
+     units = dict(K=kilo, M=kilo ** 2, G=kilo ** 3)
+     try:
+         size = int(units[text[-1]] * float(text[:-1]))
+     except (KeyError, ValueError) as e:
+         raise ValueError(
+             "Invalid literal for size give: %s (type %s) should be "
+             "alike '10G', '500M', '50K'." % (text, type(text))) from e
+     return size
+
+
+ def mkdirp(d):
+     """Ensure directory d exists (like mkdir -p on Unix)
+     No guarantee that the directory is writable.
+     """
+     try:
+         os.makedirs(d)
+     except OSError as e:
+         if e.errno != errno.EEXIST:
+             raise
+
+
+ # if a rmtree operation fails in rm_subdirs, wait for this much time (in secs),
+ # then retry up to RM_SUBDIRS_N_RETRY times. If it still fails, raise the
+ # exception. this mechanism ensures that the sub-process gc have the time to
+ # collect and close the memmaps before we fail.
+ RM_SUBDIRS_RETRY_TIME = 0.1
+ RM_SUBDIRS_N_RETRY = 10
+
+
+ def rm_subdirs(path, onerror=None):
+     """Remove all subdirectories in this path.
+
+     The directory indicated by `path` is left in place, and its subdirectories
+     are erased.
+
+     If onerror is set, it is called to handle the error with arguments (func,
+     path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
+     path is the argument to that function that caused it to fail; and
+     exc_info is a tuple returned by sys.exc_info(). If onerror is None,
+     an exception is raised.
+     """
+
+     # NOTE this code is adapted from the one in shutil.rmtree, and is
+     # just as fast
+
+     names = []
+     try:
+         names = os.listdir(path)
+     except os.error:
+         if onerror is not None:
+             onerror(os.listdir, path, sys.exc_info())
+         else:
+             raise
+
+     for name in names:
+         fullname = os.path.join(path, name)
+         delete_folder(fullname, onerror=onerror)
+
+
+ def delete_folder(folder_path, onerror=None, allow_non_empty=True):
+     """Utility function to cleanup a temporary folder if it still exists."""
+     if os.path.isdir(folder_path):
+         if onerror is not None:
+             shutil.rmtree(folder_path, False, onerror)
+         else:
+             # allow the rmtree to fail once, wait and re-try.
+             # if the error is raised again, fail
+             err_count = 0
+             while True:
+                 files = os.listdir(folder_path)
+                 try:
+                     if len(files) == 0 or allow_non_empty:
+                         shutil.rmtree(
+                             folder_path, ignore_errors=False, onerror=None
+                         )
+                         util.debug(
+                             "Successfully deleted {}".format(folder_path))
+                         break
+                     else:
+                         raise OSError(
+                             "Expected empty folder {} but got {} "
+                             "files.".format(folder_path, len(files))
+                         )
+                 except (OSError, WindowsError):
+                     err_count += 1
+                     if err_count > RM_SUBDIRS_N_RETRY:
+                         # the folder cannot be deleted right now. It maybe
+                         # because some temporary files have not been deleted
+                         # yet.
+                         raise
+                 time.sleep(RM_SUBDIRS_RETRY_TIME)
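`memstr_to_bytes` above is the single-suffix parser joblib uses for size thresholds such as `max_nbytes='1M'`: the last character selects a power-of-1024 unit and the rest is the multiplier. A quick sketch of the expected values, re-deriving them from the same units table:

```python
kilo = 1024
units = dict(K=kilo, M=kilo ** 2, G=kilo ** 3)

for text in ('50K', '500M', '10G'):
    print(text, '->', int(units[text[-1]] * float(text[:-1])))
# 50K -> 51200
# 500M -> 524288000
# 10G -> 10737418240
```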
evalkit_internvl/lib/python3.10/site-packages/joblib/pool.py ADDED
@@ -0,0 +1,354 @@
+ """Custom implementation of multiprocessing.Pool with custom pickler.
+
+ This module provides efficient ways of working with data stored in
+ shared memory with numpy.memmap arrays without inducing any memory
+ copy between the parent and child processes.
+
+ This module should not be imported if multiprocessing is not
+ available as it implements subclasses of multiprocessing Pool
+ that uses a custom alternative to SimpleQueue.
+
+ """
+ # Author: Olivier Grisel <olivier.grisel@ensta.org>
+ # Copyright: 2012, Olivier Grisel
+ # License: BSD 3 clause
+
+ import copyreg
+ import sys
+ import warnings
+ from time import sleep
+
+ try:
+     WindowsError
+ except NameError:
+     WindowsError = type(None)
+
+ from pickle import Pickler
+
+ from pickle import HIGHEST_PROTOCOL
+ from io import BytesIO
+
+ from ._memmapping_reducer import get_memmapping_reducers
+ from ._memmapping_reducer import TemporaryResourcesManager
+ from ._multiprocessing_helpers import mp, assert_spawning
+
+ # We need the class definition to derive from it, not the multiprocessing.Pool
+ # factory function
+ from multiprocessing.pool import Pool
+
+ try:
+     import numpy as np
+ except ImportError:
+     np = None
+
+
+ ###############################################################################
+ # Enable custom pickling in Pool queues
+
+ class CustomizablePickler(Pickler):
+     """Pickler that accepts custom reducers.
+
+     TODO python2_drop : can this be simplified ?
+
+     HIGHEST_PROTOCOL is selected by default as this pickler is used
+     to pickle ephemeral datastructures for interprocess communication
+     hence no backward compatibility is required.
+
+     `reducers` is expected to be a dictionary with key/values
+     being `(type, callable)` pairs where `callable` is a function that
+     give an instance of `type` will return a tuple `(constructor,
+     tuple_of_objects)` to rebuild an instance out of the pickled
+     `tuple_of_objects` as would return a `__reduce__` method. See the
+     standard library documentation on pickling for more details.
+
+     """
+
+     # We override the pure Python pickler as its the only way to be able to
+     # customize the dispatch table without side effects in Python 2.7
+     # to 3.2. For Python 3.3+ leverage the new dispatch_table
+     # feature from https://bugs.python.org/issue14166 that makes it possible
+     # to use the C implementation of the Pickler which is faster.
+
+     def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
+         Pickler.__init__(self, writer, protocol=protocol)
+         if reducers is None:
+             reducers = {}
+         if hasattr(Pickler, 'dispatch'):
+             # Make the dispatch registry an instance level attribute instead of
+             # a reference to the class dictionary under Python 2
+             self.dispatch = Pickler.dispatch.copy()
+         else:
+             # Under Python 3 initialize the dispatch table with a copy of the
+             # default registry
+             self.dispatch_table = copyreg.dispatch_table.copy()
+         for type, reduce_func in reducers.items():
+             self.register(type, reduce_func)
+
+     def register(self, type, reduce_func):
+         """Attach a reducer function to a given type in the dispatch table."""
+         if hasattr(Pickler, 'dispatch'):
+             # Python 2 pickler dispatching is not explicitly customizable.
+             # Let us use a closure to workaround this limitation.
+             def dispatcher(self, obj):
+                 reduced = reduce_func(obj)
+                 self.save_reduce(obj=obj, *reduced)
+             self.dispatch[type] = dispatcher
+         else:
+             self.dispatch_table[type] = reduce_func
+
+
+ class CustomizablePicklingQueue(object):
+     """Locked Pipe implementation that uses a customizable pickler.
+
+     This class is an alternative to the multiprocessing implementation
+     of SimpleQueue in order to make it possible to pass custom
+     pickling reducers, for instance to avoid memory copy when passing
+     memory mapped datastructures.
+
+     `reducers` is expected to be a dict with key / values being
+     `(type, callable)` pairs where `callable` is a function that, given an
+     instance of `type`, will return a tuple `(constructor, tuple_of_objects)`
+     to rebuild an instance out of the pickled `tuple_of_objects` as would
+     return a `__reduce__` method.
+
+     See the standard library documentation on pickling for more details.
+     """
+
+     def __init__(self, context, reducers=None):
+         self._reducers = reducers
+         self._reader, self._writer = context.Pipe(duplex=False)
+         self._rlock = context.Lock()
+         if sys.platform == 'win32':
+             self._wlock = None
+         else:
+             self._wlock = context.Lock()
+         self._make_methods()
+
+     def __getstate__(self):
+         assert_spawning(self)
+         return (self._reader, self._writer, self._rlock, self._wlock,
+                 self._reducers)
+
+     def __setstate__(self, state):
+         (self._reader, self._writer, self._rlock, self._wlock,
+          self._reducers) = state
+         self._make_methods()
+
+     def empty(self):
+         return not self._reader.poll()
+
+     def _make_methods(self):
+         self._recv = recv = self._reader.recv
+         racquire, rrelease = self._rlock.acquire, self._rlock.release
+
+         def get():
+             racquire()
+             try:
+                 return recv()
+             finally:
+                 rrelease()
+
+         self.get = get
+
+         if self._reducers:
+             def send(obj):
+                 buffer = BytesIO()
+                 CustomizablePickler(buffer, self._reducers).dump(obj)
+                 self._writer.send_bytes(buffer.getvalue())
+             self._send = send
+         else:
+             self._send = send = self._writer.send
+         if self._wlock is None:
+             # writes to a message oriented win32 pipe are atomic
+             self.put = send
+         else:
+             wlock_acquire, wlock_release = (
+                 self._wlock.acquire, self._wlock.release)
+
+             def put(obj):
+                 wlock_acquire()
+                 try:
+                     return send(obj)
+                 finally:
+                     wlock_release()
+
+             self.put = put
+
+
+ class PicklingPool(Pool):
+     """Pool implementation with customizable pickling reducers.
+
+     This is useful to control how data is shipped between processes
+     and makes it possible to use shared memory without useless
+     copies induces by the default pickling methods of the original
+     objects passed as arguments to dispatch.
+
+     `forward_reducers` and `backward_reducers` are expected to be
+     dictionaries with key/values being `(type, callable)` pairs where
+     `callable` is a function that, given an instance of `type`, will return a
+     tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the
+     pickled `tuple_of_objects` as would return a `__reduce__` method.
+     See the standard library documentation about pickling for more details.
+
+     """
+
+     def __init__(self, processes=None, forward_reducers=None,
+                  backward_reducers=None, **kwargs):
+         if forward_reducers is None:
+             forward_reducers = dict()
+         if backward_reducers is None:
+             backward_reducers = dict()
+         self._forward_reducers = forward_reducers
+         self._backward_reducers = backward_reducers
+         poolargs = dict(processes=processes)
+         poolargs.update(kwargs)
+         super(PicklingPool, self).__init__(**poolargs)
+
+     def _setup_queues(self):
+         context = getattr(self, '_ctx', mp)
+         self._inqueue = CustomizablePicklingQueue(context,
+                                                   self._forward_reducers)
+         self._outqueue = CustomizablePicklingQueue(context,
+                                                    self._backward_reducers)
+         self._quick_put = self._inqueue._send
+         self._quick_get = self._outqueue._recv
+
+
+ class MemmappingPool(PicklingPool):
+     """Process pool that shares large arrays to avoid memory copy.
+
+     This drop-in replacement for `multiprocessing.pool.Pool` makes
+     it possible to work efficiently with shared memory in a numpy
+     context.
+
+     Existing instances of numpy.memmap are preserved: the child
+     suprocesses will have access to the same shared memory in the
+     original mode except for the 'w+' mode that is automatically
+     transformed as 'r+' to avoid zeroing the original data upon
+     instantiation.
+
+     Furthermore large arrays from the parent process are automatically
+     dumped to a temporary folder on the filesystem such as child
+     processes to access their content via memmapping (file system
+     backed shared memory).
+
+     Note: it is important to call the terminate method to collect
+     the temporary folder used by the pool.
+
+     Parameters
+     ----------
+     processes: int, optional
+         Number of worker processes running concurrently in the pool.
+     initializer: callable, optional
+         Callable executed on worker process creation.
+     initargs: tuple, optional
+         Arguments passed to the initializer callable.
+     temp_folder: (str, callable) optional
+         If str:
+             Folder to be used by the pool for memmapping large arrays
+             for sharing memory with worker processes. If None, this will try in
+             order:
+             - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
+             - /dev/shm if the folder exists and is writable: this is a RAMdisk
+               filesystem available by default on modern Linux distributions,
+             - the default system temporary folder that can be overridden
+               with TMP, TMPDIR or TEMP environment variables, typically /tmp
+               under Unix operating systems.
+         if callable:
+             An callable in charge of dynamically resolving a temporary folder
+             for memmapping large arrays.
+     max_nbytes int or None, optional, 1e6 by default
+         Threshold on the size of arrays passed to the workers that
+         triggers automated memory mapping in temp_folder.
+         Use None to disable memmapping of large arrays.
+     mmap_mode: {'r+', 'r', 'w+', 'c'}
+         Memmapping mode for numpy arrays passed to workers.
+         See 'max_nbytes' parameter documentation for more details.
+     forward_reducers: dictionary, optional
+         Reducers used to pickle objects passed from main process to worker
+         processes: see below.
+     backward_reducers: dictionary, optional
+         Reducers used to pickle return values from workers back to the
+         main process.
+     verbose: int, optional
+         Make it possible to monitor how the communication of numpy arrays
+         with the subprocess is handled (pickling or memmapping)
+     prewarm: bool or str, optional, "auto" by default.
+         If True, force a read on newly memmapped array to make sure that OS
+         pre-cache it in memory. This can be useful to avoid concurrent disk
+         access when the same data array is passed to different worker
+         processes. If "auto" (by default), prewarm is set to True, unless the
+         Linux shared memory partition /dev/shm is available and used as temp
+         folder.
+
+     `forward_reducers` and `backward_reducers` are expected to be
+     dictionaries with key/values being `(type, callable)` pairs where
286
+ `callable` is a function that give an instance of `type` will return
287
+ a tuple `(constructor, tuple_of_objects)` to rebuild an instance out
288
+ of the pickled `tuple_of_objects` as would return a `__reduce__`
289
+ method. See the standard library documentation on pickling for more
290
+ details.
291
+
292
+ """
293
+
294
+ def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6,
295
+ mmap_mode='r', forward_reducers=None, backward_reducers=None,
296
+ verbose=0, context_id=None, prewarm=False, **kwargs):
297
+
298
+ if context_id is not None:
299
+ warnings.warn('context_id is deprecated and ignored in joblib'
300
+ ' 0.9.4 and will be removed in 0.11',
301
+ DeprecationWarning)
302
+
303
+ manager = TemporaryResourcesManager(temp_folder)
304
+ self._temp_folder_manager = manager
305
+
306
+ # The usage of a temp_folder_resolver over a simple temp_folder is
307
+ # superfluous for multiprocessing pools, as they don't get reused, see
308
+ # get_memmapping_executor for more details. We still use it for code
309
+ # simplicity.
310
+ forward_reducers, backward_reducers = \
311
+ get_memmapping_reducers(
312
+ temp_folder_resolver=manager.resolve_temp_folder_name,
313
+ max_nbytes=max_nbytes, mmap_mode=mmap_mode,
314
+ forward_reducers=forward_reducers,
315
+ backward_reducers=backward_reducers, verbose=verbose,
316
+ unlink_on_gc_collect=False, prewarm=prewarm)
317
+
318
+ poolargs = dict(
319
+ processes=processes,
320
+ forward_reducers=forward_reducers,
321
+ backward_reducers=backward_reducers)
322
+ poolargs.update(kwargs)
323
+ super(MemmappingPool, self).__init__(**poolargs)
324
+
325
+ def terminate(self):
326
+ n_retries = 10
327
+ for i in range(n_retries):
328
+ try:
329
+ super(MemmappingPool, self).terminate()
330
+ break
331
+ except OSError as e:
332
+ if isinstance(e, WindowsError):
333
+ # Workaround occasional "[Error 5] Access is denied" issue
334
+ # when trying to terminate a process under windows.
335
+ sleep(0.1)
336
+ if i + 1 == n_retries:
337
+ warnings.warn("Failed to terminate worker processes in"
338
+ " multiprocessing pool: %r" % e)
339
+
340
+ # Clean up the temporary resources as the workers should now be off.
341
+ self._temp_folder_manager._clean_temporary_resources()
342
+
343
+ @property
344
+ def _temp_folder(self):
345
+ # Legacy property in tests. could be removed if we refactored the
346
+ # memmapping tests. SHOULD ONLY BE USED IN TESTS!
347
+ # We cache this property because it is called late in the tests - at
348
+ # this point, all context have been unregistered, and
349
+ # resolve_temp_folder_name raises an error.
350
+ if getattr(self, '_cached_temp_folder', None) is not None:
351
+ return self._cached_temp_folder
352
+ else:
353
+ self._cached_temp_folder = self._temp_folder_manager.resolve_temp_folder_name() # noqa
354
+ return self._cached_temp_folder
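
To make the reducer contract and the memmapping behavior described in the docstrings above concrete, here is a minimal usage sketch (not part of the diff; the `reduce_as_bytes` helper, the pool sizes and the array size are illustrative assumptions):

import numpy as np
from joblib.pool import MemmappingPool, PicklingPool

def reduce_as_bytes(obj):
    # A custom reducer: given a bytearray instance, return a
    # (constructor, tuple_of_objects) pair, as __reduce__ would.
    return (bytearray, (bytes(obj),))

def total(a):
    # With MemmappingPool, a large array arrives in the worker as a
    # read-only numpy.memmap backed by the pool's temporary folder.
    return a.sum()

if __name__ == '__main__':
    # PicklingPool with a forward reducer for bytearray arguments.
    pool = PicklingPool(processes=2,
                        forward_reducers={bytearray: reduce_as_bytes})
    pool.terminate()

    # MemmappingPool: arrays above max_nbytes are memmapped, not copied.
    data = np.ones(int(1e6))  # 8 MB, above the default 1e6-byte threshold
    pool = MemmappingPool(processes=2, max_nbytes=1e6, mmap_mode='r')
    try:
        results = pool.map(total, [data] * 4)
        assert results == [data.sum()] * 4
    finally:
        pool.terminate()  # also collects the temporary folder

Calling terminate() in a finally block matches the note in the MemmappingPool docstring: without it, the temporary memmap folder may be left behind.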
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__init__.py ADDED
File without changes
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/common.cpython-310.pyc ADDED
Binary file (2.22 kB).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_backports.cpython-310.pyc ADDED
Binary file (1.62 kB).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_cloudpickle_wrapper.cpython-310.pyc ADDED
Binary file (1.06 kB).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc ADDED
Binary file (5.47 kB).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc ADDED
Binary file (21.3 kB).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_disk.cpython-310.pyc ADDED
Binary file (2.12 kB).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc ADDED
Binary file (330 Bytes).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_hashing.cpython-310.pyc ADDED
Binary file (13.1 kB).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_init.cpython-310.pyc ADDED
Binary file (420 Bytes).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc ADDED
Binary file (763 Bytes).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_memmapping.cpython-310.pyc ADDED
Binary file (30.9 kB).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory.cpython-310.pyc ADDED
Binary file (41.7 kB).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc ADDED
Binary file (4.53 kB).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_missing_multiprocessing.cpython-310.pyc ADDED
Binary file (1.27 kB).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc ADDED
Binary file (2.03 kB).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc ADDED
Binary file (31.8 kB).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc ADDED
Binary file (705 Bytes).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc ADDED
Binary file (598 Bytes).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc ADDED
Binary file (68 kB).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc ADDED
Binary file (3.67 kB).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_testing.cpython-310.pyc ADDED
Binary file (2.37 kB).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc ADDED
Binary file (879 Bytes).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/__pycache__/testutils.cpython-310.pyc ADDED
Binary file (557 Bytes).
evalkit_internvl/lib/python3.10/site-packages/joblib/test/common.py ADDED
@@ -0,0 +1,84 @@
+"""
+Small utilities for testing.
+"""
+import os
+import gc
+import sys
+
+from joblib._multiprocessing_helpers import mp
+from joblib.testing import SkipTest, skipif
+
+try:
+    import lz4
+except ImportError:
+    lz4 = None
+
+IS_PYPY = hasattr(sys, "pypy_version_info")
+
+# A decorator to run tests only when numpy is available
+try:
+    import numpy as np
+
+    def with_numpy(func):
+        """A decorator to skip tests requiring numpy."""
+        return func
+
+except ImportError:
+    def with_numpy(func):
+        """A decorator to skip tests requiring numpy."""
+        def my_func():
+            raise SkipTest('Test requires numpy')
+        return my_func
+    np = None
+
+# TODO: Turn this back on after refactoring yield based tests in test_hashing
+# with_numpy = skipif(not np, reason='Test requires numpy.')
+
+# we use the memory_profiler library for memory consumption checks
+try:
+    from memory_profiler import memory_usage
+
+    def with_memory_profiler(func):
+        """A decorator to skip tests requiring memory_profiler."""
+        return func
+
+    def memory_used(func, *args, **kwargs):
+        """Compute memory usage when executing func."""
+        gc.collect()
+        mem_use = memory_usage((func, args, kwargs), interval=.001)
+        return max(mem_use) - min(mem_use)
+
+except ImportError:
+    def with_memory_profiler(func):
+        """A decorator to skip tests requiring memory_profiler."""
+        def dummy_func():
+            raise SkipTest('Test requires memory_profiler.')
+        return dummy_func
+
+    memory_usage = memory_used = None
+
+
+def force_gc_pypy():
+    # The gc in pypy can be delayed. Force it to test the behavior once
+    # objects are eventually collected.
+    if IS_PYPY:
+        # Run gc.collect() twice to make sure the weakref is collected, as
+        # mentioned in the pypy doc:
+        # https://doc.pypy.org/en/latest/config/objspace.usemodules._weakref.html
+        import gc
+        gc.collect()
+        gc.collect()
+
+
+with_multiprocessing = skipif(
+    mp is None, reason='Needs multiprocessing to run.')
+
+
+with_dev_shm = skipif(
+    not os.path.exists('/dev/shm'),
+    reason='This test requires a large /dev/shm shared memory fs.')
+
+with_lz4 = skipif(lz4 is None, reason='Needs lz4 compression to run')
+
+without_lz4 = skipif(
+    lz4 is not None, reason='Needs lz4 not being installed to run')
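
A hypothetical consumer of these helpers (the test name and body are illustrative), showing how the availability decorators compose:

from joblib.test.common import np, with_numpy, with_multiprocessing
from joblib.test.common import force_gc_pypy

@with_numpy
@with_multiprocessing
def test_zeros_roundtrip():
    # Runs only when numpy is importable and multiprocessing is usable;
    # otherwise the decorators skip the test.
    data = np.zeros(8)
    assert data.sum() == 0
    force_gc_pypy()  # extra gc.collect() calls on PyPy, a no-op elsewhere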
evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_backports.py ADDED
@@ -0,0 +1,35 @@
+import mmap
+
+from joblib.backports import make_memmap, concurrency_safe_rename
+from joblib.test.common import with_numpy
+from joblib.testing import parametrize
+from joblib import Parallel, delayed
+
+
+@with_numpy
+def test_memmap(tmpdir):
+    fname = tmpdir.join('test.mmap').strpath
+    size = 5 * mmap.ALLOCATIONGRANULARITY
+    offset = mmap.ALLOCATIONGRANULARITY + 1
+    memmap_obj = make_memmap(fname, shape=size, mode='w+', offset=offset)
+    assert memmap_obj.offset == offset
+
+
+@parametrize('dst_content', [None, 'dst content'])
+@parametrize('backend', [None, 'threading'])
+def test_concurrency_safe_rename(tmpdir, dst_content, backend):
+    src_paths = [tmpdir.join('src_%d' % i) for i in range(4)]
+    for src_path in src_paths:
+        src_path.write('src content')
+    dst_path = tmpdir.join('dst')
+    if dst_content is not None:
+        dst_path.write(dst_content)
+
+    Parallel(n_jobs=4, backend=backend)(
+        delayed(concurrency_safe_rename)(src_path.strpath, dst_path.strpath)
+        for src_path in src_paths
+    )
+    assert dst_path.exists()
+    assert dst_path.read() == 'src content'
+    for src_path in src_paths:
+        assert not src_path.exists()
evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_cloudpickle_wrapper.py ADDED
@@ -0,0 +1,27 @@
+"""
+Test that our implementation of wrap_non_picklable_objects properly mimics
+the loky implementation.
+"""
+
+from .._cloudpickle_wrapper import wrap_non_picklable_objects
+from .._cloudpickle_wrapper import _my_wrap_non_picklable_objects
+
+
+def a_function(x):
+    return x
+
+
+class AClass(object):
+
+    def __call__(self, x):
+        return x
+
+
+def test_wrap_non_picklable_objects():
+    # Mostly a smoke test: check that we can use callables in the same way
+    # with both our implementation of wrap_non_picklable_objects and the
+    # upstream one.
+    for obj in (a_function, AClass()):
+        wrapped_obj = wrap_non_picklable_objects(obj)
+        my_wrapped_obj = _my_wrap_non_picklable_objects(obj)
+        assert wrapped_obj(1) == my_wrapped_obj(1)
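
For context, the wrapper exists because callables defined interactively or as lambdas are not picklable with the standard pickler. A short sketch of the usage pattern these tests mirror (values are illustrative):

from joblib import Parallel, delayed, wrap_non_picklable_objects

f = wrap_non_picklable_objects(lambda x: x + 1)  # cloudpickle-backed wrapper
results = Parallel(n_jobs=2)(delayed(f)(i) for i in range(3))
assert results == [1, 2, 3]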
evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_config.py ADDED
@@ -0,0 +1,151 @@
+import os
+
+from joblib.parallel import parallel_config
+from joblib.parallel import parallel_backend
+from joblib.parallel import Parallel, delayed
+
+from joblib.parallel import BACKENDS
+from joblib.parallel import DEFAULT_BACKEND
+from joblib.parallel import EXTERNAL_BACKENDS
+
+from joblib._parallel_backends import LokyBackend
+from joblib._parallel_backends import ThreadingBackend
+from joblib._parallel_backends import MultiprocessingBackend
+
+from joblib.testing import parametrize, raises
+from joblib.test.common import np, with_numpy
+from joblib.test.common import with_multiprocessing
+from joblib.test.test_parallel import check_memmap
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+def test_global_parallel_backend(context):
+    default = Parallel()._backend
+
+    pb = context('threading')
+    try:
+        assert isinstance(Parallel()._backend, ThreadingBackend)
+    finally:
+        pb.unregister()
+    assert type(Parallel()._backend) is type(default)
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+def test_external_backends(context):
+    def register_foo():
+        BACKENDS['foo'] = ThreadingBackend
+
+    EXTERNAL_BACKENDS['foo'] = register_foo
+    try:
+        with context('foo'):
+            assert isinstance(Parallel()._backend, ThreadingBackend)
+    finally:
+        del EXTERNAL_BACKENDS['foo']
+
+
+@with_numpy
+@with_multiprocessing
+def test_parallel_config_no_backend(tmpdir):
+    # Check that parallel_config makes it possible to change the config
+    # even if no backend is set.
+    with parallel_config(n_jobs=2, max_nbytes=1, temp_folder=tmpdir):
+        with Parallel(prefer="processes") as p:
+            assert isinstance(p._backend, LokyBackend)
+            assert p.n_jobs == 2
+
+            # Check that memmapping is enabled
+            p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
+        assert len(os.listdir(tmpdir)) > 0
+
+
+@with_numpy
+@with_multiprocessing
+def test_parallel_config_params_explicit_set(tmpdir):
+    with parallel_config(n_jobs=3, max_nbytes=1, temp_folder=tmpdir):
+        with Parallel(n_jobs=2, prefer="processes", max_nbytes='1M') as p:
+            assert isinstance(p._backend, LokyBackend)
+            assert p.n_jobs == 2
+
+            # Check that memmapping is disabled
+            with raises(TypeError, match="Expected np.memmap instance"):
+                p(delayed(check_memmap)(a)
+                  for a in [np.random.random(10)] * 2)
+
+
+@parametrize("param", ["prefer", "require"])
+def test_parallel_config_bad_params(param):
+    # Check that an error is raised when setting a wrong backend
+    # hint or constraint.
+    with raises(ValueError, match=f"{param}=wrong is not a valid"):
+        with parallel_config(**{param: "wrong"}):
+            Parallel()
+
+
+def test_parallel_config_constructor_params():
+    # Check that an error is raised when backend is None
+    # but backend constructor params are given.
+    with raises(ValueError, match="only supported when backend is not None"):
+        with parallel_config(inner_max_num_threads=1):
+            pass
+
+    with raises(ValueError, match="only supported when backend is not None"):
+        with parallel_config(backend_param=1):
+            pass
+
+
+def test_parallel_config_nested():
+    # Check that a nested configuration retrieves the settings from the
+    # parent config and does not reset them.
+
+    with parallel_config(n_jobs=2):
+        p = Parallel()
+        assert isinstance(p._backend, BACKENDS[DEFAULT_BACKEND])
+        assert p.n_jobs == 2
+
+    with parallel_config(backend='threading'):
+        with parallel_config(n_jobs=2):
+            p = Parallel()
+            assert isinstance(p._backend, ThreadingBackend)
+            assert p.n_jobs == 2
+
+    with parallel_config(verbose=100):
+        with parallel_config(n_jobs=2):
+            p = Parallel()
+            assert p.verbose == 100
+            assert p.n_jobs == 2
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize('backend', ['multiprocessing', 'threading',
+                         MultiprocessingBackend(), ThreadingBackend()])
+@parametrize("context", [parallel_config, parallel_backend])
+def test_threadpool_limitation_in_child_context_error(context, backend):
+
+    with raises(AssertionError, match=r"does not acc.*inner_max_num_threads"):
+        context(backend, inner_max_num_threads=1)
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+def test_parallel_n_jobs_none(context):
+    # Check that n_jobs=None is interpreted as "unset" in Parallel
+    # non-regression test for #1473
+    with context(backend="threading", n_jobs=2):
+        with Parallel(n_jobs=None) as p:
+            assert p.n_jobs == 2
+
+    with context(backend="threading"):
+        default_n_jobs = Parallel().n_jobs
+        with Parallel(n_jobs=None) as p:
+            assert p.n_jobs == default_n_jobs
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+def test_parallel_config_n_jobs_none(context):
+    # Check that n_jobs=None is interpreted as "explicitly set" in
+    # parallel_(config/backend)
+    # non-regression test for #1473
+    with context(backend="threading", n_jobs=2):
+        with context(backend="threading", n_jobs=None):
+            # n_jobs=None resets n_jobs to the backend's default
+            with Parallel() as p:
+                assert p.n_jobs == 1
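
The precedence rules these tests exercise, condensed into one self-contained sketch (the numeric values are illustrative):

from joblib import Parallel, parallel_config

with parallel_config(backend="threading", n_jobs=4, verbose=10):
    # Explicit constructor arguments win over the context values...
    with Parallel(n_jobs=2) as p:
        assert p.n_jobs == 2 and p.verbose == 10
    # ...while n_jobs=None falls back to the value set in the context.
    with Parallel(n_jobs=None) as p:
        assert p.n_jobs == 4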
evalkit_internvl/lib/python3.10/site-packages/joblib/test/test_dask.py ADDED
@@ -0,0 +1,499 @@
+from __future__ import print_function, division, absolute_import
+import os
+import warnings
+
+import pytest
+from random import random
+from uuid import uuid4
+from time import sleep
+
+from .. import Parallel, delayed, parallel_config
+from ..parallel import ThreadingBackend, AutoBatchingMixin
+from .._dask import DaskDistributedBackend
+
+distributed = pytest.importorskip('distributed')
+dask = pytest.importorskip('dask')
+
+# These imports need to be after the pytest.importorskip, hence the
+# noqa: E402
+from distributed import Client, LocalCluster, get_client  # noqa: E402
+from distributed.metrics import time  # noqa: E402
+# Note: pytest requires manually importing all fixtures used in the tests
+# and their dependencies.
+from distributed.utils_test import cluster, inc, cleanup  # noqa: E402, F401
+
+
+def noop(*args, **kwargs):
+    pass
+
+
+def slow_raise_value_error(condition, duration=0.05):
+    sleep(duration)
+    if condition:
+        raise ValueError("condition evaluated to True")
+
+
+def count_events(event_name, client):
+    worker_events = client.run(lambda dask_worker: dask_worker.log)
+    event_counts = {}
+    for w, events in worker_events.items():
+        event_counts[w] = len([event for event in list(events)
+                               if event[1] == event_name])
+    return event_counts
+
+
+def test_simple(loop):
+    with cluster() as (s, [a, b]):
+        with Client(s['address'], loop=loop) as client:  # noqa: F841
+            with parallel_config(backend='dask'):
+                seq = Parallel()(delayed(inc)(i) for i in range(10))
+                assert seq == [inc(i) for i in range(10)]
+
+                with pytest.raises(ValueError):
+                    Parallel()(delayed(slow_raise_value_error)(i == 3)
+                               for i in range(10))
+
+                seq = Parallel()(delayed(inc)(i) for i in range(10))
+                assert seq == [inc(i) for i in range(10)]
+
+
+def test_dask_backend_uses_autobatching(loop):
+    assert (DaskDistributedBackend.compute_batch_size
+            is AutoBatchingMixin.compute_batch_size)
+
+    with cluster() as (s, [a, b]):
+        with Client(s['address'], loop=loop) as client:  # noqa: F841
+            with parallel_config(backend='dask'):
+                with Parallel() as parallel:
+                    # The backend should be initialized with a default
+                    # batch size of 1:
+                    backend = parallel._backend
+                    assert isinstance(backend, DaskDistributedBackend)
+                    assert backend.parallel is parallel
+                    assert backend._effective_batch_size == 1
+
+                    # Launch many short tasks that should trigger
+                    # auto-batching:
+                    parallel(
+                        delayed(lambda: None)()
+                        for _ in range(int(1e4))
+                    )
+                    assert backend._effective_batch_size > 10
+
+
+def random2():
+    return random()
+
+
+def test_dont_assume_function_purity(loop):
+    with cluster() as (s, [a, b]):
+        with Client(s['address'], loop=loop) as client:  # noqa: F841
+            with parallel_config(backend='dask'):
+                x, y = Parallel()(delayed(random2)() for i in range(2))
+                assert x != y
+
+
+@pytest.mark.parametrize("mixed", [True, False])
+def test_dask_funcname(loop, mixed):
+    from joblib._dask import Batch
+    if not mixed:
+        tasks = [delayed(inc)(i) for i in range(4)]
+        batch_repr = 'batch_of_inc_4_calls'
+    else:
+        tasks = [
+            delayed(abs)(i) if i % 2 else delayed(inc)(i) for i in range(4)
+        ]
+        batch_repr = 'mixed_batch_of_inc_4_calls'
+
+    assert repr(Batch(tasks)) == batch_repr
+
+    with cluster() as (s, [a, b]):
+        with Client(s['address'], loop=loop) as client:
+            with parallel_config(backend='dask'):
+                _ = Parallel(batch_size=2, pre_dispatch='all')(tasks)
+
+                def f(dask_scheduler):
+                    return list(dask_scheduler.transition_log)
+                batch_repr = batch_repr.replace('4', '2')
+                log = client.run_on_scheduler(f)
+                assert all('batch_of_inc' in tup[0] for tup in log)
+
+
+def test_no_undesired_distributed_cache_hit():
+    # Dask has a pickle cache for callables that are called many times.
+    # Because the dask backend used to wrap both the functions and the
+    # arguments under instances of the Batch callable class, this caching
+    # mechanism could lead to bugs as described in:
+    # https://github.com/joblib/joblib/pull/1055
+    # The joblib-dask backend has been refactored to avoid bundling the
+    # arguments as an attribute of the Batch instance to avoid this problem.
+    # This test serves as a non-regression test.
+
+    # Use a large number of input arguments to give the AutoBatchingMixin
+    # enough tasks to kick in.
+    lists = [[] for _ in range(100)]
+    np = pytest.importorskip('numpy')
+    X = np.arange(int(1e6))
+
+    def isolated_operation(list_, data=None):
+        if data is not None:
+            np.testing.assert_array_equal(data, X)
+        list_.append(uuid4().hex)
+        return list_
+
+    cluster = LocalCluster(n_workers=1, threads_per_worker=2)
+    client = Client(cluster)
+    try:
+        with parallel_config(backend='dask'):
+            # dispatches joblib.parallel.BatchedCalls
+            res = Parallel()(
+                delayed(isolated_operation)(list_) for list_ in lists
+            )
+
+        # The original arguments should not have been mutated as the
+        # mutation happens in the dask worker process.
+        assert lists == [[] for _ in range(100)]
+
+        # Here we did not pass any large numpy array as argument to
+        # isolated_operation so no scattering event should happen under
+        # the hood.
+        counts = count_events('receive-from-scatter', client)
+        assert sum(counts.values()) == 0
+        assert all([len(r) == 1 for r in res])
+
+        with parallel_config(backend='dask'):
+            # Append a large array which will be scattered by dask, and
+            # dispatch joblib._dask.Batch
+            res = Parallel()(
+                delayed(isolated_operation)(list_, data=X) for list_ in lists
+            )
+
+        # This time, auto-scattering should have kicked in.
+        counts = count_events('receive-from-scatter', client)
+        assert sum(counts.values()) > 0
+        assert all([len(r) == 1 for r in res])
+    finally:
+        client.close(timeout=30)
+        cluster.close(timeout=30)
+
+
+class CountSerialized(object):
+    def __init__(self, x):
+        self.x = x
+        self.count = 0
+
+    def __add__(self, other):
+        return self.x + getattr(other, 'x', other)
+
+    __radd__ = __add__
+
+    def __reduce__(self):
+        self.count += 1
+        return (CountSerialized, (self.x,))
+
+
+def add5(a, b, c, d=0, e=0):
+    return a + b + c + d + e
+
+
+def test_manual_scatter(loop):
+    x = CountSerialized(1)
+    y = CountSerialized(2)
+    z = CountSerialized(3)
+
+    with cluster() as (s, [a, b]):
+        with Client(s['address'], loop=loop) as client:  # noqa: F841
+            with parallel_config(backend='dask', scatter=[x, y]):
+                f = delayed(add5)
+                tasks = [f(x, y, z, d=4, e=5),
+                         f(x, z, y, d=5, e=4),
+                         f(y, x, z, d=x, e=5),
+                         f(z, z, x, d=z, e=y)]
+                expected = [func(*args, **kwargs)
+                            for func, args, kwargs in tasks]
+                results = Parallel()(tasks)
+
+            # Scatter must take a list/tuple
+            with pytest.raises(TypeError):
+                with parallel_config(backend='dask', loop=loop, scatter=1):
+                    pass
+
+    assert results == expected
+
+    # Scattered variables are only serialized once
+    assert x.count == 1
+    assert y.count == 1
+    # Depending on the version of distributed, the unscattered z variable
+    # is either pickled 4 or 6 times, possibly because of the memoization
+    # of objects that appear several times in the arguments of a delayed
+    # task.
+    assert z.count in (4, 6)
+
+
+# When the same IOLoop is used for multiple clients in a row, use
+# loop_in_thread instead of loop to prevent the Client from closing it. See
+# dask/distributed #4112
+def test_auto_scatter(loop_in_thread):
+    np = pytest.importorskip('numpy')
+    data1 = np.ones(int(1e4), dtype=np.uint8)
+    data2 = np.ones(int(1e4), dtype=np.uint8)
+    data_to_process = ([data1] * 3) + ([data2] * 3)
+
+    with cluster() as (s, [a, b]):
+        with Client(s['address'], loop=loop_in_thread) as client:
+            with parallel_config(backend='dask'):
+                # Passing the same data as arg and kwarg triggers a single
+                # scatter operation whose result is reused.
+                Parallel()(delayed(noop)(data, data, i, opt=data)
+                           for i, data in enumerate(data_to_process))
+            # By default, large arrays are automatically scattered with
+            # broadcast=1, which means that one worker must directly
+            # receive the data from the scatter operation once.
+            counts = count_events('receive-from-scatter', client)
+            assert counts[a['address']] + counts[b['address']] == 2
+
+    with cluster() as (s, [a, b]):
+        with Client(s['address'], loop=loop_in_thread) as client:
+            with parallel_config(backend='dask'):
+                Parallel()(delayed(noop)(data1[:3], i) for i in range(5))
+            # Small arrays are passed within the task definition without
+            # going through a scatter operation.
+            counts = count_events('receive-from-scatter', client)
+            assert counts[a['address']] == 0
+            assert counts[b['address']] == 0
+
+
+@pytest.mark.parametrize("retry_no", list(range(2)))
+def test_nested_scatter(loop, retry_no):
+
+    np = pytest.importorskip('numpy')
+
+    NUM_INNER_TASKS = 10
+    NUM_OUTER_TASKS = 10
+
+    def my_sum(x, i, j):
+        return np.sum(x)
+
+    def outer_function_joblib(array, i):
+        client = get_client()  # noqa
+        with parallel_config(backend="dask"):
+            results = Parallel()(
+                delayed(my_sum)(array[j:], i, j) for j in range(
+                    NUM_INNER_TASKS)
+            )
+        return sum(results)
+
+    with cluster() as (s, [a, b]):
+        with Client(s['address'], loop=loop) as _:
+            with parallel_config(backend="dask"):
+                my_array = np.ones(10000)
+                _ = Parallel()(
+                    delayed(outer_function_joblib)(
+                        my_array[i:], i) for i in range(NUM_OUTER_TASKS)
+                )
+
+
+def test_nested_backend_context_manager(loop_in_thread):
+    def get_nested_pids():
+        pids = set(Parallel(n_jobs=2)(delayed(os.getpid)()
+                                      for _ in range(2)))
+        pids |= set(Parallel(n_jobs=2)(delayed(os.getpid)()
+                                       for _ in range(2)))
+        return pids
+
+    with cluster() as (s, [a, b]):
+        with Client(s['address'], loop=loop_in_thread) as client:
+            with parallel_config(backend='dask'):
+                pid_groups = Parallel(n_jobs=2)(
+                    delayed(get_nested_pids)()
+                    for _ in range(10)
+                )
+                for pid_group in pid_groups:
+                    assert len(set(pid_group)) <= 2
+
+        # No deadlocks
+        with Client(s['address'], loop=loop_in_thread) as client:  # noqa: F841
+            with parallel_config(backend='dask'):
+                pid_groups = Parallel(n_jobs=2)(
+                    delayed(get_nested_pids)()
+                    for _ in range(10)
+                )
+                for pid_group in pid_groups:
+                    assert len(set(pid_group)) <= 2
+
+
+def test_nested_backend_context_manager_implicit_n_jobs(loop):
+    # Check that Parallel with no explicit n_jobs value automatically
+    # selects all the dask workers, including in nested calls.
+
+    def _backend_type(p):
+        return p._backend.__class__.__name__
+
+    def get_nested_implicit_n_jobs():
+        with Parallel() as p:
+            return _backend_type(p), p.n_jobs
+
+    with cluster() as (s, [a, b]):
+        with Client(s['address'], loop=loop) as client:  # noqa: F841
+            with parallel_config(backend='dask'):
+                with Parallel() as p:
+                    assert _backend_type(p) == "DaskDistributedBackend"
+                    assert p.n_jobs == -1
+                    all_nested_n_jobs = p(
+                        delayed(get_nested_implicit_n_jobs)()
+                        for _ in range(2)
+                    )
+                for backend_type, nested_n_jobs in all_nested_n_jobs:
+                    assert backend_type == "DaskDistributedBackend"
+                    assert nested_n_jobs == -1
+
+
+def test_errors(loop):
+    with pytest.raises(ValueError) as info:
+        with parallel_config(backend='dask'):
+            pass
+
+    assert "create a dask client" in str(info.value).lower()
+
+
+def test_correct_nested_backend(loop):
+    with cluster() as (s, [a, b]):
+        with Client(s['address'], loop=loop) as client:  # noqa: F841
+            # No requirement: the dask backend should be used.
+            with parallel_config(backend='dask'):
+                result = Parallel(n_jobs=2)(
+                    delayed(outer)(nested_require=None) for _ in range(1))
+                assert isinstance(result[0][0][0], DaskDistributedBackend)
+
+            # Require shared memory: the threading backend should be used.
+            with parallel_config(backend='dask'):
+                result = Parallel(n_jobs=2)(
+                    delayed(outer)(nested_require='sharedmem')
+                    for _ in range(1))
+                assert isinstance(result[0][0][0], ThreadingBackend)
+
+
+def outer(nested_require):
+    return Parallel(n_jobs=2, prefer='threads')(
+        delayed(middle)(nested_require) for _ in range(1)
+    )
+
+
+def middle(require):
+    return Parallel(n_jobs=2, require=require)(
+        delayed(inner)() for _ in range(1)
+    )
+
+
+def inner():
+    return Parallel()._backend
+
+
+def test_secede_with_no_processes(loop):
+    # https://github.com/dask/distributed/issues/1775
+    with Client(loop=loop, processes=False, set_as_default=True):
+        with parallel_config(backend='dask'):
+            Parallel(n_jobs=4)(delayed(id)(i) for i in range(2))
+
+
+def _worker_address(_):
+    from distributed import get_worker
+    return get_worker().address
+
+
+def test_dask_backend_keywords(loop):
+    with cluster() as (s, [a, b]):
+        with Client(s['address'], loop=loop) as client:  # noqa: F841
+            with parallel_config(backend='dask', workers=a['address']):
+                seq = Parallel()(
+                    delayed(_worker_address)(i) for i in range(10))
+                assert seq == [a['address']] * 10
+
+            with parallel_config(backend='dask', workers=b['address']):
+                seq = Parallel()(
+                    delayed(_worker_address)(i) for i in range(10))
+                assert seq == [b['address']] * 10
+
+
+def test_scheduler_tasks_cleanup(loop):
+    with Client(processes=False, loop=loop) as client:
+        with parallel_config(backend='dask'):
+            Parallel()(delayed(inc)(i) for i in range(10))
+
+        start = time()
+        while client.cluster.scheduler.tasks:
+            sleep(0.01)
+            assert time() < start + 5
+
+        assert not client.futures
+
+
+@pytest.mark.parametrize("cluster_strategy", ["adaptive", "late_scaling"])
+@pytest.mark.skipif(
+    distributed.__version__ <= '2.1.1' and distributed.__version__ >= '1.28.0',
+    reason="distributed bug - https://github.com/dask/distributed/pull/2841")
+def test_wait_for_workers(cluster_strategy):
+    cluster = LocalCluster(n_workers=0, processes=False, threads_per_worker=2)
+    client = Client(cluster)
+    if cluster_strategy == "adaptive":
+        cluster.adapt(minimum=0, maximum=2)
+    elif cluster_strategy == "late_scaling":
+        # Tell the cluster to start workers, but this is a non-blocking
+        # call and new workers might take time to connect. In this case
+        # the Parallel call should wait for at least one worker to come up
+        # before starting to schedule work.
+        cluster.scale(2)
+    try:
+        with parallel_config(backend='dask'):
+            # The following should wait a bit for at least one worker to
+            # become available.
+            Parallel()(delayed(inc)(i) for i in range(10))
+    finally:
+        client.close()
+        cluster.close()
+
+
+def test_wait_for_workers_timeout():
+    # Start a cluster with 0 workers:
+    cluster = LocalCluster(n_workers=0, processes=False, threads_per_worker=2)
+    client = Client(cluster)
+    try:
+        with parallel_config(backend='dask', wait_for_workers_timeout=0.1):
+            # Short timeout: DaskDistributedBackend fails fast.
+            msg = "DaskDistributedBackend has no worker after 0.1 seconds."
+            with pytest.raises(TimeoutError, match=msg):
+                Parallel()(delayed(inc)(i) for i in range(10))
+
+        with parallel_config(backend='dask', wait_for_workers_timeout=0):
+            # No timeout: fall back to the generic joblib failure:
+            msg = "DaskDistributedBackend has no active worker"
+            with pytest.raises(RuntimeError, match=msg):
+                Parallel()(delayed(inc)(i) for i in range(10))
+    finally:
+        client.close()
+        cluster.close()
+
+
+@pytest.mark.parametrize("backend", ["loky", "multiprocessing"])
+def test_joblib_warning_inside_dask_daemonic_worker(backend):
+    cluster = LocalCluster(n_workers=2)
+    client = Client(cluster)
+    try:
+
+        def func_using_joblib_parallel():
+            # Somehow trying to check the warning type here (e.g. with
+            # pytest.warns(UserWarning)) makes the test hang. Work-around:
+            # return the warning record to the client and do the warning
+            # check client-side.
+            with warnings.catch_warnings(record=True) as record:
+                Parallel(n_jobs=2, backend=backend)(
+                    delayed(inc)(i) for i in range(10))
+
+            return record
+
+        fut = client.submit(func_using_joblib_parallel)
+        record = fut.result()
+
+        assert len(record) == 1
+        warning = record[0].message
+        assert isinstance(warning, UserWarning)
+        assert "distributed.worker.daemon" in str(warning)
+    finally:
+        client.close(timeout=30)
+        cluster.close(timeout=30)
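
For reference, the minimal pattern all of these tests exercise, as a sketch (assuming a reachable scheduler; the LocalCluster parameters are illustrative):

from distributed import Client, LocalCluster
from joblib import Parallel, delayed, parallel_config

cluster = LocalCluster(n_workers=2)
client = Client(cluster)
try:
    # Any Parallel call inside this context is dispatched to the dask
    # workers instead of local processes or threads.
    with parallel_config(backend='dask'):
        results = Parallel()(delayed(pow)(i, 2) for i in range(10))
    assert results == [i ** 2 for i in range(10)]
finally:
    client.close()
    cluster.close()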