Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .gitattributes +1 -0
- evalkit_internvl/lib/python3.10/site-packages/decorator-4.4.2.dist-info/INSTALLER +1 -0
- evalkit_internvl/lib/python3.10/site-packages/decorator-4.4.2.dist-info/METADATA +131 -0
- evalkit_internvl/lib/python3.10/site-packages/decorator-4.4.2.dist-info/WHEEL +6 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/__init__.py +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py +18 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py +1487 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py +13 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__init__.py +44 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/_base.py +28 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py +43 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py +193 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py +85 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py +236 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py +378 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py +250 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py +409 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py +102 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/initializers.py +80 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py +1314 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py +285 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py27_np17.gz +3 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py33_np18.gz +3 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py35_np19.gz +3 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.xz +3 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl +3 -0
.gitattributes CHANGED
@@ -1654,3 +1654,4 @@ evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/binaries/ffmpeg-lin
 evalkit_internvl/lib/python3.10/site-packages/mpl_toolkits/mplot3d/__pycache__/axes3d.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libcudart.60cfec8e.so.11.0 filter=lfs diff=lfs merge=lfs -text
 evalkit_tf437/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libnvjpeg.70530407.so.11 filter=lfs diff=lfs merge=lfs -text
evalkit_internvl/lib/python3.10/site-packages/decorator-4.4.2.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
pip
evalkit_internvl/lib/python3.10/site-packages/decorator-4.4.2.dist-info/METADATA ADDED
@@ -0,0 +1,131 @@
Metadata-Version: 2.1
Name: decorator
Version: 4.4.2
Summary: Decorators for Humans
Home-page: https://github.com/micheles/decorator
Author: Michele Simionato
Author-email: michele.simionato@gmail.com
License: new BSD License
Keywords: decorators generic utility
Platform: All
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Natural Language :: English
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.2
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Topic :: Software Development :: Libraries
Classifier: Topic :: Utilities
Requires-Python: >=2.6, !=3.0.*, !=3.1.*

Decorators for Humans
=====================

The goal of the decorator module is to make it easy to define
signature-preserving function decorators and decorator factories.
It also includes an implementation of multiple dispatch and other niceties
(please check the docs). It is released under a two-clauses
BSD license, i.e. basically you can do whatever you want with it but I am not
responsible.

Installation
-------------

If you are lazy, just perform

``$ pip install decorator``

which will install just the module on your system.

If you prefer to install the full distribution from source, including
the documentation, clone the `GitHub repo`_ or download the tarball_, unpack it and run

``$ pip install .``

in the main directory, possibly as superuser.

.. _tarball: https://pypi.org/project/decorator/#files
.. _GitHub repo: https://github.com/micheles/decorator

Testing
--------

If you have the source code installation you can run the tests with

`$ python src/tests/test.py -v`

or (if you have setuptools installed)

`$ python setup.py test`

Notice that you may run into trouble if in your system there
is an older version of the decorator module; in such a case remove the
old version. It is safe even to copy the module `decorator.py` over
an existing one, since we kept backward-compatibility for a long time.

Repository
---------------

The project is hosted on GitHub. You can look at the source here:

https://github.com/micheles/decorator

Documentation
---------------

The documentation has been moved to https://github.com/micheles/decorator/blob/master/docs/documentation.md

From there you can get a PDF version by simply using the print
functionality of your browser.

Here is the documentation for previous versions of the module:

https://github.com/micheles/decorator/blob/4.3.2/docs/tests.documentation.rst
https://github.com/micheles/decorator/blob/4.2.1/docs/tests.documentation.rst
https://github.com/micheles/decorator/blob/4.1.2/docs/tests.documentation.rst
https://github.com/micheles/decorator/blob/4.0.0/documentation.rst
https://github.com/micheles/decorator/blob/3.4.2/documentation.rst

For the impatient
-----------------

Here is an example of how to define a family of decorators tracing slow
operations:

.. code-block:: python

   from decorator import decorator

   @decorator
   def warn_slow(func, timelimit=60, *args, **kw):
       t0 = time.time()
       result = func(*args, **kw)
       dt = time.time() - t0
       if dt > timelimit:
           logging.warn('%s took %d seconds', func.__name__, dt)
       else:
           logging.info('%s took %d seconds', func.__name__, dt)
       return result

   @warn_slow  # warn if it takes more than 1 minute
   def preprocess_input_files(inputdir, tempdir):
       ...

   @warn_slow(timelimit=600)  # warn if it takes more than 10 minutes
   def run_calculation(tempdir, outdir):
       ...

Enjoy!
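The warn_slow snippet in the METADATA above is quoted verbatim from upstream and leaves its imports implicit. A self-contained variant for reference (a sketch: the import lines, the logging setup, the one-second demo function, and the switch from the deprecated logging.warn to logging.warning are additions):

```python
import logging
import time

from decorator import decorator

logging.basicConfig(level=logging.INFO)

@decorator
def warn_slow(func, timelimit=60, *args, **kw):
    # Time the wrapped call; warn when it exceeds the limit, else log info.
    t0 = time.time()
    result = func(*args, **kw)
    dt = time.time() - t0
    if dt > timelimit:
        logging.warning('%s took %d seconds', func.__name__, dt)
    else:
        logging.info('%s took %d seconds', func.__name__, dt)
    return result

@warn_slow(timelimit=1)  # warn if it takes more than one second
def slow_step():
    time.sleep(2)

slow_step()
```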
evalkit_internvl/lib/python3.10/site-packages/decorator-4.4.2.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.33.4)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any
evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc ADDED (binary, 4.63 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc ADDED (binary, 598 Bytes)
evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc ADDED (binary, 15.6 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc ADDED (binary, 1.26 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc ADDED (binary, 20.2 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc ADDED (binary, 15.7 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc ADDED (binary, 2.65 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc ADDED (binary, 5.37 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc ADDED (binary, 3.2 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc ADDED (binary, 8.9 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc ADDED (binary, 6.36 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc ADDED (binary, 4.35 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc ADDED (binary, 34 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc ADDED (binary, 17.2 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc ADDED (binary, 6.56 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/__init__.py ADDED (empty file)
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py ADDED
@@ -0,0 +1,18 @@
from . import cloudpickle
from .cloudpickle import *  # noqa

__doc__ = cloudpickle.__doc__

__version__ = "3.0.0"

__all__ = [  # noqa
    "__version__",
    "Pickler",
    "CloudPickler",
    "dumps",
    "loads",
    "dump",
    "load",
    "register_pickle_by_value",
    "unregister_pickle_by_value",
]
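The __all__ list above is the whole public surface of the vendored cloudpickle 3.0.0. A minimal round-trip sketch against that API (assumes a standalone cloudpickle install; inside joblib the same module is reachable as joblib.externals.cloudpickle):

```python
import pickle

import cloudpickle

def make_adder(n):
    # A locally defined closure: the stdlib pickler rejects it,
    # cloudpickle serializes it by value.
    return lambda x: x + n

payload = cloudpickle.dumps(make_adder(5))

# No matching unpickler is needed: standard pickle.loads suffices.
add_five = pickle.loads(payload)
assert add_five(3) == 8
```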
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc ADDED (binary, 413 Bytes)
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc ADDED (binary, 36.9 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc ADDED (binary, 603 Bytes)
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py ADDED
@@ -0,0 +1,1487 @@
"""Pickler class to extend the standard pickle.Pickler functionality

The main objective is to make it natural to perform distributed computing on
clusters (such as PySpark, Dask, Ray...) with interactively defined code
(functions, classes, ...) written in notebooks or console.

In particular this pickler adds the following features:
- serialize interactively-defined or locally-defined functions, classes,
  enums, typevars, lambdas and nested functions to compiled byte code;
- deal with some other non-serializable objects in an ad-hoc manner where
  applicable.

This pickler is therefore meant to be used for the communication between short
lived Python processes running the same version of Python and libraries. In
particular, it is not meant to be used for long term storage of Python objects.

It does not include an unpickler, as standard Python unpickling suffices.

This module was extracted from the `cloud` package, developed by `PiCloud, Inc.
<https://web.archive.org/web/20140626004012/http://www.picloud.com/>`_.

Copyright (c) 2012-now, CloudPickle developers and contributors.
Copyright (c) 2012, Regents of the University of California.
Copyright (c) 2009 `PiCloud, Inc. <https://web.archive.org/web/20140626004012/http://www.picloud.com/>`_.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of the University of California, Berkeley nor the
      names of its contributors may be used to endorse or promote
      products derived from this software without specific prior written
      permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""

import _collections_abc
from collections import ChainMap, OrderedDict
import abc
import builtins
import copyreg
import dataclasses
import dis
from enum import Enum
import io
import itertools
import logging
import opcode
import pickle
from pickle import _getattribute
import platform
import struct
import sys
import threading
import types
import typing
import uuid
import warnings
import weakref

# The following import is required to be imported in the cloudpickle
# namespace to be able to load pickle files generated with older versions of
# cloudpickle. See: tests/test_backward_compat.py
from types import CellType  # noqa: F401


# cloudpickle is meant for inter process communication: we expect all
# communicating processes to run the same Python version hence we favor
# communication speed over compatibility:
DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL

# Names of modules whose resources should be treated as dynamic.
_PICKLE_BY_VALUE_MODULES = set()

# Track the provenance of reconstructed dynamic classes to make it possible to
# reconstruct instances from the matching singleton class definition when
# appropriate and preserve the usual "isinstance" semantics of Python objects.
_DYNAMIC_CLASS_TRACKER_BY_CLASS = weakref.WeakKeyDictionary()
_DYNAMIC_CLASS_TRACKER_BY_ID = weakref.WeakValueDictionary()
_DYNAMIC_CLASS_TRACKER_LOCK = threading.Lock()

PYPY = platform.python_implementation() == "PyPy"

builtin_code_type = None
if PYPY:
    # builtin-code objects only exist in pypy
    builtin_code_type = type(float.__new__.__code__)

_extract_code_globals_cache = weakref.WeakKeyDictionary()
def _get_or_create_tracker_id(class_def):
    with _DYNAMIC_CLASS_TRACKER_LOCK:
        class_tracker_id = _DYNAMIC_CLASS_TRACKER_BY_CLASS.get(class_def)
        if class_tracker_id is None:
            class_tracker_id = uuid.uuid4().hex
            _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id
            _DYNAMIC_CLASS_TRACKER_BY_ID[class_tracker_id] = class_def
    return class_tracker_id


def _lookup_class_or_track(class_tracker_id, class_def):
    if class_tracker_id is not None:
        with _DYNAMIC_CLASS_TRACKER_LOCK:
            class_def = _DYNAMIC_CLASS_TRACKER_BY_ID.setdefault(
                class_tracker_id, class_def
            )
            _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id
    return class_def
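An aside on what this tracker machinery achieves, as a sketch (assumes a standalone cloudpickle install; Point is a made-up dynamic class): because both payloads carry the same tracker id, unpickling them in one process resolves to a single class object rather than two lookalikes, which is what keeps isinstance working.

```python
import pickle

import cloudpickle

# A "dynamic" class: defined at runtime rather than in an importable module.
Point = type("Point", (), {"x": 0})

blob1 = cloudpickle.dumps(Point)
blob2 = cloudpickle.dumps(Point)

# Both payloads embed the same class_tracker_id, so _lookup_class_or_track
# resolves them to one shared class definition.
A = pickle.loads(blob1)
B = pickle.loads(blob2)
assert A is B
assert isinstance(A(), B)
```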
def register_pickle_by_value(module):
    """Register a module to make it functions and classes picklable by value.

    By default, functions and classes that are attributes of an importable
    module are to be pickled by reference, that is relying on re-importing
    the attribute from the module at load time.

    If `register_pickle_by_value(module)` is called, all its functions and
    classes are subsequently to be pickled by value, meaning that they can
    be loaded in Python processes where the module is not importable.

    This is especially useful when developing a module in a distributed
    execution environment: restarting the client Python process with the new
    source code is enough: there is no need to re-install the new version
    of the module on all the worker nodes nor to restart the workers.

    Note: this feature is considered experimental. See the cloudpickle
    README.md file for more details and limitations.
    """
    if not isinstance(module, types.ModuleType):
        raise ValueError(f"Input should be a module object, got {str(module)} instead")
    # In the future, cloudpickle may need a way to access any module registered
    # for pickling by value in order to introspect relative imports inside
    # functions pickled by value. (see
    # https://github.com/cloudpipe/cloudpickle/pull/417#issuecomment-873684633).
    # This access can be ensured by checking that module is present in
    # sys.modules at registering time and assuming that it will still be in
    # there when accessed during pickling. Another alternative would be to
    # store a weakref to the module. Even though cloudpickle does not implement
    # this introspection yet, in order to avoid a possible breaking change
    # later, we still enforce the presence of module inside sys.modules.
    if module.__name__ not in sys.modules:
        raise ValueError(
            f"{module} was not imported correctly, have you used an "
            "`import` statement to access it?"
        )
    _PICKLE_BY_VALUE_MODULES.add(module.__name__)


def unregister_pickle_by_value(module):
    """Unregister that the input module should be pickled by value."""
    if not isinstance(module, types.ModuleType):
        raise ValueError(f"Input should be a module object, got {str(module)} instead")
    if module.__name__ not in _PICKLE_BY_VALUE_MODULES:
        raise ValueError(f"{module} is not registered for pickle by value")
    else:
        _PICKLE_BY_VALUE_MODULES.remove(module.__name__)


def list_registry_pickle_by_value():
    return _PICKLE_BY_VALUE_MODULES.copy()


def _is_registered_pickle_by_value(module):
    module_name = module.__name__
    if module_name in _PICKLE_BY_VALUE_MODULES:
        return True
    while True:
        parent_name = module_name.rsplit(".", 1)[0]
        if parent_name == module_name:
            break
        if parent_name in _PICKLE_BY_VALUE_MODULES:
            return True
        module_name = parent_name
    return False


def _whichmodule(obj, name):
    """Find the module an object belongs to.

    This function differs from ``pickle.whichmodule`` in two ways:
    - it does not mangle the cases where obj's module is __main__ and obj was
      not found in any module.
    - Errors arising during module introspection are ignored, as those errors
      are considered unwanted side effects.
    """
    module_name = getattr(obj, "__module__", None)

    if module_name is not None:
        return module_name
    # Protect the iteration by using a copy of sys.modules against dynamic
    # modules that trigger imports of other modules upon calls to getattr or
    # other threads importing at the same time.
    for module_name, module in sys.modules.copy().items():
        # Some modules such as coverage can inject non-module objects inside
        # sys.modules
        if (
            module_name == "__main__"
            or module is None
            or not isinstance(module, types.ModuleType)
        ):
            continue
        try:
            if _getattribute(module, name)[0] is obj:
                return module_name
        except Exception:
            pass
    return None


def _should_pickle_by_reference(obj, name=None):
    """Test whether an function or a class should be pickled by reference

    Pickling by reference means by that the object (typically a function or a
    class) is an attribute of a module that is assumed to be importable in the
    target Python environment. Loading will therefore rely on importing the
    module and then calling `getattr` on it to access the function or class.

    Pickling by reference is the only option to pickle functions and classes
    in the standard library. In cloudpickle the alternative option is to
    pickle by value (for instance for interactively or locally defined
    functions and classes or for attributes of modules that have been
    explicitly registered to be pickled by value.
    """
    if isinstance(obj, types.FunctionType) or issubclass(type(obj), type):
        module_and_name = _lookup_module_and_qualname(obj, name=name)
        if module_and_name is None:
            return False
        module, name = module_and_name
        return not _is_registered_pickle_by_value(module)

    elif isinstance(obj, types.ModuleType):
        # We assume that sys.modules is primarily used as a cache mechanism for
        # the Python import machinery. Checking if a module has been added in
        # is sys.modules therefore a cheap and simple heuristic to tell us
        # whether we can assume that a given module could be imported by name
        # in another Python process.
        if _is_registered_pickle_by_value(obj):
            return False
        return obj.__name__ in sys.modules
    else:
        raise TypeError(
            "cannot check importability of {} instances".format(type(obj).__name__)
        )


def _lookup_module_and_qualname(obj, name=None):
    if name is None:
        name = getattr(obj, "__qualname__", None)
    if name is None:  # pragma: no cover
        # This used to be needed for Python 2.7 support but is probably not
        # needed anymore. However we keep the __name__ introspection in case
        # users of cloudpickle rely on this old behavior for unknown reasons.
        name = getattr(obj, "__name__", None)

    module_name = _whichmodule(obj, name)

    if module_name is None:
        # In this case, obj.__module__ is None AND obj was not found in any
        # imported module. obj is thus treated as dynamic.
        return None

    if module_name == "__main__":
        return None

    # Note: if module_name is in sys.modules, the corresponding module is
    # assumed importable at unpickling time. See #357
    module = sys.modules.get(module_name, None)
    if module is None:
        # The main reason why obj's module would not be imported is that this
        # module has been dynamically created, using for example
        # types.ModuleType. The other possibility is that module was removed
        # from sys.modules after obj was created/imported. But this case is not
        # supported, as the standard pickle does not support it either.
        return None

    try:
        obj2, parent = _getattribute(module, name)
    except AttributeError:
        # obj was not found inside the module it points to
        return None
    if obj2 is not obj:
        return None
    return module, name
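A usage sketch for the register/unregister API above (mymodule and mymodule.process are hypothetical names standing in for a local module the workers do not have installed):

```python
import cloudpickle
import mymodule  # hypothetical local module, not installed on the workers

# Functions from importable modules are normally pickled by reference
# (module name + qualname). Registering the module flips them to by-value,
# so the payload embeds the code itself.
cloudpickle.register_pickle_by_value(mymodule)
payload = cloudpickle.dumps(mymodule.process)  # hypothetical function

print(cloudpickle.list_registry_pickle_by_value())  # {'mymodule'}
cloudpickle.unregister_pickle_by_value(mymodule)
```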
def _extract_code_globals(co):
    """Find all globals names read or written to by codeblock co."""
    out_names = _extract_code_globals_cache.get(co)
    if out_names is None:
        # We use a dict with None values instead of a set to get a
        # deterministic order and avoid introducing non-deterministic pickle
        # bytes as a results.
        out_names = {name: None for name in _walk_global_ops(co)}

        # Declaring a function inside another one using the "def ..." syntax
        # generates a constant code object corresponding to the one of the
        # nested function's As the nested function may itself need global
        # variables, we need to introspect its code, extract its globals, (look
        # for code object in it's co_consts attribute..) and add the result to
        # code_globals
        if co.co_consts:
            for const in co.co_consts:
                if isinstance(const, types.CodeType):
                    out_names.update(_extract_code_globals(const))

        _extract_code_globals_cache[co] = out_names

    return out_names


def _find_imported_submodules(code, top_level_dependencies):
    """Find currently imported submodules used by a function.

    Submodules used by a function need to be detected and referenced for the
    function to work correctly at depickling time. Because submodules can be
    referenced as attribute of their parent package (``package.submodule``), we
    need a special introspection technique that does not rely on GLOBAL-related
    opcodes to find references of them in a code object.

    Example:
    ```
    import concurrent.futures
    import cloudpickle
    def func():
        x = concurrent.futures.ThreadPoolExecutor
    if __name__ == '__main__':
        cloudpickle.dumps(func)
    ```
    The globals extracted by cloudpickle in the function's state include the
    concurrent package, but not its submodule (here, concurrent.futures), which
    is the module used by func. Find_imported_submodules will detect the usage
    of concurrent.futures. Saving this module alongside with func will ensure
    that calling func once depickled does not fail due to concurrent.futures
    not being imported
    """

    subimports = []
    # check if any known dependency is an imported package
    for x in top_level_dependencies:
        if (
            isinstance(x, types.ModuleType)
            and hasattr(x, "__package__")
            and x.__package__
        ):
            # check if the package has any currently loaded sub-imports
            prefix = x.__name__ + "."
            # A concurrent thread could mutate sys.modules,
            # make sure we iterate over a copy to avoid exceptions
            for name in list(sys.modules):
                # Older versions of pytest will add a "None" module to
                # sys.modules.
                if name is not None and name.startswith(prefix):
                    # check whether the function can address the sub-module
                    tokens = set(name[len(prefix) :].split("."))
                    if not tokens - set(code.co_names):
                        subimports.append(sys.modules[name])
    return subimports


# relevant opcodes
STORE_GLOBAL = opcode.opmap["STORE_GLOBAL"]
DELETE_GLOBAL = opcode.opmap["DELETE_GLOBAL"]
LOAD_GLOBAL = opcode.opmap["LOAD_GLOBAL"]
GLOBAL_OPS = (STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL)
HAVE_ARGUMENT = dis.HAVE_ARGUMENT
EXTENDED_ARG = dis.EXTENDED_ARG


_BUILTIN_TYPE_NAMES = {}
for k, v in types.__dict__.items():
    if type(v) is type:
        _BUILTIN_TYPE_NAMES[v] = k


def _builtin_type(name):
    if name == "ClassType":  # pragma: no cover
        # Backward compat to load pickle files generated with cloudpickle
        # < 1.3 even if loading pickle files from older versions is not
        # officially supported.
        return type
    return getattr(types, name)


def _walk_global_ops(code):
    """Yield referenced name for global-referencing instructions in code."""
    for instr in dis.get_instructions(code):
        op = instr.opcode
        if op in GLOBAL_OPS:
            yield instr.argval
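The opcode scan above is plain stdlib machinery. A self-contained sketch of the same idea (no cloudpickle needed) that reports which global names a function's bytecode touches:

```python
import dis

GLOBAL_OPS = {"LOAD_GLOBAL", "STORE_GLOBAL", "DELETE_GLOBAL"}

def global_names(func):
    # Same scan as _walk_global_ops: iterate over the bytecode instructions
    # and collect the names used by global-referencing opcodes.
    return {
        instr.argval
        for instr in dis.get_instructions(func.__code__)
        if instr.opname in GLOBAL_OPS
    }

offset = 10

def shift(x):
    return x + offset  # 'offset' resolves via LOAD_GLOBAL

print(global_names(shift))  # {'offset'}
```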
def _extract_class_dict(cls):
    """Retrieve a copy of the dict of a class without the inherited method."""
    clsdict = dict(cls.__dict__)  # copy dict proxy to a dict
    if len(cls.__bases__) == 1:
        inherited_dict = cls.__bases__[0].__dict__
    else:
        inherited_dict = {}
        for base in reversed(cls.__bases__):
            inherited_dict.update(base.__dict__)
    to_remove = []
    for name, value in clsdict.items():
        try:
            base_value = inherited_dict[name]
            if value is base_value:
                to_remove.append(name)
        except KeyError:
            pass
    for name in to_remove:
        clsdict.pop(name)
    return clsdict


def is_tornado_coroutine(func):
    """Return whether `func` is a Tornado coroutine function.

    Running coroutines are not supported.
    """
    warnings.warn(
        "is_tornado_coroutine is deprecated in cloudpickle 3.0 and will be "
        "removed in cloudpickle 4.0. Use tornado.gen.is_coroutine_function "
        "directly instead.",
        category=DeprecationWarning,
    )
    if "tornado.gen" not in sys.modules:
        return False
    gen = sys.modules["tornado.gen"]
    if not hasattr(gen, "is_coroutine_function"):
        # Tornado version is too old
        return False
    return gen.is_coroutine_function(func)


def subimport(name):
    # We cannot do simply: `return __import__(name)`: Indeed, if ``name`` is
    # the name of a submodule, __import__ will return the top-level root module
    # of this submodule. For instance, __import__('os.path') returns the `os`
    # module.
    __import__(name)
    return sys.modules[name]


def dynamic_subimport(name, vars):
    mod = types.ModuleType(name)
    mod.__dict__.update(vars)
    mod.__dict__["__builtins__"] = builtins.__dict__
    return mod


def _get_cell_contents(cell):
    try:
        return cell.cell_contents
    except ValueError:
        # Handle empty cells explicitly with a sentinel value.
        return _empty_cell_value


def instance(cls):
    """Create a new instance of a class.

    Parameters
    ----------
    cls : type
        The class to create an instance of.

    Returns
    -------
    instance : cls
        A new instance of ``cls``.
    """
    return cls()


@instance
class _empty_cell_value:
    """Sentinel for empty closures."""

    @classmethod
    def __reduce__(cls):
        return cls.__name__


def _make_function(code, globals, name, argdefs, closure):
    # Setting __builtins__ in globals is needed for nogil CPython.
    globals["__builtins__"] = __builtins__
    return types.FunctionType(code, globals, name, argdefs, closure)


def _make_empty_cell():
    if False:
        # trick the compiler into creating an empty cell in our lambda
        cell = None
        raise AssertionError("this route should not be executed")

    return (lambda: cell).__closure__[0]


def _make_cell(value=_empty_cell_value):
    cell = _make_empty_cell()
    if value is not _empty_cell_value:
        cell.cell_contents = value
    return cell
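The dead-branch assignment in _make_empty_cell above is a deliberate compiler trick, not leftover code. A standalone sketch of the same trick (on Python 3.8+, calling types.CellType() is an alternative way to get an empty cell):

```python
def make_empty_cell():
    if False:
        # Never runs, but the assignment makes `cell` a local of this
        # function, so the lambda below captures it as a closure cell.
        cell = None
    return (lambda: cell).__closure__[0]

c = make_empty_cell()
c.cell_contents = 42  # cells are writable from Python code since 3.7
print(c.cell_contents)  # 42
```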
def _make_skeleton_class(
    type_constructor, name, bases, type_kwargs, class_tracker_id, extra
):
    """Build dynamic class with an empty __dict__ to be filled once memoized

    If class_tracker_id is not None, try to lookup an existing class definition
    matching that id. If none is found, track a newly reconstructed class
    definition under that id so that other instances stemming from the same
    class id will also reuse this class definition.

    The "extra" variable is meant to be a dict (or None) that can be used for
    forward compatibility shall the need arise.
    """
    skeleton_class = types.new_class(
        name, bases, {"metaclass": type_constructor}, lambda ns: ns.update(type_kwargs)
    )
    return _lookup_class_or_track(class_tracker_id, skeleton_class)


def _make_skeleton_enum(
    bases, name, qualname, members, module, class_tracker_id, extra
):
    """Build dynamic enum with an empty __dict__ to be filled once memoized

    The creation of the enum class is inspired by the code of
    EnumMeta._create_.

    If class_tracker_id is not None, try to lookup an existing enum definition
    matching that id. If none is found, track a newly reconstructed enum
    definition under that id so that other instances stemming from the same
    class id will also reuse this enum definition.

    The "extra" variable is meant to be a dict (or None) that can be used for
    forward compatibility shall the need arise.
    """
    # enums always inherit from their base Enum class at the last position in
    # the list of base classes:
    enum_base = bases[-1]
    metacls = enum_base.__class__
    classdict = metacls.__prepare__(name, bases)

    for member_name, member_value in members.items():
        classdict[member_name] = member_value
    enum_class = metacls.__new__(metacls, name, bases, classdict)
    enum_class.__module__ = module
    enum_class.__qualname__ = qualname

    return _lookup_class_or_track(class_tracker_id, enum_class)


def _make_typevar(name, bound, constraints, covariant, contravariant, class_tracker_id):
    tv = typing.TypeVar(
        name,
        *constraints,
        bound=bound,
        covariant=covariant,
        contravariant=contravariant,
    )
    return _lookup_class_or_track(class_tracker_id, tv)


def _decompose_typevar(obj):
    return (
        obj.__name__,
        obj.__bound__,
        obj.__constraints__,
        obj.__covariant__,
        obj.__contravariant__,
        _get_or_create_tracker_id(obj),
    )


def _typevar_reduce(obj):
    # TypeVar instances require the module information hence why we
    # are not using the _should_pickle_by_reference directly
    module_and_name = _lookup_module_and_qualname(obj, name=obj.__name__)

    if module_and_name is None:
        return (_make_typevar, _decompose_typevar(obj))
    elif _is_registered_pickle_by_value(module_and_name[0]):
        return (_make_typevar, _decompose_typevar(obj))

    return (getattr, module_and_name)


def _get_bases(typ):
    if "__orig_bases__" in getattr(typ, "__dict__", {}):
        # For generic types (see PEP 560)
        # Note that simply checking `hasattr(typ, '__orig_bases__')` is not
        # correct. Subclasses of a fully-parameterized generic class does not
        # have `__orig_bases__` defined, but `hasattr(typ, '__orig_bases__')`
        # will return True because it's defined in the base class.
        bases_attr = "__orig_bases__"
    else:
        # For regular class objects
        bases_attr = "__bases__"
    return getattr(typ, bases_attr)


def _make_dict_keys(obj, is_ordered=False):
    if is_ordered:
        return OrderedDict.fromkeys(obj).keys()
    else:
        return dict.fromkeys(obj).keys()


def _make_dict_values(obj, is_ordered=False):
    if is_ordered:
        return OrderedDict((i, _) for i, _ in enumerate(obj)).values()
    else:
        return {i: _ for i, _ in enumerate(obj)}.values()


def _make_dict_items(obj, is_ordered=False):
    if is_ordered:
        return OrderedDict(obj).items()
    else:
        return obj.items()
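_make_skeleton_class above splits class reconstruction into two phases: build an empty shell with types.new_class, fill in the class dict later once the shell is memoized. A stdlib-only sketch of that two-phase pattern (Greeter is a made-up name):

```python
import types

# Phase 1: an empty skeleton, the way _make_skeleton_class builds one.
Greeter = types.new_class(
    "Greeter", (object,), {}, lambda ns: ns.update({"__module__": "__main__"})
)

# Phase 2: populate the class afterwards. Deferring this step lets a pickler
# memoize the class before its contents, which is what makes methods that
# refer back to their own class (a reference cycle) reconstructible.
def greet(self):
    return "hello from " + type(self).__name__

Greeter.greet = greet
print(Greeter().greet())  # hello from Greeter
```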
# COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS
|
| 644 |
+
# -------------------------------------------------
|
| 645 |
+
|
| 646 |
+
|
| 647 |
+
def _class_getnewargs(obj):
|
| 648 |
+
type_kwargs = {}
|
| 649 |
+
if "__module__" in obj.__dict__:
|
| 650 |
+
type_kwargs["__module__"] = obj.__module__
|
| 651 |
+
|
| 652 |
+
__dict__ = obj.__dict__.get("__dict__", None)
|
| 653 |
+
if isinstance(__dict__, property):
|
| 654 |
+
type_kwargs["__dict__"] = __dict__
|
| 655 |
+
|
| 656 |
+
return (
|
| 657 |
+
type(obj),
|
| 658 |
+
obj.__name__,
|
| 659 |
+
_get_bases(obj),
|
| 660 |
+
type_kwargs,
|
| 661 |
+
_get_or_create_tracker_id(obj),
|
| 662 |
+
None,
|
| 663 |
+
)
|
| 664 |
+
|
| 665 |
+
|
| 666 |
+
def _enum_getnewargs(obj):
|
| 667 |
+
members = {e.name: e.value for e in obj}
|
| 668 |
+
return (
|
| 669 |
+
obj.__bases__,
|
| 670 |
+
obj.__name__,
|
| 671 |
+
obj.__qualname__,
|
| 672 |
+
members,
|
| 673 |
+
obj.__module__,
|
| 674 |
+
_get_or_create_tracker_id(obj),
|
| 675 |
+
None,
|
| 676 |
+
)
|
| 677 |
+
|
| 678 |
+
|
| 679 |
+
# COLLECTION OF OBJECTS RECONSTRUCTORS
|
| 680 |
+
# ------------------------------------
|
| 681 |
+
def _file_reconstructor(retval):
|
| 682 |
+
return retval
|
| 683 |
+
|
| 684 |
+
|
| 685 |
+
# COLLECTION OF OBJECTS STATE GETTERS
# -----------------------------------


def _function_getstate(func):
    # - Put func's dynamic attributes (stored in func.__dict__) in state. These
    #   attributes will be restored at unpickling time using
    #   f.__dict__.update(state)
    # - Put func's members into slotstate. Such attributes will be restored at
    #   unpickling time by iterating over slotstate and calling setattr(func,
    #   slotname, slotvalue)
    slotstate = {
        "__name__": func.__name__,
        "__qualname__": func.__qualname__,
        "__annotations__": func.__annotations__,
        "__kwdefaults__": func.__kwdefaults__,
        "__defaults__": func.__defaults__,
        "__module__": func.__module__,
        "__doc__": func.__doc__,
        "__closure__": func.__closure__,
    }

    f_globals_ref = _extract_code_globals(func.__code__)
    f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in func.__globals__}

    if func.__closure__ is not None:
        closure_values = list(map(_get_cell_contents, func.__closure__))
    else:
        closure_values = ()

    # Extract currently-imported submodules used by func. Storing these modules
    # in a dedicated _cloudpickle_submodules attribute of the object's state
    # will trigger the side effect of importing these modules at unpickling
    # time (which is necessary for func to work correctly once unpickled).
    slotstate["_cloudpickle_submodules"] = _find_imported_submodules(
        func.__code__, itertools.chain(f_globals.values(), closure_values)
    )
    slotstate["__globals__"] = f_globals

    state = func.__dict__
    return state, slotstate


def _class_getstate(obj):
    clsdict = _extract_class_dict(obj)
    clsdict.pop("__weakref__", None)

    if issubclass(type(obj), abc.ABCMeta):
        # If obj is an instance of an ABCMeta subclass, don't pickle the
        # cache/negative caches populated during isinstance/issubclass
        # checks, but pickle the list of registered subclasses of obj.
        clsdict.pop("_abc_cache", None)
        clsdict.pop("_abc_negative_cache", None)
        clsdict.pop("_abc_negative_cache_version", None)
        registry = clsdict.pop("_abc_registry", None)
        if registry is None:
            # The abc caches and registered subclasses of a
            # class are bundled into the single _abc_impl attribute
            clsdict.pop("_abc_impl", None)
            (registry, _, _, _) = abc._get_dump(obj)

            clsdict["_abc_impl"] = [subclass_weakref() for subclass_weakref in registry]
        else:
            # In the above if clause, registry is a set of weakrefs -- in
            # this case, registry is a WeakSet
            clsdict["_abc_impl"] = [type_ for type_ in registry]

    if "__slots__" in clsdict:
        # pickle string length optimization: member descriptors of obj are
        # created automatically from obj's __slots__ attribute, no need to
        # save them in obj's state
        if isinstance(obj.__slots__, str):
            clsdict.pop(obj.__slots__)
        else:
            for k in obj.__slots__:
                clsdict.pop(k, None)

    clsdict.pop("__dict__", None)  # unpicklable property object

    return (clsdict, {})


def _enum_getstate(obj):
    clsdict, slotstate = _class_getstate(obj)

    members = {e.name: e.value for e in obj}
    # Clean up the clsdict that will be passed to _make_skeleton_enum:
    # these attributes are already handled by the metaclass.
    for attrname in [
        "_generate_next_value_",
        "_member_names_",
        "_member_map_",
        "_member_type_",
        "_value2member_map_",
    ]:
        clsdict.pop(attrname, None)
    for member in members:
        clsdict.pop(member)
        # Special handling of Enum subclasses
    return clsdict, slotstate
# COLLECTIONS OF OBJECTS REDUCERS
# -------------------------------
# A reducer is a function taking a single argument (obj), and that returns a
# tuple with all the necessary data to re-construct obj. Apart from a few
# exceptions (list, dict, bytes, int, etc.), a reducer is necessary to
# correctly pickle an object.
# While many built-in objects (Exception objects, instances of the "object"
# class, etc.) are shipped with their own built-in reducer (invoked using
# obj.__reduce__), some do not. The following methods were created to "fill
# these holes".


def _code_reduce(obj):
    """code object reducer."""
    # If you are not sure about the order of arguments, take a look at help
    # of the specific type from types, for example:
    # >>> from types import CodeType
    # >>> help(CodeType)
    if hasattr(obj, "co_exceptiontable"):
        # Python 3.11 and later: there are some new attributes
        # related to the enhanced exceptions.
        args = (
            obj.co_argcount,
            obj.co_posonlyargcount,
            obj.co_kwonlyargcount,
            obj.co_nlocals,
            obj.co_stacksize,
            obj.co_flags,
            obj.co_code,
            obj.co_consts,
            obj.co_names,
            obj.co_varnames,
            obj.co_filename,
            obj.co_name,
            obj.co_qualname,
            obj.co_firstlineno,
            obj.co_linetable,
            obj.co_exceptiontable,
            obj.co_freevars,
            obj.co_cellvars,
        )
    elif hasattr(obj, "co_linetable"):
        # Python 3.10 and later: obj.co_lnotab is deprecated and the
        # constructor expects obj.co_linetable instead.
        args = (
            obj.co_argcount,
            obj.co_posonlyargcount,
            obj.co_kwonlyargcount,
            obj.co_nlocals,
            obj.co_stacksize,
            obj.co_flags,
            obj.co_code,
            obj.co_consts,
            obj.co_names,
            obj.co_varnames,
            obj.co_filename,
            obj.co_name,
            obj.co_firstlineno,
            obj.co_linetable,
            obj.co_freevars,
            obj.co_cellvars,
        )
    elif hasattr(obj, "co_nmeta"):  # pragma: no cover
        # "nogil" Python: modified attributes from 3.9
        args = (
            obj.co_argcount,
            obj.co_posonlyargcount,
            obj.co_kwonlyargcount,
            obj.co_nlocals,
            obj.co_framesize,
            obj.co_ndefaultargs,
            obj.co_nmeta,
            obj.co_flags,
            obj.co_code,
            obj.co_consts,
            obj.co_varnames,
            obj.co_filename,
            obj.co_name,
            obj.co_firstlineno,
            obj.co_lnotab,
            obj.co_exc_handlers,
            obj.co_jump_table,
            obj.co_freevars,
            obj.co_cellvars,
            obj.co_free2reg,
            obj.co_cell2reg,
        )
    else:
        # Backward compat for 3.8 and 3.9
        args = (
            obj.co_argcount,
            obj.co_posonlyargcount,
            obj.co_kwonlyargcount,
            obj.co_nlocals,
            obj.co_stacksize,
            obj.co_flags,
            obj.co_code,
            obj.co_consts,
            obj.co_names,
            obj.co_varnames,
            obj.co_filename,
            obj.co_name,
            obj.co_firstlineno,
            obj.co_lnotab,
            obj.co_freevars,
            obj.co_cellvars,
        )
    return types.CodeType, args


def _cell_reduce(obj):
    """Cell (containing values of a function's free variables) reducer."""
    try:
        obj.cell_contents
    except ValueError:  # cell is empty
        return _make_empty_cell, ()
    else:
        return _make_cell, (obj.cell_contents,)


def _classmethod_reduce(obj):
    orig_func = obj.__func__
    return type(obj), (orig_func,)


def _file_reduce(obj):
    """Save a file."""
    import io

    if not hasattr(obj, "name") or not hasattr(obj, "mode"):
        raise pickle.PicklingError(
            "Cannot pickle files that do not map to an actual file"
        )
    if obj is sys.stdout:
        return getattr, (sys, "stdout")
    if obj is sys.stderr:
        return getattr, (sys, "stderr")
    if obj is sys.stdin:
        raise pickle.PicklingError("Cannot pickle standard input")
    if obj.closed:
        raise pickle.PicklingError("Cannot pickle closed files")
    if hasattr(obj, "isatty") and obj.isatty():
        raise pickle.PicklingError("Cannot pickle files that map to tty objects")
    if "r" not in obj.mode and "+" not in obj.mode:
        raise pickle.PicklingError(
            "Cannot pickle files that are not opened for reading: %s" % obj.mode
        )

    name = obj.name

    retval = io.StringIO()

    try:
        # Read the whole file
        curloc = obj.tell()
        obj.seek(0)
        contents = obj.read()
        obj.seek(curloc)
    except OSError as e:
        raise pickle.PicklingError(
            "Cannot pickle file %s as it cannot be read" % name
        ) from e
    retval.write(contents)
    retval.seek(curloc)

    retval.name = name
    return _file_reconstructor, (retval,)
def _getset_descriptor_reduce(obj):
    return getattr, (obj.__objclass__, obj.__name__)


def _mappingproxy_reduce(obj):
    return types.MappingProxyType, (dict(obj),)


def _memoryview_reduce(obj):
    return bytes, (obj.tobytes(),)


def _module_reduce(obj):
    if _should_pickle_by_reference(obj):
        return subimport, (obj.__name__,)
    else:
        # Some external libraries can populate the "__builtins__" entry of a
        # module's `__dict__` with unpicklable objects (see #316). For that
        # reason, we do not attempt to pickle the "__builtins__" entry, and
        # restore a default value for it at unpickling time.
        state = obj.__dict__.copy()
        state.pop("__builtins__", None)
        return dynamic_subimport, (obj.__name__, state)


def _method_reduce(obj):
    return (types.MethodType, (obj.__func__, obj.__self__))


def _logger_reduce(obj):
    return logging.getLogger, (obj.name,)


def _root_logger_reduce(obj):
    return logging.getLogger, ()


def _property_reduce(obj):
    return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__)


def _weakset_reduce(obj):
    return weakref.WeakSet, (list(obj),)


def _dynamic_class_reduce(obj):
    """Save a class that can't be referenced as a module attribute.

    This method is used to serialize classes that are defined inside
    functions, or that otherwise can't be serialized as attribute lookups
    from importable modules.
    """
    if Enum is not None and issubclass(obj, Enum):
        return (
            _make_skeleton_enum,
            _enum_getnewargs(obj),
            _enum_getstate(obj),
            None,
            None,
            _class_setstate,
        )
    else:
        return (
            _make_skeleton_class,
            _class_getnewargs(obj),
            _class_getstate(obj),
            None,
            None,
            _class_setstate,
        )


def _class_reduce(obj):
    """Select the reducer depending on the dynamic nature of the class obj."""
    if obj is type(None):  # noqa
        return type, (None,)
    elif obj is type(Ellipsis):
        return type, (Ellipsis,)
    elif obj is type(NotImplemented):
        return type, (NotImplemented,)
    elif obj in _BUILTIN_TYPE_NAMES:
        return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],)
    elif not _should_pickle_by_reference(obj):
        return _dynamic_class_reduce(obj)
    return NotImplemented


def _dict_keys_reduce(obj):
    # Safer not to ship the full dict, as sending the rest might be
    # unintended and could potentially leak sensitive information.
    return _make_dict_keys, (list(obj),)


def _dict_values_reduce(obj):
    # Safer not to ship the full dict, as sending the rest might be
    # unintended and could potentially leak sensitive information.
    return _make_dict_values, (list(obj),)


def _dict_items_reduce(obj):
    return _make_dict_items, (dict(obj),)


def _odict_keys_reduce(obj):
    # Safer not to ship the full dict, as sending the rest might be
    # unintended and could potentially leak sensitive information.
    return _make_dict_keys, (list(obj), True)


def _odict_values_reduce(obj):
    # Safer not to ship the full dict, as sending the rest might be
    # unintended and could potentially leak sensitive information.
    return _make_dict_values, (list(obj), True)


def _odict_items_reduce(obj):
    return _make_dict_items, (dict(obj), True)


def _dataclass_field_base_reduce(obj):
    return _get_dataclass_field_type_sentinel, (obj.name,)


# COLLECTIONS OF OBJECTS STATE SETTERS
# ------------------------------------
# State setters are called at unpickling time, once the object is created and
# it has to be updated to how it was at pickling time.


def _function_setstate(obj, state):
    """Update the state of a dynamic function.

    As __closure__ and __globals__ are readonly attributes of a function, we
    cannot rely on the native setstate routine of pickle.load_build, that calls
    setattr on items of the slotstate. Instead, we have to modify them inplace.
    """
    state, slotstate = state
    obj.__dict__.update(state)

    obj_globals = slotstate.pop("__globals__")
    obj_closure = slotstate.pop("__closure__")
    # _cloudpickle_submodules is a set of submodules that must be loaded for
    # the pickled function to work correctly at unpickling time. Now that these
    # submodules are unpickled (hence imported), they can be removed from the
    # object's state (the object state only served as a reference holder to
    # these submodules).
    slotstate.pop("_cloudpickle_submodules")

    obj.__globals__.update(obj_globals)
    obj.__globals__["__builtins__"] = __builtins__

    if obj_closure is not None:
        for i, cell in enumerate(obj_closure):
            try:
                value = cell.cell_contents
            except ValueError:  # cell is empty
                continue
            obj.__closure__[i].cell_contents = value

    for k, v in slotstate.items():
        setattr(obj, k, v)


def _class_setstate(obj, state):
    state, slotstate = state
    registry = None
    for attrname, attr in state.items():
        if attrname == "_abc_impl":
            registry = attr
        else:
            setattr(obj, attrname, attr)
    if registry is not None:
        for subclass in registry:
            obj.register(subclass)

    return obj


# COLLECTION OF DATACLASS UTILITIES
# ---------------------------------
# There are some internal sentinel values whose identity must be preserved when
# unpickling dataclass fields. Each sentinel value has a unique name that we can
# use to retrieve its identity at unpickling time.


_DATACLASSE_FIELD_TYPE_SENTINELS = {
    dataclasses._FIELD.name: dataclasses._FIELD,
    dataclasses._FIELD_CLASSVAR.name: dataclasses._FIELD_CLASSVAR,
    dataclasses._FIELD_INITVAR.name: dataclasses._FIELD_INITVAR,
}


def _get_dataclass_field_type_sentinel(name):
    return _DATACLASSE_FIELD_TYPE_SENTINELS[name]
class Pickler(pickle.Pickler):
    # set of reducers defined and used by cloudpickle (private)
    _dispatch_table = {}
    _dispatch_table[classmethod] = _classmethod_reduce
    _dispatch_table[io.TextIOWrapper] = _file_reduce
    _dispatch_table[logging.Logger] = _logger_reduce
    _dispatch_table[logging.RootLogger] = _root_logger_reduce
    _dispatch_table[memoryview] = _memoryview_reduce
    _dispatch_table[property] = _property_reduce
    _dispatch_table[staticmethod] = _classmethod_reduce
    _dispatch_table[CellType] = _cell_reduce
    _dispatch_table[types.CodeType] = _code_reduce
    _dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce
    _dispatch_table[types.ModuleType] = _module_reduce
    _dispatch_table[types.MethodType] = _method_reduce
    _dispatch_table[types.MappingProxyType] = _mappingproxy_reduce
    _dispatch_table[weakref.WeakSet] = _weakset_reduce
    _dispatch_table[typing.TypeVar] = _typevar_reduce
    _dispatch_table[_collections_abc.dict_keys] = _dict_keys_reduce
    _dispatch_table[_collections_abc.dict_values] = _dict_values_reduce
    _dispatch_table[_collections_abc.dict_items] = _dict_items_reduce
    _dispatch_table[type(OrderedDict().keys())] = _odict_keys_reduce
    _dispatch_table[type(OrderedDict().values())] = _odict_values_reduce
    _dispatch_table[type(OrderedDict().items())] = _odict_items_reduce
    _dispatch_table[abc.abstractmethod] = _classmethod_reduce
    _dispatch_table[abc.abstractclassmethod] = _classmethod_reduce
    _dispatch_table[abc.abstractstaticmethod] = _classmethod_reduce
    _dispatch_table[abc.abstractproperty] = _property_reduce
    _dispatch_table[dataclasses._FIELD_BASE] = _dataclass_field_base_reduce

    dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table)

    # function reducers are defined as instance methods of cloudpickle.Pickler
    # objects, as they rely on a cloudpickle.Pickler attribute (globals_ref)
    def _dynamic_function_reduce(self, func):
        """Reduce a function that is not pickleable via attribute lookup."""
        newargs = self._function_getnewargs(func)
        state = _function_getstate(func)
        return (_make_function, newargs, state, None, None, _function_setstate)

    def _function_reduce(self, obj):
        """Reducer for function objects.

        If obj is a top-level attribute of a file-backed module, this reducer
        returns NotImplemented, making the cloudpickle.Pickler fall back to
        traditional pickle.Pickler routines to save obj. Otherwise, it reduces
        obj using a custom cloudpickle reducer designed specifically to handle
        dynamic functions.
        """
        if _should_pickle_by_reference(obj):
            return NotImplemented
        else:
            return self._dynamic_function_reduce(obj)

    def _function_getnewargs(self, func):
        code = func.__code__

        # base_globals represents the future global namespace of func at
        # unpickling time. Looking it up and storing it in
        # cloudpickle.Pickler.globals_ref allows functions sharing the same
        # globals at pickling time to also share them once unpickled, on one
        # condition: since globals_ref is an attribute of a cloudpickle.Pickler
        # instance, and a new cloudpickle.Pickler is created each time
        # cloudpickle.dump or cloudpickle.dumps is called, functions also need
        # to be saved within the same invocation of
        # cloudpickle.dump/cloudpickle.dumps (for example:
        # cloudpickle.dumps([f1, f2])). There is no such limitation when using
        # cloudpickle.Pickler.dump, as long as the multiple invocations are
        # bound to the same cloudpickle.Pickler instance.
        base_globals = self.globals_ref.setdefault(id(func.__globals__), {})

        if base_globals == {}:
            # Add module attributes used to resolve relative imports
            # instructions inside func.
            for k in ["__package__", "__name__", "__path__", "__file__"]:
                if k in func.__globals__:
                    base_globals[k] = func.__globals__[k]

        # Do not bind the free variables before the function is created to
        # avoid infinite recursion.
        if func.__closure__ is None:
            closure = None
        else:
            closure = tuple(_make_empty_cell() for _ in range(len(code.co_freevars)))

        return code, base_globals, None, None, closure

    def dump(self, obj):
        try:
            return super().dump(obj)
        except RuntimeError as e:
            if len(e.args) > 0 and "recursion" in e.args[0]:
                msg = "Could not pickle object as excessively deep recursion required."
                raise pickle.PicklingError(msg) from e
            else:
                raise

    def __init__(self, file, protocol=None, buffer_callback=None):
        if protocol is None:
            protocol = DEFAULT_PROTOCOL
        super().__init__(file, protocol=protocol, buffer_callback=buffer_callback)
        # map ids of functions' __globals__ attributes to namespaces, to ensure
        # that functions sharing the same global namespace at pickling time
        # also share it at unpickling time.
        self.globals_ref = {}
        self.proto = int(protocol)

    if not PYPY:
        # pickle.Pickler is the C implementation of the CPython pickler and
        # therefore we rely on the reducer_override method to customize the
        # pickler behavior.

        # `cloudpickle.Pickler.dispatch` is only left for backward
        # compatibility - note that when using protocol 5,
        # `cloudpickle.Pickler.dispatch` is not an extension of the
        # `pickle._Pickler.dispatch` dictionary, because `cloudpickle.Pickler`
        # subclasses the C-implemented `pickle.Pickler`, which does not expose
        # a `dispatch` attribute. Earlier versions of `cloudpickle.Pickler`
        # used `cloudpickle.Pickler.dispatch` as a class-level attribute
        # storing all reducers implemented by cloudpickle, but the attribute
        # name was not a great choice given that it would collide with a
        # similarly named attribute in the pure-Python `pickle._Pickler`
        # implementation in the standard library.
        dispatch = dispatch_table

        # Implementation of the reducer_override callback, in order to
        # efficiently serialize dynamic functions and classes by subclassing
        # the C-implemented `pickle.Pickler`.
        # TODO: decorrelate reducer_override (which is tied to CPython's
        # implementation - would it make sense to backport it to pypy?) and
        # pickle's protocol 5 (which is implementation agnostic). Currently,
        # the availability of both notions coincides on CPython's pickle, but
        # it may not be the case anymore when pypy implements protocol 5.

        def reducer_override(self, obj):
            """Type-agnostic reducing callback for functions and classes.

            For performance reasons, subclasses of the C `pickle.Pickler`
            class cannot register custom reducers for functions and classes
            in the dispatch_table attribute. Reducers for such types must
            instead be implemented via the special `reducer_override` method.

            Note that this method will be called for any object except a few
            builtin types (int, lists, dicts, etc.), which differs from
            reducers in the Pickler's dispatch_table, each of them being
            invoked for objects of a specific type only.

            This property comes in handy for classes: although most classes are
            instances of the ``type`` metaclass, some of them can be instances
            of other custom metaclasses (such as enum.EnumMeta for example). In
            particular, the metaclass will likely not be known in advance, and
            thus cannot be special-cased using an entry in the dispatch_table.
            reducer_override, among other things, allows us to register a
            reducer that will be called for any class, independently of its
            type.

            Notes:

            * reducer_override has priority over dispatch_table-registered
              reducers.
            * reducer_override can be used to fix other limitations of
              cloudpickle for other types that suffered from type-specific
              reducers, such as Exceptions. See
              https://github.com/cloudpipe/cloudpickle/issues/248
            """
            t = type(obj)
            try:
                is_anyclass = issubclass(t, type)
            except TypeError:  # t is not a class (old Boost; see SF #502085)
                is_anyclass = False

            if is_anyclass:
                return _class_reduce(obj)
            elif isinstance(obj, types.FunctionType):
                return self._function_reduce(obj)
            else:
                # fallback to save_global, including the Pickler's
                # dispatch_table
                return NotImplemented

    else:
        # When reducer_override is not available, hack the pure-Python
        # Pickler's types.FunctionType and type savers. Note: the type saver
        # must override Pickler.save_global, because pickle.py contains a
        # hard-coded call to save_global when pickling meta-classes.
        dispatch = pickle.Pickler.dispatch.copy()

        def _save_reduce_pickle5(
            self,
            func,
            args,
            state=None,
            listitems=None,
            dictitems=None,
            state_setter=None,
            obj=None,
        ):
            save = self.save
            write = self.write
            self.save_reduce(
                func,
                args,
                state=None,
                listitems=listitems,
                dictitems=dictitems,
                obj=obj,
            )
            # backport of the Python 3.8 state_setter pickle operations
            save(state_setter)
            save(obj)  # simple BINGET opcode as obj is already memoized.
            save(state)
            write(pickle.TUPLE2)
            # Trigger a state_setter(obj, state) function call.
            write(pickle.REDUCE)
            # The purpose of state_setter is to carry out an inplace
            # modification of obj. We do not care about what the method might
            # return, so its output is eventually removed from the stack.
            write(pickle.POP)

        def save_global(self, obj, name=None, pack=struct.pack):
            """Main dispatch method.

            The name of this method is somewhat misleading: all types get
            dispatched here.
            """
            if obj is type(None):  # noqa
                return self.save_reduce(type, (None,), obj=obj)
            elif obj is type(Ellipsis):
                return self.save_reduce(type, (Ellipsis,), obj=obj)
            elif obj is type(NotImplemented):
                return self.save_reduce(type, (NotImplemented,), obj=obj)
            elif obj in _BUILTIN_TYPE_NAMES:
                return self.save_reduce(
                    _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj
                )

            if name is not None:
                super().save_global(obj, name=name)
            elif not _should_pickle_by_reference(obj, name=name):
                self._save_reduce_pickle5(*_dynamic_class_reduce(obj), obj=obj)
            else:
                super().save_global(obj, name=name)

        dispatch[type] = save_global

        def save_function(self, obj, name=None):
            """Registered with the dispatch to handle all function types.

            Determines what kind of function obj is (e.g. lambda, defined at
            interactive prompt, etc.) and handles the pickling appropriately.
            """
            if _should_pickle_by_reference(obj, name=name):
                return super().save_global(obj, name=name)
            elif PYPY and isinstance(obj.__code__, builtin_code_type):
                return self.save_pypy_builtin_func(obj)
            else:
                return self._save_reduce_pickle5(
                    *self._dynamic_function_reduce(obj), obj=obj
                )

        def save_pypy_builtin_func(self, obj):
            """Save the PyPy equivalent of builtin functions.

            PyPy does not have the concept of builtin-functions. Instead,
            builtin-functions are simple function instances, but with a
            builtin-code attribute.
            Most of the time, builtin functions should be pickled by attribute.
            But PyPy has flaky support for __qualname__, so some builtin
            functions such as float.__new__ will be classified as dynamic. For
            this reason only, we created this special routine. Because
            builtin-functions are not expected to have closure or globals,
            there is no additional hack (compared to the one already
            implemented in pickle) to protect ourselves from reference cycles.
            A simple (reconstructor, newargs, obj.__dict__) tuple is
            save_reduced. Note also that PyPy improved their support for
            __qualname__ in v3.6, so this routine should be removed when
            cloudpickle supports only PyPy 3.6 and later.
            """
            rv = (
                types.FunctionType,
                (obj.__code__, {}, obj.__name__, obj.__defaults__, obj.__closure__),
                obj.__dict__,
            )
            self.save_reduce(*rv, obj=obj)

        dispatch[types.FunctionType] = save_function


# Shorthands similar to pickle.dump/pickle.dumps


def dump(obj, file, protocol=None, buffer_callback=None):
    """Serialize obj as bytes streamed into file.

    protocol defaults to cloudpickle.DEFAULT_PROTOCOL, which is an alias to
    pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
    speed between processes running the same Python version.

    Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
    compatibility with older versions of Python (although this is not always
    guaranteed to work because cloudpickle relies on some internal
    implementation details that can change from one Python version to the
    next).
    """
    Pickler(file, protocol=protocol, buffer_callback=buffer_callback).dump(obj)


def dumps(obj, protocol=None, buffer_callback=None):
    """Serialize obj as a string of bytes allocated in memory.

    protocol defaults to cloudpickle.DEFAULT_PROTOCOL, which is an alias to
    pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
    speed between processes running the same Python version.

    Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
    compatibility with older versions of Python (although this is not always
    guaranteed to work because cloudpickle relies on some internal
    implementation details that can change from one Python version to the
    next).
    """
    with io.BytesIO() as file:
        cp = Pickler(file, protocol=protocol, buffer_callback=buffer_callback)
        cp.dump(obj)
        return file.getvalue()


# Include pickle's unloading functions in this namespace for convenience.
load, loads = pickle.load, pickle.loads

# Backward compat alias.
CloudPickler = Pickler
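
As a quick orientation for the vendored module above, here is a minimal round-trip sketch using the `dumps`/`loads` shorthands; the lambda and the `factor` variable are illustrative stand-ins, not part of the vendored source.

# Minimal sketch: round-tripping a dynamically defined function with the
# dumps/loads shorthands defined above. `factor` is a hypothetical example.
from joblib.externals.cloudpickle import cloudpickle

factor = 3
scale = lambda x: x * factor  # not importable by reference, so cloudpickle
                              # serializes its code and globals by value

payload = cloudpickle.dumps(scale)     # bytes, highest pickle protocol by default
restored = cloudpickle.loads(payload)  # loads is plain pickle.loads re-exported
assert restored(2) == 6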
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py
ADDED
@@ -0,0 +1,13 @@
"""Compatibility module.

It can be necessary to load files generated by previous versions of cloudpickle
that rely on symbols being defined under the `cloudpickle.cloudpickle_fast`
namespace.

See: tests/test_backward_compat.py
"""
from . import cloudpickle


def __getattr__(name):
    return getattr(cloudpickle, name)
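
A short sketch of what this PEP 562 style module-level `__getattr__` enables: any symbol previously importable from `cloudpickle_fast` resolves against the main `cloudpickle` module, so old pickles keep loading.

# Sketch: attribute access on the compatibility module is delegated to the
# main cloudpickle module by the module-level __getattr__ above.
from joblib.externals.cloudpickle import cloudpickle, cloudpickle_fast

assert cloudpickle_fast.CloudPickler is cloudpickle.CloudPickler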
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__init__.py
ADDED
@@ -0,0 +1,44 @@
r"""The :mod:`loky` module manages a pool of workers that can be re-used across time.
It provides a robust and dynamic implementation of the
:class:`ProcessPoolExecutor` and a function :func:`get_reusable_executor` which
hides the pool management under the hood.
"""
from concurrent.futures import (
    ALL_COMPLETED,
    FIRST_COMPLETED,
    FIRST_EXCEPTION,
    CancelledError,
    Executor,
    TimeoutError,
    as_completed,
    wait,
)

from ._base import Future
from .backend.context import cpu_count
from .backend.reduction import set_loky_pickler
from .reusable_executor import get_reusable_executor
from .cloudpickle_wrapper import wrap_non_picklable_objects
from .process_executor import BrokenProcessPool, ProcessPoolExecutor


__all__ = [
    "get_reusable_executor",
    "cpu_count",
    "wait",
    "as_completed",
    "Future",
    "Executor",
    "ProcessPoolExecutor",
    "BrokenProcessPool",
    "CancelledError",
    "TimeoutError",
    "FIRST_COMPLETED",
    "FIRST_EXCEPTION",
    "ALL_COMPLETED",
    "wrap_non_picklable_objects",
    "set_loky_pickler",
]


__version__ = "3.4.1"
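
A minimal usage sketch of the executor re-use advertised in the docstring; the worker count and timeout are arbitrary example values.

# Sketch: get_reusable_executor returns a shared, reusable process pool.
from joblib.externals.loky import get_reusable_executor

executor = get_reusable_executor(max_workers=2, timeout=10)
assert list(executor.map(abs, [-1, -2, -3])) == [1, 2, 3]

# Calling again with unchanged arguments is expected to reuse the running
# workers instead of spawning a new pool.
assert get_reusable_executor(max_workers=2, timeout=10) is executor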
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc
ADDED
Binary file (738 Bytes)
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc
ADDED
Binary file (3.69 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc
ADDED
Binary file (2.37 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc
ADDED
Binary file (7.5 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/_base.py
ADDED
@@ -0,0 +1,28 @@
###############################################################################
# Modification of concurrent.futures.Future
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from concurrent/futures/_base.py (17/02/2017)
#  * Do not use yield from
#  * Use old super syntax
#
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

from concurrent.futures import Future as _BaseFuture
from concurrent.futures._base import LOGGER


# To make loky._base.Future instances awaitable by concurrent.futures.wait,
# derive our custom Future class from _BaseFuture. _invoke_callbacks is the
# only modification made to this class in loky.
# TODO investigate why using `concurrent.futures.Future` directly does not
# always work in our test suite.
class Future(_BaseFuture):
    def _invoke_callbacks(self):
        for callback in self._done_callbacks:
            try:
                callback(self)
            except BaseException:
                LOGGER.exception(f"exception calling callback for {self!r}")
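
A small sketch of the behavior this override buys: exceptions raised by done-callbacks are logged instead of propagating, so a bad callback cannot break result delivery. The failing lambda is a contrived example.

# Sketch: a raising callback is logged by _invoke_callbacks, not re-raised.
from joblib.externals.loky._base import Future

f = Future()
f.add_done_callback(lambda fut: 1 / 0)  # will raise ZeroDivisionError
f.set_result(42)                        # callback error is only logged
assert f.result() == 42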
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc
ADDED
Binary file (4.25 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc
ADDED
Binary file (10.1 kB)
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py
ADDED
@@ -0,0 +1,43 @@
###############################################################################
# Launch a subprocess using fork+exec and make sure only the needed fds are
# shared between the two processes.
#
# author: Thomas Moreau and Olivier Grisel
#
import os
import sys


def close_fds(keep_fds):  # pragma: no cover
    """Close all the file descriptors except those in keep_fds."""

    # Make sure to keep stdout and stderr open for logging purposes
    keep_fds = {*keep_fds, 1, 2}

    # We try to retrieve all the open fds
    try:
        open_fds = {int(fd) for fd in os.listdir("/proc/self/fd")}
    except FileNotFoundError:
        import resource

        max_nfds = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        open_fds = {*range(max_nfds)}

    for i in open_fds - keep_fds:
        try:
            os.close(i)
        except OSError:
            pass


def fork_exec(cmd, keep_fds, env=None):
    # copy the environment variables to set in the child process
    env = env or {}
    child_env = {**os.environ, **env}

    pid = os.fork()
    if pid == 0:  # pragma: no cover
        close_fds(keep_fds)
        os.execve(sys.executable, cmd, child_env)
    else:
        return pid
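
A rough POSIX-only sketch of driving fork_exec directly; the pipe, the inline `-c` program, and the LOKY_DEMO variable are made-up test fixtures. Note that cmd doubles as argv for os.execve, so it must start with the interpreter path, and any fd to keep must be marked inheritable to survive the exec.

# Sketch (POSIX only): launch a child interpreter sharing a single pipe fd.
import os
import sys
from joblib.externals.loky.backend.fork_exec import fork_exec

r, w = os.pipe()
os.set_inheritable(w, True)  # pipe fds are non-inheritable by default
pid = fork_exec(
    [sys.executable, "-c", "import os; os.write(%d, b'ok')" % w],
    keep_fds=(w,),            # fds 1 and 2 are always kept for logging
    env={"LOKY_DEMO": "1"},   # merged on top of os.environ in the child
)
os.close(w)
assert os.read(r, 2) == b"ok"
os.waitpid(pid, 0)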
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py
ADDED
@@ -0,0 +1,193 @@
###############################################################################
# Popen for LokyProcess.
#
# author: Thomas Moreau and Olivier Grisel
#
import os
import sys
import signal
import pickle
from io import BytesIO
from multiprocessing import util, process
from multiprocessing.connection import wait
from multiprocessing.context import set_spawning_popen

from . import reduction, resource_tracker, spawn


__all__ = ["Popen"]


#
# Wrapper for an fd used while launching a process
#


class _DupFd:
    def __init__(self, fd):
        self.fd = reduction._mk_inheritable(fd)

    def detach(self):
        return self.fd


#
# Start child process using subprocess.Popen
#


class Popen:
    method = "loky"
    DupFd = _DupFd

    def __init__(self, process_obj):
        sys.stdout.flush()
        sys.stderr.flush()
        self.returncode = None
        self._fds = []
        self._launch(process_obj)

    def duplicate_for_child(self, fd):
        self._fds.append(fd)
        return reduction._mk_inheritable(fd)

    def poll(self, flag=os.WNOHANG):
        if self.returncode is None:
            while True:
                try:
                    pid, sts = os.waitpid(self.pid, flag)
                except OSError:
                    # Child process not yet created. See #1731717
                    # e.errno == errno.ECHILD == 10
                    return None
                else:
                    break
            if pid == self.pid:
                if os.WIFSIGNALED(sts):
                    self.returncode = -os.WTERMSIG(sts)
                else:
                    assert os.WIFEXITED(sts)
                    self.returncode = os.WEXITSTATUS(sts)
        return self.returncode

    def wait(self, timeout=None):
        if self.returncode is None:
            if timeout is not None:
                if not wait([self.sentinel], timeout):
                    return None
            # This shouldn't block if wait() returned successfully.
            return self.poll(os.WNOHANG if timeout == 0.0 else 0)
        return self.returncode

    def terminate(self):
        if self.returncode is None:
            try:
                os.kill(self.pid, signal.SIGTERM)
            except ProcessLookupError:
                pass
            except OSError:
                if self.wait(timeout=0.1) is None:
                    raise

    def _launch(self, process_obj):

        tracker_fd = resource_tracker._resource_tracker.getfd()

        fp = BytesIO()
        set_spawning_popen(self)
        try:
            prep_data = spawn.get_preparation_data(
                process_obj._name,
                getattr(process_obj, "init_main_module", True),
            )
            reduction.dump(prep_data, fp)
            reduction.dump(process_obj, fp)

        finally:
            set_spawning_popen(None)

        try:
            parent_r, child_w = os.pipe()
            child_r, parent_w = os.pipe()
            # for fd in self._fds:
            #     _mk_inheritable(fd)

            cmd_python = [sys.executable]
            cmd_python += ["-m", self.__module__]
            cmd_python += ["--process-name", str(process_obj.name)]
            cmd_python += ["--pipe", str(reduction._mk_inheritable(child_r))]
            reduction._mk_inheritable(child_w)
            reduction._mk_inheritable(tracker_fd)
            self._fds += [child_r, child_w, tracker_fd]
            if sys.version_info >= (3, 8) and os.name == "posix":
                mp_tracker_fd = prep_data["mp_tracker_args"]["fd"]
                self.duplicate_for_child(mp_tracker_fd)

            from .fork_exec import fork_exec

            pid = fork_exec(cmd_python, self._fds, env=process_obj.env)
            util.debug(
                f"launched python with pid {pid} and cmd:\n{cmd_python}"
            )
            self.sentinel = parent_r

            method = "getbuffer"
            if not hasattr(fp, method):
                method = "getvalue"
            with os.fdopen(parent_w, "wb") as f:
                f.write(getattr(fp, method)())
            self.pid = pid
        finally:
            if parent_r is not None:
                util.Finalize(self, os.close, (parent_r,))
            for fd in (child_r, child_w):
                if fd is not None:
                    os.close(fd)

    @staticmethod
    def thread_is_spawning():
        return True


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser("Command line parser")
    parser.add_argument(
        "--pipe", type=int, required=True, help="File handle for the pipe"
    )
    parser.add_argument(
        "--process-name",
        type=str,
        default=None,
        help="Identifier for debugging purpose",
    )

    args = parser.parse_args()

    info = {}
    exitcode = 1
    try:
        with os.fdopen(args.pipe, "rb") as from_parent:
            process.current_process()._inheriting = True
            try:
                prep_data = pickle.load(from_parent)
                spawn.prepare(prep_data)
                process_obj = pickle.load(from_parent)
            finally:
                del process.current_process()._inheriting

        exitcode = process_obj._bootstrap()
    except Exception:
        print("\n\n" + "-" * 80)
        print(f"{args.process_name} failed with traceback: ")
        print("-" * 80)
        import traceback

        print(traceback.format_exc())
        print("\n" + "-" * 80)
    finally:
        if from_parent is not None:
            from_parent.close()

        sys.exit(exitcode)
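
The parent/child handshake in _launch and the __main__ block boils down to two consecutive pickles written into a pipe and read back in order; the sketch below replays that framing with in-memory stand-ins for the preparation data and the process object.

# Sketch: two back-to-back pickles on one stream, read back in the same order.
import io
import pickle

fp = io.BytesIO()
prep_data = {"name": "demo"}       # stand-in for spawn.get_preparation_data()
process_obj = ["fake", "process"]  # stand-in for the LokyProcess instance
pickle.dump(prep_data, fp)
pickle.dump(process_obj, fp)

fp.seek(0)                              # the child sees the same byte stream
assert pickle.load(fp) == prep_data     # first load: preparation data
assert pickle.load(fp) == process_obj   # second load: the process object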
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py
ADDED
@@ -0,0 +1,85 @@
###############################################################################
# LokyProcess implementation
#
# authors: Thomas Moreau and Olivier Grisel
#
# based on multiprocessing/process.py (17/02/2017)
#
import sys
from multiprocessing.context import assert_spawning
from multiprocessing.process import BaseProcess


class LokyProcess(BaseProcess):
    _start_method = "loky"

    def __init__(
        self,
        group=None,
        target=None,
        name=None,
        args=(),
        kwargs={},
        daemon=None,
        init_main_module=False,
        env=None,
    ):
        super().__init__(
            group=group,
            target=target,
            name=name,
            args=args,
            kwargs=kwargs,
            daemon=daemon,
        )
        self.env = {} if env is None else env
        self.authkey = self.authkey
        self.init_main_module = init_main_module

    @staticmethod
    def _Popen(process_obj):
        if sys.platform == "win32":
            from .popen_loky_win32 import Popen
        else:
            from .popen_loky_posix import Popen
        return Popen(process_obj)


class LokyInitMainProcess(LokyProcess):
    _start_method = "loky_init_main"

    def __init__(
        self,
        group=None,
        target=None,
        name=None,
        args=(),
        kwargs={},
        daemon=None,
    ):
        super().__init__(
            group=group,
            target=target,
            name=name,
            args=args,
            kwargs=kwargs,
            daemon=daemon,
            init_main_module=True,
        )


#
# We subclass bytes to avoid accidental transmission of auth keys over network
#


class AuthenticationKey(bytes):
    def __reduce__(self):
        try:
            assert_spawning(self)
        except RuntimeError:
            raise TypeError(
                "Pickling an AuthenticationKey object is "
                "disallowed for security reasons"
            )
        return AuthenticationKey, (bytes(self),)
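
A quick sketch of the guard in AuthenticationKey.__reduce__: outside of a spawning context, assert_spawning raises, so any ordinary pickling attempt fails loudly instead of leaking the key. The b"secret" value is a made-up fixture.

# Sketch: pickling an AuthenticationKey outside process spawning raises.
import pickle
from joblib.externals.loky.backend.process import AuthenticationKey

key = AuthenticationKey(b"secret")
try:
    pickle.dumps(key)
    raise AssertionError("should not be reachable")
except TypeError:
    pass  # "Pickling an AuthenticationKey object is disallowed ..."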
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py
ADDED
@@ -0,0 +1,236 @@
###############################################################################
# Queue and SimpleQueue implementation for loky
#
# authors: Thomas Moreau, Olivier Grisel
#
# based on multiprocessing/queues.py (16/02/2017)
#  * Add some custom reducers for the Queues/SimpleQueue to tweak the
#    pickling process. (overload Queue._feed/SimpleQueue.put)
#
import os
import sys
import errno
import weakref
import threading
from multiprocessing import util
from multiprocessing.queues import (
    Full,
    Queue as mp_Queue,
    SimpleQueue as mp_SimpleQueue,
    _sentinel,
)
from multiprocessing.context import assert_spawning

from .reduction import dumps


__all__ = ["Queue", "SimpleQueue", "Full"]


class Queue(mp_Queue):
    def __init__(self, maxsize=0, reducers=None, ctx=None):
        super().__init__(maxsize=maxsize, ctx=ctx)
        self._reducers = reducers

    # Use custom queue set/get state to be able to reduce the custom reducers
    def __getstate__(self):
        assert_spawning(self)
        return (
            self._ignore_epipe,
            self._maxsize,
            self._reader,
            self._writer,
            self._reducers,
            self._rlock,
            self._wlock,
            self._sem,
            self._opid,
        )

    def __setstate__(self, state):
        (
            self._ignore_epipe,
            self._maxsize,
            self._reader,
            self._writer,
            self._reducers,
            self._rlock,
            self._wlock,
            self._sem,
            self._opid,
        ) = state
        if sys.version_info >= (3, 9):
            self._reset()
        else:
            self._after_fork()

    # Overload _start_thread to correctly call our custom _feed
    def _start_thread(self):
        util.debug("Queue._start_thread()")

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(
                self._buffer,
                self._notempty,
                self._send_bytes,
                self._wlock,
                self._writer.close,
                self._reducers,
                self._ignore_epipe,
                self._on_queue_feeder_error,
                self._sem,
            ),
            name="QueueFeederThread",
        )
        self._thread.daemon = True

        util.debug("doing self._thread.start()")
        self._thread.start()
        util.debug("... done self._thread.start()")

        # On process exit we will wait for data to be flushed to pipe.
        #
        # However, if this process created the queue then all
        # processes which use the queue will be descendants of this
        # process. Therefore waiting for the queue to be flushed
        # is pointless once all the child processes have been joined.
        created_by_this_process = self._opid == os.getpid()
        if not self._joincancelled and not created_by_this_process:
            self._jointhread = util.Finalize(
                self._thread,
                Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5,
            )

        # Send sentinel to the thread queue object when garbage collected
        self._close = util.Finalize(
            self,
            Queue._finalize_close,
            [self._buffer, self._notempty],
            exitpriority=10,
        )

    # Overload the _feed methods to use our custom pickling strategy.
    @staticmethod
    def _feed(
        buffer,
        notempty,
        send_bytes,
        writelock,
        close,
        reducers,
        ignore_epipe,
        onerror,
        queue_sem,
    ):
        util.debug("starting thread to feed data to pipe")
        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != "win32":
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        while True:
            try:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while True:
                        obj = bpopleft()
                        if obj is sentinel:
                            util.debug("feeder thread got sentinel -- exiting")
                            close()
                            return

                        # serialize the data before acquiring the lock
                        obj_ = dumps(obj, reducers=reducers)
                        if wacquire is None:
                            send_bytes(obj_)
                        else:
                            wacquire()
                            try:
                                send_bytes(obj_)
                            finally:
                                wrelease()
                        # Remove references early to avoid leaking memory
                        del obj, obj_
                except IndexError:
                    pass
            except BaseException as e:
                if ignore_epipe and getattr(e, "errno", 0) == errno.EPIPE:
                    return
                # Since this runs in a daemon thread the resources it uses
                # may become unusable while the process is cleaning up.
                # We ignore errors which happen after the process has
                # started to clean up.
                if util.is_exiting():
                    util.info(f"error in queue thread: {e}")
                    return
                else:
                    queue_sem.release()
                    onerror(e, obj)

    def _on_queue_feeder_error(self, e, obj):
        """
        Private API hook called when feeding data in the background thread
        raises an exception. For overriding by concurrent.futures.
        """
        import traceback

        traceback.print_exc()


class SimpleQueue(mp_SimpleQueue):
    def __init__(self, reducers=None, ctx=None):
        super().__init__(ctx=ctx)

        # Add possibility to use custom reducers
        self._reducers = reducers

    def close(self):
        self._reader.close()
        self._writer.close()

    # Use custom queue set/get state to be able to reduce the custom reducers
    def __getstate__(self):
        assert_spawning(self)
        return (
            self._reader,
            self._writer,
            self._reducers,
            self._rlock,
            self._wlock,
        )

    def __setstate__(self, state):
        (
            self._reader,
            self._writer,
            self._reducers,
            self._rlock,
            self._wlock,
        ) = state

    # Overload put to use our customizable reducer
|
| 228 |
+
def put(self, obj):
|
| 229 |
+
# serialize the data before acquiring the lock
|
| 230 |
+
obj = dumps(obj, reducers=self._reducers)
|
| 231 |
+
if self._wlock is None:
|
| 232 |
+
# writes to a message oriented win32 pipe are atomic
|
| 233 |
+
self._writer.send_bytes(obj)
|
| 234 |
+
else:
|
| 235 |
+
with self._wlock:
|
| 236 |
+
self._writer.send_bytes(obj)
|
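
A quick sketch (not part of the vendored sources) of what the `reducers` hook above buys: `dumps` installs each entry in the pickler's dispatch table, so a reducer is a callable mapping an instance to a `(rebuild_callable, args)` pair. The `Point` type and its reducer below are hypothetical.

    import pickle
    from joblib.externals.loky.backend.reduction import dumps

    class Point:  # hypothetical user type
        def __init__(self, x, y):
            self.x, self.y = x, y

    def _rebuild_point(x, y):
        return Point(x, y)

    def _reduce_point(p):
        # dispatch_table-style reducer: return (callable, args)
        return _rebuild_point, (p.x, p.y)

    payload = dumps(Point(1, 2), reducers={Point: _reduce_point})
    restored = pickle.loads(payload)
    assert (restored.x, restored.y) == (1, 2)
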
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py
ADDED
@@ -0,0 +1,378 @@
###############################################################################
# Server process to keep track of unlinked resources, like folders and
# semaphores and clean them.
#
# author: Thomas Moreau
#
# adapted from multiprocessing/semaphore_tracker.py (17/02/2017)
#  * include custom spawnv_passfds to start the process
#  * add some VERBOSE logging
#
# TODO: multiprocessing.resource_tracker was contributed to Python 3.8 so
# once loky drops support for Python 3.7 it might be possible to stop
# maintaining this loky-specific fork. As a consequence, it might also be
# possible to stop maintaining the loky.backend.synchronize fork of
# multiprocessing.synchronize.

#
# On Unix we run a server process which keeps track of unlinked
# resources. The server ignores SIGINT and SIGTERM and reads from a
# pipe. The resource_tracker implements a reference counting scheme: each time
# a Python process anticipates the shared usage of a resource by another
# process, it signals the resource_tracker of this shared usage, and in return,
# the resource_tracker increments the resource's reference count by 1.
# Similarly, when access to a resource is closed by a Python process, the
# process notifies the resource_tracker by asking it to decrement the
# resource's reference count by 1. When the reference count drops to 0, the
# resource_tracker attempts to clean up the underlying resource.

# Finally, every other process connected to the resource tracker has a copy of
# the writable end of the pipe used to communicate with it, so the resource
# tracker gets EOF when all other processes have exited. Then the
# resource_tracker process unlinks any remaining leaked resources (with
# reference count above 0).

# For semaphores, this is important because the system only supports a limited
# number of named semaphores, and they will not be automatically removed till
# the next reboot. Without this resource tracker process, "killall python"
# would probably leave unlinked semaphores.

# Note that this behavior differs from CPython's resource_tracker, which only
# implements a list of shared resources, and not a proper refcounting scheme.
# Also, CPython's resource tracker will only attempt to clean up those shared
# resources once all processes connected to the resource tracker have exited.


import os
import shutil
import sys
import signal
import warnings
import threading
from _multiprocessing import sem_unlink
from multiprocessing import util

from . import spawn

if sys.platform == "win32":
    import _winapi
    import msvcrt
    from multiprocessing.reduction import duplicate


__all__ = ["ensure_running", "register", "unregister"]

_HAVE_SIGMASK = hasattr(signal, "pthread_sigmask")
_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)

_CLEANUP_FUNCS = {"folder": shutil.rmtree, "file": os.unlink}

if os.name == "posix":
    _CLEANUP_FUNCS["semlock"] = sem_unlink


VERBOSE = False


class ResourceTracker:
    def __init__(self):
        self._lock = threading.Lock()
        self._fd = None
        self._pid = None

    def getfd(self):
        self.ensure_running()
        return self._fd

    def ensure_running(self):
        """Make sure that resource tracker process is running.

        This can be run from any process. Usually a child process will use
        the resource created by its parent."""
        with self._lock:
            if self._fd is not None:
                # resource tracker was launched before, is it still running?
                if self._check_alive():
                    # => still alive
                    return
                # => dead, launch it again
                os.close(self._fd)
                if os.name == "posix":
                    try:
                        # At this point, the resource_tracker process has been
                        # killed or crashed. Let's remove the process entry
                        # from the process table to avoid zombie processes.
                        os.waitpid(self._pid, 0)
                    except OSError:
                        # The process was terminated or is a child from an
                        # ancestor of the current process.
                        pass
                self._fd = None
                self._pid = None

                warnings.warn(
                    "resource_tracker: process died unexpectedly, "
                    "relaunching. Some folders/semaphores might "
                    "leak."
                )

            fds_to_pass = []
            try:
                fds_to_pass.append(sys.stderr.fileno())
            except Exception:
                pass

            r, w = os.pipe()
            if sys.platform == "win32":
                _r = duplicate(msvcrt.get_osfhandle(r), inheritable=True)
                os.close(r)
                r = _r

            cmd = f"from {main.__module__} import main; main({r}, {VERBOSE})"
            try:
                fds_to_pass.append(r)
                # process will outlive us, so no need to wait on pid
                exe = spawn.get_executable()
                args = [exe, *util._args_from_interpreter_flags(), "-c", cmd]
                util.debug(f"launching resource tracker: {args}")
                # bpo-33613: Register a signal mask that will block the
                # signals. This signal mask will be inherited by the child
                # that is going to be spawned and will protect the child from a
                # race condition that can make the child die before it
                # registers signal handlers for SIGINT and SIGTERM. The mask is
                # unregistered after spawning the child.
                try:
                    if _HAVE_SIGMASK:
                        signal.pthread_sigmask(
                            signal.SIG_BLOCK, _IGNORED_SIGNALS
                        )
                    pid = spawnv_passfds(exe, args, fds_to_pass)
                finally:
                    if _HAVE_SIGMASK:
                        signal.pthread_sigmask(
                            signal.SIG_UNBLOCK, _IGNORED_SIGNALS
                        )
            except BaseException:
                os.close(w)
                raise
            else:
                self._fd = w
                self._pid = pid
            finally:
                if sys.platform == "win32":
                    _winapi.CloseHandle(r)
                else:
                    os.close(r)

    def _check_alive(self):
        """Check for the existence of the resource tracker process."""
        try:
            self._send("PROBE", "", "")
        except BrokenPipeError:
            return False
        else:
            return True

    def register(self, name, rtype):
        """Register a named resource, and increment its refcount."""
        self.ensure_running()
        self._send("REGISTER", name, rtype)

    def unregister(self, name, rtype):
        """Unregister a named resource with resource tracker."""
        self.ensure_running()
        self._send("UNREGISTER", name, rtype)

    def maybe_unlink(self, name, rtype):
        """Decrement the refcount of a resource, and delete it if it hits 0"""
        self.ensure_running()
        self._send("MAYBE_UNLINK", name, rtype)

    def _send(self, cmd, name, rtype):
        if len(name) > 512:
            # posix guarantees that writes to a pipe of less than PIPE_BUF
            # bytes are atomic, and that PIPE_BUF >= 512
            raise ValueError("name too long")
        msg = f"{cmd}:{name}:{rtype}\n".encode("ascii")
        nbytes = os.write(self._fd, msg)
        assert nbytes == len(msg)


_resource_tracker = ResourceTracker()
ensure_running = _resource_tracker.ensure_running
register = _resource_tracker.register
maybe_unlink = _resource_tracker.maybe_unlink
unregister = _resource_tracker.unregister
getfd = _resource_tracker.getfd

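# Illustrative note (not part of the vendored file): the protocol behind the
# helpers above is line-oriented; each call writes one "CMD:name:rtype\n"
# record to the tracker pipe, e.g.
#
#   register("/tmp/joblib_x", "folder")      -> b"REGISTER:/tmp/joblib_x:folder\n"
#   maybe_unlink("/tmp/joblib_x", "folder")  -> b"MAYBE_UNLINK:/tmp/joblib_x:folder\n"
#   _check_alive() probe                     -> b"PROBE::\n"
#
# Records are kept under PIPE_BUF (>= 512 bytes on POSIX), so each write is
# atomic and concurrent writers cannot interleave messages.
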
def main(fd, verbose=0):
    """Run resource tracker."""
    # protect the process from ^C and "killall python" etc
    if verbose:
        util.log_to_stderr(level=util.DEBUG)

    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)

    if _HAVE_SIGMASK:
        signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)

    for f in (sys.stdin, sys.stdout):
        try:
            f.close()
        except Exception:
            pass

    if verbose:
        util.debug("Main resource tracker is running")

    registry = {rtype: {} for rtype in _CLEANUP_FUNCS.keys()}
    try:
        # keep track of registered/unregistered resources
        if sys.platform == "win32":
            fd = msvcrt.open_osfhandle(fd, os.O_RDONLY)
        with open(fd, "rb") as f:
            while True:
                line = f.readline()
                if line == b"":  # EOF
                    break
                try:
                    splitted = line.strip().decode("ascii").split(":")
                    # name can potentially contain separator symbols (for
                    # instance folders on Windows)
                    cmd, name, rtype = (
                        splitted[0],
                        ":".join(splitted[1:-1]),
                        splitted[-1],
                    )

                    if cmd == "PROBE":
                        continue

                    if rtype not in _CLEANUP_FUNCS:
                        raise ValueError(
                            f"Cannot register {name} for automatic cleanup: "
                            f"unknown resource type ({rtype}). Resource type "
                            "should be one of the following: "
                            f"{list(_CLEANUP_FUNCS.keys())}"
                        )

                    if cmd == "REGISTER":
                        if name not in registry[rtype]:
                            registry[rtype][name] = 1
                        else:
                            registry[rtype][name] += 1

                        if verbose:
                            util.debug(
                                "[ResourceTracker] incremented refcount of "
                                f"{rtype} {name} "
                                f"(current {registry[rtype][name]})"
                            )
                    elif cmd == "UNREGISTER":
                        del registry[rtype][name]
                        if verbose:
                            util.debug(
                                f"[ResourceTracker] unregister {name} {rtype}: "
                                f"registry({len(registry)})"
                            )
                    elif cmd == "MAYBE_UNLINK":
                        registry[rtype][name] -= 1
                        if verbose:
                            util.debug(
                                "[ResourceTracker] decremented refcount of "
                                f"{rtype} {name} "
                                f"(current {registry[rtype][name]})"
                            )

                        if registry[rtype][name] == 0:
                            del registry[rtype][name]
                            try:
                                if verbose:
                                    util.debug(
                                        f"[ResourceTracker] unlink {name}"
                                    )
                                _CLEANUP_FUNCS[rtype](name)
                            except Exception as e:
                                warnings.warn(
                                    f"resource_tracker: {name}: {e!r}"
                                )

                    else:
                        raise RuntimeError(f"unrecognized command {cmd!r}")
                except BaseException:
                    try:
                        sys.excepthook(*sys.exc_info())
                    except BaseException:
                        pass
    finally:
        # all processes have terminated; clean up any remaining resources
        def _unlink_resources(rtype_registry, rtype):
            if rtype_registry:
                try:
                    warnings.warn(
                        "resource_tracker: There appear to be "
                        f"{len(rtype_registry)} leaked {rtype} objects to "
                        "clean up at shutdown"
                    )
                except Exception:
                    pass
            for name in rtype_registry:
                # For some reason the process which created and registered this
                # resource has failed to unregister it. Presumably it has
                # died. We therefore clean it up.
                try:
                    _CLEANUP_FUNCS[rtype](name)
                    if verbose:
                        util.debug(f"[ResourceTracker] unlink {name}")
                except Exception as e:
                    warnings.warn(f"resource_tracker: {name}: {e!r}")

        for rtype, rtype_registry in registry.items():
            if rtype == "folder":
                continue
            else:
                _unlink_resources(rtype_registry, rtype)

        # The default cleanup routine for folders deletes everything inside
        # those folders recursively, which can include other resources tracked
        # by the resource tracker. To limit the risk of the resource tracker
        # attempting to delete a resource twice (once as part of a tracked
        # folder, and once as a resource), we delete the folders after all
        # other resource types.
        if "folder" in registry:
            _unlink_resources(registry["folder"], "folder")

    if verbose:
        util.debug("resource tracker shut down")


#
# Start a program with only specified fds kept open
#


def spawnv_passfds(path, args, passfds):
    passfds = sorted(passfds)
    if sys.platform != "win32":
        errpipe_read, errpipe_write = os.pipe()
        try:
            from .reduction import _mk_inheritable
            from .fork_exec import fork_exec

            _pass = [_mk_inheritable(fd) for fd in passfds]
            return fork_exec(args, _pass)
        finally:
            os.close(errpipe_read)
            os.close(errpipe_write)
    else:
        cmd = " ".join(f'"{x}"' for x in args)
        try:
            _, ht, pid, _ = _winapi.CreateProcess(
                path, cmd, None, None, True, 0, None, None, None
            )
            _winapi.CloseHandle(ht)
        except BaseException:
            pass
        return pid
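
A minimal usage sketch for the module-level API above (the temporary folder here is created just for the demo; `ensure_running` spawns the tracker process on first use):

    import tempfile
    from joblib.externals.loky.backend import resource_tracker

    workdir = tempfile.mkdtemp(prefix="loky_demo_")
    resource_tracker.register(workdir, "folder")      # refcount -> 1
    resource_tracker.register(workdir, "folder")      # refcount -> 2
    resource_tracker.maybe_unlink(workdir, "folder")  # refcount -> 1, kept
    resource_tracker.maybe_unlink(workdir, "folder")  # refcount -> 0, rmtree
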
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py
ADDED
@@ -0,0 +1,250 @@
###############################################################################
# Prepares and processes the data to set up the new process environment
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/spawn.py (17/02/2017)
#  * Improve logging data
#
import os
import sys
import runpy
import textwrap
import types
from multiprocessing import process, util


if sys.platform != "win32":
    WINEXE = False
    WINSERVICE = False
else:
    import msvcrt
    from multiprocessing.reduction import duplicate

    WINEXE = sys.platform == "win32" and getattr(sys, "frozen", False)
    WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")

if WINSERVICE:
    _python_exe = os.path.join(sys.exec_prefix, "python.exe")
else:
    _python_exe = sys.executable


def get_executable():
    return _python_exe


def _check_not_importing_main():
    if getattr(process.current_process(), "_inheriting", False):
        raise RuntimeError(
            textwrap.dedent(
                """\
                An attempt has been made to start a new process before the
                current process has finished its bootstrapping phase.

                This probably means that you are not using fork to start your
                child processes and you have forgotten to use the proper idiom
                in the main module:

                    if __name__ == '__main__':
                        freeze_support()
                        ...

                The "freeze_support()" line can be omitted if the program
                is not going to be frozen to produce an executable."""
            )
        )


def get_preparation_data(name, init_main_module=True):
    """Return info about parent needed by child to unpickle process object."""
    _check_not_importing_main()
    d = dict(
        log_to_stderr=util._log_to_stderr,
        authkey=bytes(process.current_process().authkey),
        name=name,
        sys_argv=sys.argv,
        orig_dir=process.ORIGINAL_DIR,
        dir=os.getcwd(),
    )

    # Send sys_path and make sure the current directory will not be changed
    d["sys_path"] = [p if p != "" else process.ORIGINAL_DIR for p in sys.path]

    # Make sure to pass the information if the multiprocessing logger is active
    if util._logger is not None:
        d["log_level"] = util._logger.getEffectiveLevel()
        if util._logger.handlers:
            h = util._logger.handlers[0]
            d["log_fmt"] = h.formatter._fmt

    # Tell the child how to communicate with the resource_tracker
    from .resource_tracker import _resource_tracker

    _resource_tracker.ensure_running()
    d["tracker_args"] = {"pid": _resource_tracker._pid}
    if sys.platform == "win32":
        d["tracker_args"]["fh"] = msvcrt.get_osfhandle(_resource_tracker._fd)
    else:
        d["tracker_args"]["fd"] = _resource_tracker._fd

    if sys.version_info >= (3, 8) and os.name == "posix":
        # joblib/loky#242: allow loky processes to retrieve the resource
        # tracker of their parent in case the child process unpickles
        # shared_memory objects, that are still tracked by multiprocessing's
        # resource_tracker by default.
        # XXX: this is a workaround that may be error prone: in the future, it
        # would be better to have loky subclass multiprocessing's shared_memory
        # to force registration of shared_memory segments via loky's
        # resource_tracker.
        from multiprocessing.resource_tracker import (
            _resource_tracker as mp_resource_tracker,
        )

        # multiprocessing's resource_tracker must be running before loky
        # process is created (otherwise the child won't be able to use it if
        # it is created later on)
        mp_resource_tracker.ensure_running()
        d["mp_tracker_args"] = {
            "fd": mp_resource_tracker._fd,
            "pid": mp_resource_tracker._pid,
        }

    # Figure out whether to initialise main in the subprocess as a module
    # or through direct execution (or to leave it alone entirely)
    if init_main_module:
        main_module = sys.modules["__main__"]
        try:
            main_mod_name = getattr(main_module.__spec__, "name", None)
        except BaseException:
            main_mod_name = None
        if main_mod_name is not None:
            d["init_main_from_name"] = main_mod_name
        elif sys.platform != "win32" or (not WINEXE and not WINSERVICE):
            main_path = getattr(main_module, "__file__", None)
            if main_path is not None:
                if (
                    not os.path.isabs(main_path)
                    and process.ORIGINAL_DIR is not None
                ):
                    main_path = os.path.join(process.ORIGINAL_DIR, main_path)
                d["init_main_from_path"] = os.path.normpath(main_path)

    return d


#
# Prepare current process
#
old_main_modules = []


def prepare(data, parent_sentinel=None):
    """Try to get current process ready to unpickle process object."""
    if "name" in data:
        process.current_process().name = data["name"]

    if "authkey" in data:
        process.current_process().authkey = data["authkey"]

    if "log_to_stderr" in data and data["log_to_stderr"]:
        util.log_to_stderr()

    if "log_level" in data:
        util.get_logger().setLevel(data["log_level"])

    if "log_fmt" in data:
        import logging

        util.get_logger().handlers[0].setFormatter(
            logging.Formatter(data["log_fmt"])
        )

    if "sys_path" in data:
        sys.path = data["sys_path"]

    if "sys_argv" in data:
        sys.argv = data["sys_argv"]

    if "dir" in data:
        os.chdir(data["dir"])

    if "orig_dir" in data:
        process.ORIGINAL_DIR = data["orig_dir"]

    if "mp_tracker_args" in data:
        from multiprocessing.resource_tracker import (
            _resource_tracker as mp_resource_tracker,
        )

        mp_resource_tracker._fd = data["mp_tracker_args"]["fd"]
        mp_resource_tracker._pid = data["mp_tracker_args"]["pid"]
    if "tracker_args" in data:
        from .resource_tracker import _resource_tracker

        _resource_tracker._pid = data["tracker_args"]["pid"]
        if sys.platform == "win32":
            handle = data["tracker_args"]["fh"]
            handle = duplicate(handle, source_process=parent_sentinel)
            _resource_tracker._fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
        else:
            _resource_tracker._fd = data["tracker_args"]["fd"]

    if "init_main_from_name" in data:
        _fixup_main_from_name(data["init_main_from_name"])
    elif "init_main_from_path" in data:
        _fixup_main_from_path(data["init_main_from_path"])


# Multiprocessing module helpers to fix up the main module in
# spawned subprocesses
def _fixup_main_from_name(mod_name):
    # __main__.py files for packages, directories, zip archives, etc, run
    # their "main only" code unconditionally, so we don't even try to
    # populate anything in __main__, nor do we make any changes to
    # __main__ attributes
    current_main = sys.modules["__main__"]
    if mod_name == "__main__" or mod_name.endswith(".__main__"):
        return

    # If this process was forked, __main__ may already be populated
    if getattr(current_main.__spec__, "name", None) == mod_name:
        return

    # Otherwise, __main__ may contain some non-main code where we need to
    # support unpickling it properly. We rerun it as __mp_main__ and make
    # the normal __main__ an alias to that
    old_main_modules.append(current_main)
    main_module = types.ModuleType("__mp_main__")
    main_content = runpy.run_module(
        mod_name, run_name="__mp_main__", alter_sys=True
    )
    main_module.__dict__.update(main_content)
    sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module


def _fixup_main_from_path(main_path):
    # If this process was forked, __main__ may already be populated
    current_main = sys.modules["__main__"]

    # Unfortunately, the main ipython launch script historically had no
    # "if __name__ == '__main__'" guard, so we work around that
    # by treating it like a __main__.py file
    # See https://github.com/ipython/ipython/issues/4698
    main_name = os.path.splitext(os.path.basename(main_path))[0]
    if main_name == "ipython":
        return

    # Otherwise, if __file__ already has the setting we expect,
    # there's nothing more to do
    if getattr(current_main, "__file__", None) == main_path:
        return

    # If the parent process has sent a path through rather than a module
    # name we assume it is an executable script that may contain
    # non-main code that needs to be executed
    old_main_modules.append(current_main)
    main_module = types.ModuleType("__mp_main__")
    main_content = runpy.run_path(main_path, run_name="__mp_main__")
    main_module.__dict__.update(main_content)
    sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module
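
The RuntimeError raised by `_check_not_importing_main` above is why scripts that spawn loky/joblib workers need the standard main guard; a minimal sketch using joblib's public API:

    from joblib import Parallel, delayed

    def square(x):
        return x * x

    if __name__ == "__main__":
        # Without this guard, re-importing __main__ in the spawned child
        # would trip _check_not_importing_main() and raise.
        print(Parallel(n_jobs=2)(delayed(square)(i) for i in range(4)))
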
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py
ADDED
@@ -0,0 +1,409 @@
###############################################################################
# Synchronization primitives based on our SemLock implementation
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/synchronize.py (17/02/2017)
#  * Remove ctx argument for compatibility reasons
#  * Registers a cleanup function with the loky resource_tracker to remove the
#    semaphore when the process dies instead.
#
# TODO: investigate which Python version is required to be able to use
# multiprocessing.resource_tracker and therefore multiprocessing.synchronize
# instead of a loky-specific fork.

import os
import sys
import tempfile
import threading
import _multiprocessing
from time import time as _time
from multiprocessing import process, util
from multiprocessing.context import assert_spawning

from . import resource_tracker

__all__ = [
    "Lock",
    "RLock",
    "Semaphore",
    "BoundedSemaphore",
    "Condition",
    "Event",
]
# Try to import the mp.synchronize module cleanly, if it fails
# raise ImportError for platforms lacking a working sem_open implementation.
# See issue 3770
try:
    from _multiprocessing import SemLock as _SemLock
    from _multiprocessing import sem_unlink
except ImportError:
    raise ImportError(
        "This platform lacks a functioning sem_open"
        " implementation, therefore, the required"
        " synchronization primitives needed will not"
        " function, see issue 3770."
    )

#
# Constants
#

RECURSIVE_MUTEX, SEMAPHORE = range(2)
SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX


#
# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
#


class SemLock:

    _rand = tempfile._RandomNameSequence()

    def __init__(self, kind, value, maxvalue, name=None):
        # unlink_now is only used on win32 or when we are using fork.
        unlink_now = False
        if name is None:
            # Try to find an unused name for the SemLock instance.
            for _ in range(100):
                try:
                    self._semlock = _SemLock(
                        kind, value, maxvalue, SemLock._make_name(), unlink_now
                    )
                except FileExistsError:  # pragma: no cover
                    pass
                else:
                    break
            else:  # pragma: no cover
                raise FileExistsError("cannot find name for semaphore")
        else:
            self._semlock = _SemLock(kind, value, maxvalue, name, unlink_now)
        self.name = name
        util.debug(
            f"created semlock with handle {self._semlock.handle} and name "
            f'"{self.name}"'
        )

        self._make_methods()

        def _after_fork(obj):
            obj._semlock._after_fork()

        util.register_after_fork(self, _after_fork)

        # When the object is garbage collected or the
        # process shuts down we unlink the semaphore name
        resource_tracker.register(self._semlock.name, "semlock")
        util.Finalize(
            self, SemLock._cleanup, (self._semlock.name,), exitpriority=0
        )

    @staticmethod
    def _cleanup(name):
        try:
            sem_unlink(name)
        except FileNotFoundError:
            # Already unlinked, possibly by user code: ignore and make sure to
            # unregister the semaphore from the resource tracker.
            pass
        finally:
            resource_tracker.unregister(name, "semlock")

    def _make_methods(self):
        self.acquire = self._semlock.acquire
        self.release = self._semlock.release

    def __enter__(self):
        return self._semlock.acquire()

    def __exit__(self, *args):
        return self._semlock.release()

    def __getstate__(self):
        assert_spawning(self)
        sl = self._semlock
        h = sl.handle
        return (h, sl.kind, sl.maxvalue, sl.name)

    def __setstate__(self, state):
        self._semlock = _SemLock._rebuild(*state)
        util.debug(
            f'recreated blocker with handle {state[0]!r} and name "{state[3]}"'
        )
        self._make_methods()

    @staticmethod
    def _make_name():
        # OSX does not support long names for semaphores
        return f"/loky-{os.getpid()}-{next(SemLock._rand)}"


#
# Semaphore
#


class Semaphore(SemLock):
    def __init__(self, value=1):
        SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)

    def get_value(self):
        if sys.platform == "darwin":
            raise NotImplementedError("OSX does not implement sem_getvalue")
        return self._semlock._get_value()

    def __repr__(self):
        try:
            value = self._semlock._get_value()
        except Exception:
            value = "unknown"
        return f"<{self.__class__.__name__}(value={value})>"


#
# Bounded semaphore
#


class BoundedSemaphore(Semaphore):
    def __init__(self, value=1):
        SemLock.__init__(self, SEMAPHORE, value, value)

    def __repr__(self):
        try:
            value = self._semlock._get_value()
        except Exception:
            value = "unknown"
        return (
            f"<{self.__class__.__name__}(value={value}, "
            f"maxvalue={self._semlock.maxvalue})>"
        )


#
# Non-recursive lock
#


class Lock(SemLock):
    def __init__(self):
        super().__init__(SEMAPHORE, 1, 1)

    def __repr__(self):
        try:
            if self._semlock._is_mine():
                name = process.current_process().name
                if threading.current_thread().name != "MainThread":
                    name = f"{name}|{threading.current_thread().name}"
            elif self._semlock._get_value() == 1:
                name = "None"
            elif self._semlock._count() > 0:
                name = "SomeOtherThread"
            else:
                name = "SomeOtherProcess"
        except Exception:
            name = "unknown"
        return f"<{self.__class__.__name__}(owner={name})>"


#
# Recursive lock
#


class RLock(SemLock):
    def __init__(self):
        super().__init__(RECURSIVE_MUTEX, 1, 1)

    def __repr__(self):
        try:
            if self._semlock._is_mine():
                name = process.current_process().name
                if threading.current_thread().name != "MainThread":
                    name = f"{name}|{threading.current_thread().name}"
                count = self._semlock._count()
            elif self._semlock._get_value() == 1:
                name, count = "None", 0
            elif self._semlock._count() > 0:
                name, count = "SomeOtherThread", "nonzero"
            else:
                name, count = "SomeOtherProcess", "nonzero"
        except Exception:
            name, count = "unknown", "unknown"
        return f"<{self.__class__.__name__}({name}, {count})>"


#
# Condition variable
#


class Condition:
    def __init__(self, lock=None):
        self._lock = lock or RLock()
        self._sleeping_count = Semaphore(0)
        self._woken_count = Semaphore(0)
        self._wait_semaphore = Semaphore(0)
        self._make_methods()

    def __getstate__(self):
        assert_spawning(self)
        return (
            self._lock,
            self._sleeping_count,
            self._woken_count,
            self._wait_semaphore,
        )

    def __setstate__(self, state):
        (
            self._lock,
            self._sleeping_count,
            self._woken_count,
            self._wait_semaphore,
        ) = state
        self._make_methods()

    def __enter__(self):
        return self._lock.__enter__()

    def __exit__(self, *args):
        return self._lock.__exit__(*args)

    def _make_methods(self):
        self.acquire = self._lock.acquire
        self.release = self._lock.release

    def __repr__(self):
        try:
            num_waiters = (
                self._sleeping_count._semlock._get_value()
                - self._woken_count._semlock._get_value()
            )
        except Exception:
            num_waiters = "unknown"
        return f"<{self.__class__.__name__}({self._lock}, {num_waiters})>"

    def wait(self, timeout=None):
        assert (
            self._lock._semlock._is_mine()
        ), "must acquire() condition before using wait()"

        # indicate that this thread is going to sleep
        self._sleeping_count.release()

        # release lock
        count = self._lock._semlock._count()
        for _ in range(count):
            self._lock.release()

        try:
            # wait for notification or timeout
            return self._wait_semaphore.acquire(True, timeout)
        finally:
            # indicate that this thread has woken
            self._woken_count.release()

            # reacquire lock
            for _ in range(count):
                self._lock.acquire()

    def notify(self):
        assert self._lock._semlock._is_mine(), "lock is not owned"
        assert not self._wait_semaphore.acquire(False)

        # to take account of timeouts since last notify() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res

        if self._sleeping_count.acquire(False):  # try grabbing a sleeper
            self._wait_semaphore.release()  # wake up one sleeper
            self._woken_count.acquire()  # wait for the sleeper to wake

            # rezero _wait_semaphore in case a timeout just happened
            self._wait_semaphore.acquire(False)

    def notify_all(self):
        assert self._lock._semlock._is_mine(), "lock is not owned"
        assert not self._wait_semaphore.acquire(False)

        # to take account of timeouts since last notify*() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res

        sleepers = 0
        while self._sleeping_count.acquire(False):
            self._wait_semaphore.release()  # wake up one sleeper
            sleepers += 1

        if sleepers:
            for _ in range(sleepers):
                self._woken_count.acquire()  # wait for a sleeper to wake

            # rezero wait_semaphore in case some timeouts just happened
            while self._wait_semaphore.acquire(False):
                pass

    def wait_for(self, predicate, timeout=None):
        result = predicate()
        if result:
            return result
        if timeout is not None:
            endtime = _time() + timeout
        else:
            endtime = None
            waittime = None
        while not result:
            if endtime is not None:
                waittime = endtime - _time()
                if waittime <= 0:
                    break
            self.wait(waittime)
            result = predicate()
        return result


#
# Event
#


class Event:
    def __init__(self):
        self._cond = Condition(Lock())
        self._flag = Semaphore(0)

    def is_set(self):
        with self._cond:
            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False

    def set(self):
        with self._cond:
            self._flag.acquire(False)
            self._flag.release()
            self._cond.notify_all()

    def clear(self):
        with self._cond:
            self._flag.acquire(False)

    def wait(self, timeout=None):
        with self._cond:
            if self._flag.acquire(False):
                self._flag.release()
            else:
                self._cond.wait(timeout)

            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False
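
These primitives mirror `multiprocessing.synchronize` while registering with loky's resource tracker; a minimal single-process sketch (assumes a POSIX platform with a working `sem_open`):

    from joblib.externals.loky.backend.synchronize import Event

    ev = Event()
    assert not ev.is_set()
    ev.set()
    assert ev.wait(timeout=0.1)       # already set: returns True immediately
    ev.clear()
    assert not ev.wait(timeout=0.1)   # times out and returns False
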
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py
ADDED
@@ -0,0 +1,102 @@
import inspect
from functools import partial
from joblib.externals.cloudpickle import dumps, loads


WRAP_CACHE = {}


class CloudpickledObjectWrapper:
    def __init__(self, obj, keep_wrapper=False):
        self._obj = obj
        self._keep_wrapper = keep_wrapper

    def __reduce__(self):
        _pickled_object = dumps(self._obj)
        if not self._keep_wrapper:
            return loads, (_pickled_object,)

        return _reconstruct_wrapper, (_pickled_object, self._keep_wrapper)

    def __getattr__(self, attr):
        # Ensure that the wrapped object can be used seamlessly as the
        # previous object.
        if attr not in ["_obj", "_keep_wrapper"]:
            return getattr(self._obj, attr)
        return getattr(self, attr)


# Make sure the wrapped object conserves the callable property
class CallableObjectWrapper(CloudpickledObjectWrapper):
    def __call__(self, *args, **kwargs):
        return self._obj(*args, **kwargs)


def _wrap_non_picklable_objects(obj, keep_wrapper):
    if callable(obj):
        return CallableObjectWrapper(obj, keep_wrapper=keep_wrapper)
    return CloudpickledObjectWrapper(obj, keep_wrapper=keep_wrapper)


def _reconstruct_wrapper(_pickled_object, keep_wrapper):
    obj = loads(_pickled_object)
    return _wrap_non_picklable_objects(obj, keep_wrapper)


def _wrap_objects_when_needed(obj):
    # Function to introspect an object and decide if it should be wrapped or
    # not.
    need_wrap = "__main__" in getattr(obj, "__module__", "")
    if isinstance(obj, partial):
        return partial(
            _wrap_objects_when_needed(obj.func),
            *[_wrap_objects_when_needed(a) for a in obj.args],
            **{
                k: _wrap_objects_when_needed(v)
                for k, v in obj.keywords.items()
            }
        )
    if callable(obj):
        # Need wrap if the object is a function defined in a local scope of
        # another function.
        func_code = getattr(obj, "__code__", "")
        need_wrap |= getattr(func_code, "co_flags", 0) & inspect.CO_NESTED

        # Need wrap if the obj is a lambda expression
        func_name = getattr(obj, "__name__", "")
        need_wrap |= "<lambda>" in func_name

    if not need_wrap:
        return obj

    wrapped_obj = WRAP_CACHE.get(obj)
    if wrapped_obj is None:
        wrapped_obj = _wrap_non_picklable_objects(obj, keep_wrapper=False)
        WRAP_CACHE[obj] = wrapped_obj
    return wrapped_obj


def wrap_non_picklable_objects(obj, keep_wrapper=True):
    """Wrapper for non-picklable objects to use cloudpickle to serialize them.

    Note that this wrapper tends to slow down the serialization process as it
    is done with cloudpickle which is typically slower compared to pickle. The
    proper way to solve serialization issues is to avoid defining functions and
    objects in the main scripts and to implement __reduce__ functions for
    complex classes.
    """
    # If obj is a class, create a CloudpickledClassWrapper which instantiates
    # the object internally and wraps it directly in a CloudpickledObjectWrapper
    if inspect.isclass(obj):

        class CloudpickledClassWrapper(CloudpickledObjectWrapper):
            def __init__(self, *args, **kwargs):
                self._obj = obj(*args, **kwargs)
                self._keep_wrapper = keep_wrapper

        CloudpickledClassWrapper.__name__ = obj.__name__
        return CloudpickledClassWrapper

    # If obj is an instance of a class, just wrap it in a regular
    # CloudpickledObjectWrapper
    return _wrap_non_picklable_objects(obj, keep_wrapper=keep_wrapper)
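
A round-trip sketch of the wrapper on a lambda, which plain pickle would otherwise reject:

    import pickle
    from joblib.externals.loky.cloudpickle_wrapper import (
        wrap_non_picklable_objects,
    )

    double = wrap_non_picklable_objects(lambda x: 2 * x)
    # __reduce__ routes the payload through cloudpickle, so this succeeds:
    restored = pickle.loads(pickle.dumps(double))
    assert restored(21) == 42
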
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/initializers.py
ADDED
@@ -0,0 +1,80 @@
import warnings


def _viztracer_init(init_kwargs):
    """Initialize viztracer's profiler in worker processes"""
    from viztracer import VizTracer

    tracer = VizTracer(**init_kwargs)
    tracer.register_exit()
    tracer.start()


def _make_viztracer_initializer_and_initargs():
    try:
        import viztracer

        tracer = viztracer.get_tracer()
        if tracer is not None and getattr(tracer, "enable", False):
            # Profiler is active: introspect its configuration to
            # initialize the workers with the same configuration.
            return _viztracer_init, (tracer.init_kwargs,)
    except ImportError:
        # viztracer is not installed: nothing to do
        pass
    except Exception as e:
        # In case viztracer's API evolves, we do not want to crash loky but
        # we want to know about it to be able to update loky.
        warnings.warn(f"Unable to introspect viztracer state: {e}")
    return None, ()


class _ChainedInitializer:
    """Compound worker initializer

    This is meant to be used in conjunction with _chain_initializers to
    produce the necessary chained_args list to be passed to __call__.
    """

    def __init__(self, initializers):
        self._initializers = initializers

    def __call__(self, *chained_args):
        for initializer, args in zip(self._initializers, chained_args):
            initializer(*args)


def _chain_initializers(initializer_and_args):
    """Convenience helper to combine a sequence of initializers.

    If some initializers are None, they are filtered out.
    """
    filtered_initializers = []
    filtered_initargs = []
    for initializer, initargs in initializer_and_args:
        if initializer is not None:
            filtered_initializers.append(initializer)
            filtered_initargs.append(initargs)

    if not filtered_initializers:
        return None, ()
    elif len(filtered_initializers) == 1:
        return filtered_initializers[0], filtered_initargs[0]
    else:
        return _ChainedInitializer(filtered_initializers), filtered_initargs


def _prepare_initializer(initializer, initargs):
    if initializer is not None and not callable(initializer):
        raise TypeError(
            f"initializer must be a callable, got: {initializer!r}"
        )

    # Introspect the runtime to determine if we need to propagate the
    # viztracer profiler information to the workers:
    return _chain_initializers(
        [
            (initializer, initargs),
            _make_viztracer_initializer_and_initargs(),
        ]
    )
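
A minimal sketch of the `_chain_initializers` contract, assuming two hypothetical helpers `_set_env` and `_warmup` (only `_chain_initializers` and `_ChainedInitializer` come from the file above): `None` initializers are filtered out, a single survivor is returned unchanged, and several are folded into a `_ChainedInitializer` whose initargs becomes a list of per-initializer tuples.

    import os

    def _set_env(key, value):          # hypothetical initializer
        os.environ[key] = value

    def _warmup(n):                    # hypothetical initializer
        sum(range(n))

    initializer, initargs = _chain_initializers([
        (_set_env, ("MY_FLAG", "1")),
        (None, ()),                    # dropped by the filter
        (_warmup, (10_000,)),
    ])
    initializer(*initargs)  # runs _set_env("MY_FLAG", "1"), then _warmup(10000)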
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py
ADDED
@@ -0,0 +1,1314 @@
###############################################################################
# Re-implementation of the ProcessPoolExecutor more robust to faults
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from concurrent/futures/process_pool_executor.py (17/02/2017)
# * Add an extra management thread to detect executor_manager_thread failures,
# * Improve the shutdown process to avoid deadlocks,
# * Add timeout for workers,
# * More robust pickling process.
#
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Implements ProcessPoolExecutor.

The following diagram and text describe the data-flow through the system:

|======================= In-process =====================|== Out-of-process ==|

+----------+     +----------+       +--------+     +-----------+    +---------+
|          | =>  | Work Ids |       |        |     | Call Q    |    | Process |
|          |     +----------+       |        |     +-----------+    |  Pool   |
|          |     | ...      |       |        |     | ...       |    +---------+
|          |     | 6        |    => |        |  => | 5, call() | => |         |
|          |     | 7        |       |        |     | ...       |    |         |
| Process  |     | ...      |       | Local  |     +-----------+    | Process |
|  Pool    |     +----------+       | Worker |                      |  #1..n  |
| Executor |                        | Thread |                      |         |
|          |     +----------- +     |        |     +-----------+    |         |
|          | <=> | Work Items | <=> |        | <=  | Result Q  | <= |         |
|          |     +------------+     |        |     +-----------+    |         |
|          |     | 6: call()  |     |        |     | ...       |    |         |
|          |     |    future  |     +--------+     | 4, result |    |         |
|          |     | ...        |                    | 3, except |    |         |
+----------+     +------------+                    +-----------+    +---------+

Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue

Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
  WorkItem from the "Work Items" dict: if the work item has been cancelled then
  it is simply removed from the dict, otherwise it is repackaged as a
  _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
  until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
  calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
  "Work Items" dict and deletes the dict entry

Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
  _ResultItems in "Result Q"
"""


__author__ = "Thomas Moreau (thomas.moreau.2010@gmail.com)"


import os
import gc
import sys
import queue
import struct
import weakref
import warnings
import itertools
import traceback
import threading
from time import time, sleep
import multiprocessing as mp
from functools import partial
from pickle import PicklingError
from concurrent.futures import Executor
from concurrent.futures._base import LOGGER
from concurrent.futures.process import BrokenProcessPool as _BPPException
from multiprocessing.connection import wait

from ._base import Future
from .backend import get_context
from .backend.context import cpu_count, _MAX_WINDOWS_WORKERS
from .backend.queues import Queue, SimpleQueue
from .backend.reduction import set_loky_pickler, get_loky_pickler_name
from .backend.utils import kill_process_tree, get_exitcodes_terminated_worker
from .initializers import _prepare_initializer


# Mechanism to prevent infinite process spawning. When a worker of a
# ProcessPoolExecutor nested in MAX_DEPTH Executor tries to create a new
# Executor, a LokyRecursionError is raised
MAX_DEPTH = int(os.environ.get("LOKY_MAX_DEPTH", 10))
_CURRENT_DEPTH = 0

# Minimum time interval between two consecutive memory leak protection checks.
_MEMORY_LEAK_CHECK_DELAY = 1.0

# Number of bytes of memory usage allowed over the reference process size.
_MAX_MEMORY_LEAK_SIZE = int(3e8)


try:
    from psutil import Process

    _USE_PSUTIL = True

    def _get_memory_usage(pid, force_gc=False):
        if force_gc:
            gc.collect()

        mem_size = Process(pid).memory_info().rss
        mp.util.debug(f"psutil return memory size: {mem_size}")
        return mem_size

except ImportError:
    _USE_PSUTIL = False
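
# Note on the thresholds above (illustrative): the worker loop further down
# measures a reference RSS right after a worker's first completed task; a
# worker is only considered leaky when its RSS exceeds that reference by more
# than _MAX_MEMORY_LEAK_SIZE (3e8 bytes, ~300 MB) even after a forced
# gc.collect(), in which case it shuts itself down and is replaced.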


class _ThreadWakeup:
    def __init__(self):
        self._closed = False
        self._reader, self._writer = mp.Pipe(duplex=False)

    def close(self):
        if not self._closed:
            self._closed = True
            self._writer.close()
            self._reader.close()

    def wakeup(self):
        if not self._closed:
            self._writer.send_bytes(b"")

    def clear(self):
        if not self._closed:
            while self._reader.poll():
                self._reader.recv_bytes()


class _ExecutorFlags:
    """Necessary references to maintain executor states without preventing gc

    It keeps the information needed by executor_manager_thread
    and crash_detection_thread to maintain the pool without preventing the
    garbage collection of unreferenced executors.
    """

    def __init__(self, shutdown_lock):

        self.shutdown = False
        self.broken = None
        self.kill_workers = False
        self.shutdown_lock = shutdown_lock

    def flag_as_shutting_down(self, kill_workers=None):
        with self.shutdown_lock:
            self.shutdown = True
            if kill_workers is not None:
                self.kill_workers = kill_workers

    def flag_as_broken(self, broken):
        with self.shutdown_lock:
            self.shutdown = True
            self.broken = broken


# Prior to 3.9, executor_manager_thread is created as daemon thread. This means
# that it is not joined automatically when the interpreter is shutting down.
# To work around this problem, an exit handler is installed to tell the
# thread to exit when the interpreter is shutting down and then waits until
# it finishes. The thread needs to be daemonized because the atexit hooks are
# called after all non daemonized threads are joined.
#
# Starting 3.9, there exists a specific atexit hook to be called before joining
# the threads so the executor_manager_thread does not need to be daemonized
# anymore.
#
# The atexit hooks are registered when starting the first ProcessPoolExecutor
# to avoid import having an effect on the interpreter.

_global_shutdown = False
_global_shutdown_lock = threading.Lock()
_threads_wakeups = weakref.WeakKeyDictionary()
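
# A sketch of the self-pipe pattern _ThreadWakeup implements (illustration
# mirroring its use by the manager thread below):
#
#   tw = _ThreadWakeup()
#   # ... some other thread calls tw.wakeup() to interrupt a blocking wait
#   ready = wait([tw._reader])  # multiprocessing.connection.wait
#   if tw._reader in ready:
#       tw.clear()              # drain the pipe before waiting again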


def _python_exit():
    global _global_shutdown
    _global_shutdown = True

    # Materialize the list of items to avoid error due to iterating over
    # changing size dictionary.
    items = list(_threads_wakeups.items())
    if len(items) > 0:
        mp.util.debug(
            f"Interpreter shutting down. Waking up {len(items)} "
            f"executor_manager_thread:\n{items}"
        )

    # Wake up the executor_manager_thread's so they can detect the interpreter
    # is shutting down and exit.
    for _, (shutdown_lock, thread_wakeup) in items:
        with shutdown_lock:
            thread_wakeup.wakeup()

    # Collect the executor_manager_thread's to make sure we exit cleanly.
    for thread, _ in items:
        # This lock is to prevent situations where an executor is gc'ed in one
        # thread while the atexit finalizer is running in another thread. This
        # can happen when joblib is used in pypy for instance.
        with _global_shutdown_lock:
            thread.join()


# With the fork context, _thread_wakeups is propagated to children.
# Clear it after fork to avoid some situation that can cause some
# freeze when joining the workers.
mp.util.register_after_fork(_threads_wakeups, lambda obj: obj.clear())


# Module variable to register the at_exit call
process_pool_executor_at_exit = None

# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1


class _RemoteTraceback(Exception):
    """Embed stringification of remote traceback in local traceback"""

    def __init__(self, tb=None):
        self.tb = f'\n"""\n{tb}"""'

    def __str__(self):
        return self.tb


# Do not inherit from BaseException to mirror
# concurrent.futures.process._ExceptionWithTraceback
class _ExceptionWithTraceback:
    def __init__(self, exc):
        tb = getattr(exc, "__traceback__", None)
        if tb is None:
            _, _, tb = sys.exc_info()
        tb = traceback.format_exception(type(exc), exc, tb)
        tb = "".join(tb)
        self.exc = exc
        self.tb = tb

    def __reduce__(self):
        return _rebuild_exc, (self.exc, self.tb)


def _rebuild_exc(exc, tb):
    exc.__cause__ = _RemoteTraceback(tb)
    return exc


class _WorkItem:

    __slots__ = ["future", "fn", "args", "kwargs"]

    def __init__(self, future, fn, args, kwargs):
        self.future = future
        self.fn = fn
        self.args = args
        self.kwargs = kwargs


class _ResultItem:
    def __init__(self, work_id, exception=None, result=None):
        self.work_id = work_id
        self.exception = exception
        self.result = result


class _CallItem:
    def __init__(self, work_id, fn, args, kwargs):
        self.work_id = work_id
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

        # Store the current loky_pickler so it is correctly set in the worker
        self.loky_pickler = get_loky_pickler_name()

    def __call__(self):
        set_loky_pickler(self.loky_pickler)
        return self.fn(*self.args, **self.kwargs)

    def __repr__(self):
        return (
            f"CallItem({self.work_id}, {self.fn}, {self.args}, {self.kwargs})"
        )


class _SafeQueue(Queue):
    """Safe Queue that sets the exception on the future linked to a failed job"""

    def __init__(
        self,
        max_size=0,
        ctx=None,
        pending_work_items=None,
        running_work_items=None,
        thread_wakeup=None,
        reducers=None,
    ):
        self.thread_wakeup = thread_wakeup
        self.pending_work_items = pending_work_items
        self.running_work_items = running_work_items
        super().__init__(max_size, reducers=reducers, ctx=ctx)

    def _on_queue_feeder_error(self, e, obj):
        if isinstance(obj, _CallItem):
            # format traceback only works on python3
            if isinstance(e, struct.error):
                raised_error = RuntimeError(
                    "The task could not be sent to the workers as it is too "
                    "large for `send_bytes`."
                )
            else:
                raised_error = PicklingError(
                    "Could not pickle the task to send it to the workers."
                )
            tb = traceback.format_exception(
                type(e), e, getattr(e, "__traceback__", None)
            )
            raised_error.__cause__ = _RemoteTraceback("".join(tb))
            work_item = self.pending_work_items.pop(obj.work_id, None)
            self.running_work_items.remove(obj.work_id)
            # work_item can be None if another process terminated. In this
            # case, the executor_manager_thread fails all work_items with
            # BrokenProcessPool
            if work_item is not None:
                work_item.future.set_exception(raised_error)
                del work_item
            self.thread_wakeup.wakeup()
        else:
            super()._on_queue_feeder_error(e, obj)


def _get_chunks(chunksize, *iterables):
    """Iterates over zip()ed iterables in chunks."""
    it = zip(*iterables)
    while True:
        chunk = tuple(itertools.islice(it, chunksize))
        if not chunk:
            return
        yield chunk


def _process_chunk(fn, chunk):
    """Processes a chunk of an iterable passed to map.

    Runs the function passed to map() on a chunk of the
    iterable passed to map.

    This function is run in a separate process.

    """
    return [fn(*args) for args in chunk]
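
# For example (illustration only): chunking two zipped iterables with
# chunksize=2 yields tuples of argument tuples, the last possibly shorter:
#
#   >>> list(_get_chunks(2, range(5), "abcde"))
#   [((0, 'a'), (1, 'b')), ((2, 'c'), (3, 'd')), ((4, 'e'),)]
#
# _process_chunk(fn, chunk) then calls fn(0, 'a'), fn(1, 'b'), ... in the
# worker and returns the list of results for the whole chunk.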


def _sendback_result(result_queue, work_id, result=None, exception=None):
    """Safely send back the given result or exception"""
    try:
        result_queue.put(
            _ResultItem(work_id, result=result, exception=exception)
        )
    except BaseException as e:
        exc = _ExceptionWithTraceback(e)
        result_queue.put(_ResultItem(work_id, exception=exc))


def _process_worker(
    call_queue,
    result_queue,
    initializer,
    initargs,
    processes_management_lock,
    timeout,
    worker_exit_lock,
    current_depth,
):
    """Evaluates calls from call_queue and places the results in result_queue.

    This worker is run in a separate process.

    Args:
        call_queue: A ctx.Queue of _CallItems that will be read and
            evaluated by the worker.
        result_queue: A ctx.Queue of _ResultItems that will be written
            to by the worker.
        initializer: A callable initializer, or None
        initargs: A tuple of args for the initializer
        processes_management_lock: A ctx.Lock avoiding worker timeout while
            some workers are being spawned.
        timeout: maximum time to wait for a new item in the call_queue. If
            that time expires, the worker will shut down.
        worker_exit_lock: Lock to avoid flagging the executor as broken on
            workers timeout.
        current_depth: Nested parallelism level, to avoid infinite spawning.
    """
    if initializer is not None:
        try:
            initializer(*initargs)
        except BaseException:
            LOGGER.critical("Exception in initializer:", exc_info=True)
            # The parent will notice that the process stopped and
            # mark the pool broken
            return

    # set the global _CURRENT_DEPTH mechanism to limit recursive call
    global _CURRENT_DEPTH
    _CURRENT_DEPTH = current_depth
    _process_reference_size = None
    _last_memory_leak_check = None
    pid = os.getpid()

    mp.util.debug(f"Worker started with timeout={timeout}")
    while True:
        try:
            call_item = call_queue.get(block=True, timeout=timeout)
            if call_item is None:
                mp.util.info("Shutting down worker on sentinel")
        except queue.Empty:
            mp.util.info(f"Shutting down worker after timeout {timeout:0.3f}s")
            if processes_management_lock.acquire(block=False):
                processes_management_lock.release()
                call_item = None
            else:
                mp.util.info("Could not acquire processes_management_lock")
                continue
        except BaseException:
            previous_tb = traceback.format_exc()
            try:
                result_queue.put(_RemoteTraceback(previous_tb))
            except BaseException:
                # If we cannot format correctly the exception, at least print
                # the traceback.
                print(previous_tb)
            mp.util.debug("Exiting with code 1")
            sys.exit(1)
        if call_item is None:
            # Notify queue management thread about worker shutdown
            result_queue.put(pid)
            is_clean = worker_exit_lock.acquire(True, timeout=30)

            # Early notify any loky executor running in this worker process
            # (nested parallelism) that this process is about to shutdown to
            # avoid a deadlock waiting indefinitely for the worker to finish.
            _python_exit()

            if is_clean:
                mp.util.debug("Exited cleanly")
            else:
                mp.util.info("Main process did not release worker_exit")
            return
        try:
            r = call_item()
        except BaseException as e:
            exc = _ExceptionWithTraceback(e)
            result_queue.put(_ResultItem(call_item.work_id, exception=exc))
        else:
            _sendback_result(result_queue, call_item.work_id, result=r)
            del r

        # Free the resource as soon as possible, to avoid holding onto
        # open files or shared memory that is not needed anymore
        del call_item

        if _USE_PSUTIL:
            if _process_reference_size is None:
                # Make reference measurement after the first call
                _process_reference_size = _get_memory_usage(pid, force_gc=True)
                _last_memory_leak_check = time()
                continue
            if time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY:
                mem_usage = _get_memory_usage(pid)
                _last_memory_leak_check = time()
                if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE:
                    # Memory usage stays within bounds: everything is fine.
                    continue

                # Check again memory usage; this time take the measurement
                # after a forced garbage collection to break any reference
                # cycles.
                mem_usage = _get_memory_usage(pid, force_gc=True)
                _last_memory_leak_check = time()
                if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE:
                    # The GC managed to free the memory: everything is fine.
                    continue

                # The process is leaking memory: let the main process
                # know that we need to start a new worker.
                mp.util.info("Memory leak detected: shutting down worker")
                result_queue.put(pid)
                with worker_exit_lock:
                    mp.util.debug("Exit due to memory leak")
                    return
        else:
            # if psutil is not installed, trigger gc.collect events
            # regularly to limit potential memory leaks due to reference cycles
            if _last_memory_leak_check is None or (
                time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY
            ):
                gc.collect()
                _last_memory_leak_check = time()


class _ExecutorManagerThread(threading.Thread):
    """Manages the communication between this process and the worker processes.

    The manager is run in a local thread.

    Args:
        executor: A reference to the ProcessPoolExecutor that owns
            this thread. A weakref will be owned by the manager as well as
            references to internal objects used to introspect the state of
            the executor.
    """

    def __init__(self, executor):
        # Store references to necessary internals of the executor.

        # A _ThreadWakeup to allow waking up the executor_manager_thread from
        # the main Thread and avoid deadlocks caused by permanently
        # locked queues.
        self.thread_wakeup = executor._executor_manager_thread_wakeup
        self.shutdown_lock = executor._shutdown_lock

        # A weakref.ref to the ProcessPoolExecutor that owns this thread. Used
        # to determine if the ProcessPoolExecutor has been garbage collected
        # and that the manager can exit.
        # When the executor gets garbage collected, the weakref callback
        # will wake up the queue management thread so that it can terminate
        # if there is no pending work item.
        def weakref_cb(
            _,
            thread_wakeup=self.thread_wakeup,
            shutdown_lock=self.shutdown_lock,
        ):
            if mp is not None:
                # At this point, the multiprocessing module can already be
                # garbage collected. We only log debug info when still
                # possible.
                mp.util.debug(
                    "Executor collected: triggering callback for"
                    " QueueManager wakeup"
                )
            with shutdown_lock:
                thread_wakeup.wakeup()

        self.executor_reference = weakref.ref(executor, weakref_cb)

        # The flags of the executor
        self.executor_flags = executor._flags

        # A list of the ctx.Process instances used as workers.
        self.processes = executor._processes

        # A ctx.Queue that will be filled with _CallItems derived from
        # _WorkItems for processing by the process workers.
        self.call_queue = executor._call_queue

        # A ctx.SimpleQueue of _ResultItems generated by the process workers.
        self.result_queue = executor._result_queue

        # A queue.Queue of work ids e.g. Queue([5, 6, ...]).
        self.work_ids_queue = executor._work_ids

        # A dict mapping work ids to _WorkItems e.g.
        #     {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        self.pending_work_items = executor._pending_work_items

        # A list of the work_ids that are currently running
        self.running_work_items = executor._running_work_items

        # A lock to avoid concurrent shutdown of workers on timeout and spawn
        # of new processes or shut down
        self.processes_management_lock = executor._processes_management_lock

        super().__init__(name="ExecutorManagerThread")
        if sys.version_info < (3, 9):
            self.daemon = True

    def run(self):
        # Main loop for the executor manager thread.

        while True:
            self.add_call_item_to_queue()

            result_item, is_broken, bpe = self.wait_result_broken_or_wakeup()

            if is_broken:
                self.terminate_broken(bpe)
                return
            if result_item is not None:
                self.process_result_item(result_item)
                # Delete reference to result_item to avoid keeping references
                # while waiting on new results.
                del result_item

            if self.is_shutting_down():
                self.flag_executor_shutting_down()

                # Since no new work items can be added, it is safe to shutdown
                # this thread if there are no pending work items.
                if not self.pending_work_items:
                    self.join_executor_internals()
                    return

    def add_call_item_to_queue(self):
        # Fills call_queue with _WorkItems from pending_work_items.
        # This function never blocks.
        while True:
            if self.call_queue.full():
                return
            try:
                work_id = self.work_ids_queue.get(block=False)
            except queue.Empty:
                return
            else:
                work_item = self.pending_work_items[work_id]

                if work_item.future.set_running_or_notify_cancel():
                    self.running_work_items += [work_id]
                    self.call_queue.put(
                        _CallItem(
                            work_id,
                            work_item.fn,
                            work_item.args,
                            work_item.kwargs,
                        ),
                        block=True,
                    )
                else:
                    del self.pending_work_items[work_id]
                    continue

    def wait_result_broken_or_wakeup(self):
        # Wait for a result to be ready in the result_queue while checking
        # that all worker processes are still running, or for a wake-up signal
        # to be sent. The wake up signals come either from new tasks being
        # submitted, from the executor being shutdown/gc-ed, or from the
        # shutdown of the python interpreter.
        result_reader = self.result_queue._reader
        wakeup_reader = self.thread_wakeup._reader
        readers = [result_reader, wakeup_reader]
        worker_sentinels = [p.sentinel for p in list(self.processes.values())]
        ready = wait(readers + worker_sentinels)

        bpe = None
        is_broken = True
        result_item = None
        if result_reader in ready:
            try:
                result_item = result_reader.recv()
                if isinstance(result_item, _RemoteTraceback):
                    bpe = BrokenProcessPool(
                        "A task has failed to un-serialize. Please ensure that"
                        " the arguments of the function are all picklable."
                    )
                    bpe.__cause__ = result_item
                else:
                    is_broken = False
            except BaseException as e:
                bpe = BrokenProcessPool(
                    "A result has failed to un-serialize. Please ensure that "
                    "the objects returned by the function are always "
                    "picklable."
                )
                tb = traceback.format_exception(
                    type(e), e, getattr(e, "__traceback__", None)
                )
                bpe.__cause__ = _RemoteTraceback("".join(tb))

        elif wakeup_reader in ready:
            # This is simply a wake-up event that might either trigger putting
            # more tasks in the queue or trigger the clean up of resources.
            is_broken = False
        else:
            # A worker has terminated and we don't know why, set the state of
            # the executor as broken
            exit_codes = ""
            if sys.platform != "win32":
                # On Windows, introspecting terminated workers exitcodes seems
                # unstable, therefore they are not appended in the exception
                # message.
                exit_codes = (
                    "\nThe exit codes of the workers are "
                    f"{get_exitcodes_terminated_worker(self.processes)}"
                )
            mp.util.debug(
                "A worker unexpectedly terminated. Workers that "
                "might have caused the breakage: "
                + str(
                    {
                        p.name: p.exitcode
                        for p in list(self.processes.values())
                        if p is not None and p.sentinel in ready
                    }
                )
            )
            bpe = TerminatedWorkerError(
                "A worker process managed by the executor was unexpectedly "
                "terminated. This could be caused by a segmentation fault "
                "while calling the function or by an excessive memory usage "
                "causing the Operating System to kill the worker.\n"
                f"{exit_codes}"
            )

        self.thread_wakeup.clear()

        return result_item, is_broken, bpe

    def process_result_item(self, result_item):
        # Process the received result_item. This can be either the PID of a
        # worker that exited gracefully or a _ResultItem

        if isinstance(result_item, int):
            # Clean shutdown of a worker using its PID, either on request
            # by the executor.shutdown method or by the timeout of the worker
            # itself: we should not mark the executor as broken.
            with self.processes_management_lock:
                p = self.processes.pop(result_item, None)

            # p can be None if the executor is concurrently shutting down.
            if p is not None:
                p._worker_exit_lock.release()
                mp.util.debug(
                    f"joining {p.name} when processing {p.pid} as result_item"
                )
                p.join()
                del p

            # Make sure the executor has the right number of workers, even if
            # a worker timed out while some jobs were submitted. If some work
            # is pending or there are fewer processes than running items, we
            # need to start a new Process and raise a warning.
            n_pending = len(self.pending_work_items)
            n_running = len(self.running_work_items)
            if n_pending - n_running > 0 or n_running > len(self.processes):
                executor = self.executor_reference()
                if (
                    executor is not None
                    and len(self.processes) < executor._max_workers
                ):
                    warnings.warn(
                        "A worker stopped while some jobs were given to the "
                        "executor. This can be caused by a too short worker "
                        "timeout or by a memory leak.",
                        UserWarning,
                    )
                    with executor._processes_management_lock:
                        executor._adjust_process_count()
                    executor = None
        else:
            # Received a _ResultItem so mark the future as completed.
            work_item = self.pending_work_items.pop(result_item.work_id, None)
            # work_item can be None if another process terminated (see above)
            if work_item is not None:
                if result_item.exception:
                    work_item.future.set_exception(result_item.exception)
                else:
                    work_item.future.set_result(result_item.result)
            self.running_work_items.remove(result_item.work_id)

    def is_shutting_down(self):
        # Check whether we should start shutting down the executor.
        executor = self.executor_reference()
        # No more work items can be added if:
        #   - The interpreter is shutting down OR
        #   - The executor that owns this thread is not broken AND
        #       * The executor that owns this worker has been collected OR
        #       * The executor that owns this worker has been shutdown.
        # If the executor is broken, it should be detected in the next loop.
        return _global_shutdown or (
            (executor is None or self.executor_flags.shutdown)
            and not self.executor_flags.broken
        )

    def terminate_broken(self, bpe):
        # Terminate the executor because it is in a broken state. The bpe
        # argument can be used to display more information on the error that
        # led the executor into becoming broken.

        # Mark the process pool broken so that submits fail right now.
        self.executor_flags.flag_as_broken(bpe)

        # Mark pending tasks as failed.
        for work_item in self.pending_work_items.values():
            work_item.future.set_exception(bpe)
            # Delete references to object. See issue16284
            del work_item
        self.pending_work_items.clear()

        # Terminate remaining workers forcibly: the queues or their
        # locks may be in a dirty state and block forever.
        self.kill_workers(reason="broken executor")

        # clean up resources
        self.join_executor_internals()

    def flag_executor_shutting_down(self):
        # Flag the executor as shutting down and cancel remaining tasks if
        # requested as early as possible if it is not gc-ed yet.
        self.executor_flags.flag_as_shutting_down()

        # Cancel pending work items if requested.
        if self.executor_flags.kill_workers:
            while self.pending_work_items:
                _, work_item = self.pending_work_items.popitem()
                work_item.future.set_exception(
                    ShutdownExecutorError(
                        "The Executor was shutdown with `kill_workers=True` "
                        "before this job could complete."
                    )
                )
                del work_item

            # Kill the remaining workers forcibly to not waste time joining
            # them
            self.kill_workers(reason="executor shutting down")

    def kill_workers(self, reason=""):
        # Terminate the remaining workers using SIGKILL. This function also
        # terminates descendant workers of the children in case there is some
        # nested parallelism.
        while self.processes:
            _, p = self.processes.popitem()
            mp.util.debug(f"terminate process {p.name}, reason: {reason}")
            try:
                kill_process_tree(p)
            except ProcessLookupError:  # pragma: no cover
                pass

    def shutdown_workers(self):
        # shutdown all workers in self.processes

        # Create a list to avoid RuntimeError due to concurrent modification
        # of processes. nb_children_alive is thus an upper bound. Also release
        # the processes' _worker_exit_lock to accelerate the shutdown
        # procedure, as there is no need for hand-shake here.
        with self.processes_management_lock:
            n_children_to_stop = 0
            for p in list(self.processes.values()):
                mp.util.debug(f"releasing worker exit lock on {p.name}")
                p._worker_exit_lock.release()
                n_children_to_stop += 1

        mp.util.debug(f"found {n_children_to_stop} processes to stop")

        # Send the right number of sentinels, to make sure all children are
        # properly terminated. Do it with a mechanism that avoids hanging on
        # a full queue when all workers have already been shut down.
        n_sentinels_sent = 0
        cooldown_time = 0.001
        while (
            n_sentinels_sent < n_children_to_stop
            and self.get_n_children_alive() > 0
        ):
            for _ in range(n_children_to_stop - n_sentinels_sent):
                try:
                    self.call_queue.put_nowait(None)
                    n_sentinels_sent += 1
                except queue.Full as e:
                    if cooldown_time > 5.0:
                        mp.util.info(
                            "failed to send all sentinels and exit with error."
                            f"\ncall_queue size={self.call_queue._maxsize}; "
                            f" full is {self.call_queue.full()}; "
                        )
                        raise e
                    mp.util.info(
                        "full call_queue prevented sending all sentinels at "
                        "once, waiting..."
                    )
                    sleep(cooldown_time)
                    cooldown_time *= 1.2
                    break

        mp.util.debug(f"sent {n_sentinels_sent} sentinels to the call queue")

    def join_executor_internals(self):
        self.shutdown_workers()

        # Release the queue's resources as soon as possible. Flag the feeder
        # thread for clean exit to avoid having the crash detection thread
        # flag the Executor as broken during the shutdown. This is safe as
        # either:
        #  * We don't need to communicate with the workers anymore
        #  * There is nothing left in the Queue buffer except None sentinels
        mp.util.debug("closing call_queue")
        self.call_queue.close()
        self.call_queue.join_thread()

        # Closing result_queue
        mp.util.debug("closing result_queue")
        self.result_queue.close()

        mp.util.debug("closing thread_wakeup")
        with self.shutdown_lock:
            self.thread_wakeup.close()

        # If .join() is not called on the created processes then
        # some ctx.Queue methods may deadlock on macOS.
        with self.processes_management_lock:
            mp.util.debug(f"joining {len(self.processes)} processes")
            n_joined_processes = 0
            while True:
                try:
                    pid, p = self.processes.popitem()
                    mp.util.debug(f"joining process {p.name} with pid {pid}")
                    p.join()
                    n_joined_processes += 1
                except KeyError:
                    break

            mp.util.debug(
                "executor management thread clean shutdown of "
                f"{n_joined_processes} workers"
            )

    def get_n_children_alive(self):
        # This is an upper bound on the number of children alive.
        with self.processes_management_lock:
            return sum(p.is_alive() for p in list(self.processes.values()))


_system_limits_checked = False
_system_limited = None


def _check_system_limits():
    global _system_limits_checked, _system_limited
    if _system_limits_checked and _system_limited:
        raise NotImplementedError(_system_limited)
    _system_limits_checked = True
    try:
        nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
    except (AttributeError, ValueError):
        # sysconf not available or setting not available
        return
    if nsems_max == -1:
        # undetermined limit, assume that limit is determined
        # by available memory only
        return
    if nsems_max >= 256:
        # minimum number of semaphores available
        # according to POSIX
        return
    _system_limited = (
        f"system provides too few semaphores ({nsems_max} available, "
        "256 necessary)"
    )
    raise NotImplementedError(_system_limited)


def _chain_from_iterable_of_lists(iterable):
    """
    Specialized implementation of itertools.chain.from_iterable.
    Each item in *iterable* should be a list. This function is
    careful not to keep references to yielded objects.
    """
    for element in iterable:
        element.reverse()
        while element:
            yield element.pop()


def _check_max_depth(context):
    # Limit the maximal recursion level
    global _CURRENT_DEPTH
    if context.get_start_method() == "fork" and _CURRENT_DEPTH > 0:
        raise LokyRecursionError(
            "Could not spawn extra nested processes at depth superior to "
            "MAX_DEPTH=1. It is not possible to increase this limit when "
            "using the 'fork' start method."
        )

    if 0 < MAX_DEPTH and _CURRENT_DEPTH + 1 > MAX_DEPTH:
        raise LokyRecursionError(
            "Could not spawn extra nested processes at depth superior to "
            f"MAX_DEPTH={MAX_DEPTH}. If this is intended, you can change "
            "this limit with the LOKY_MAX_DEPTH environment variable."
        )


class LokyRecursionError(RuntimeError):
    """A process tries to spawn too many levels of nested processes."""


class BrokenProcessPool(_BPPException):
    """
    Raised when the executor is broken while a future was in the running
    state. The cause can be an error raised when unpickling the task in the
    worker process or when unpickling the result value in the parent process.
    It can also be caused by a worker process being terminated unexpectedly.
    """


class TerminatedWorkerError(BrokenProcessPool):
    """
    Raised when a process in a ProcessPoolExecutor terminated abruptly
    while a future was in the running state.
    """


# Alias for backward compat (for code written for loky 1.1.4 and earlier). Do
# not use in new code.
BrokenExecutor = BrokenProcessPool


class ShutdownExecutorError(RuntimeError):

    """
    Raised when a ProcessPoolExecutor is shutdown while a future was in the
    running or pending state.
    """


class ProcessPoolExecutor(Executor):

    _at_exit = None

    def __init__(
        self,
        max_workers=None,
        job_reducers=None,
        result_reducers=None,
        timeout=None,
        context=None,
        initializer=None,
        initargs=(),
        env=None,
    ):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: int, optional (default: cpu_count())
                The maximum number of processes that can be used to execute
                the given calls. If None or not given then as many worker
                processes will be created as the number of CPUs the current
                process can use.
            job_reducers, result_reducers: dict(type: reducer_func)
                Custom reducers for pickling the jobs and the results from the
                Executor. If only `job_reducers` is provided,
                `result_reducers` will use the same reducers.
            timeout: int, optional (default: None)
                Idle workers exit after timeout seconds. If a new job is
                submitted after the timeout, the executor will start enough
                new Python processes to make sure the pool of workers is full.
            context: A multiprocessing context to launch the workers. This
                object should provide SimpleQueue, Queue and Process.
            initializer: A callable used to initialize worker processes.
            initargs: A tuple of arguments to pass to the initializer.
            env: A dict of environment variables to overwrite in the child
                process. The environment variables are set before any module
                is loaded. Note that this only works with the loky context.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = cpu_count()
        else:
            if max_workers <= 0:
                raise ValueError("max_workers must be greater than 0")
            self._max_workers = max_workers

        if (
            sys.platform == "win32"
            and self._max_workers > _MAX_WINDOWS_WORKERS
        ):
            warnings.warn(
                f"On Windows, max_workers cannot exceed {_MAX_WINDOWS_WORKERS} "
                "due to limitations of the operating system."
            )
            self._max_workers = _MAX_WINDOWS_WORKERS

        if context is None:
            context = get_context()
        self._context = context
        self._env = env

        self._initializer, self._initargs = _prepare_initializer(
            initializer, initargs
        )
        _check_max_depth(self._context)

        if result_reducers is None:
            result_reducers = job_reducers

        # Timeout
        self._timeout = timeout

        # Management thread
        self._executor_manager_thread = None

        # Map of pids to processes
        self._processes = {}

        # Internal variables of the ProcessPoolExecutor
        self._processes = {}
        self._queue_count = 0
        self._pending_work_items = {}
        self._running_work_items = []
        self._work_ids = queue.Queue()
        self._processes_management_lock = self._context.Lock()
        self._executor_manager_thread = None
        self._shutdown_lock = threading.Lock()

        # _ThreadWakeup is a communication channel used to interrupt the wait
        # of the main loop of executor_manager_thread from another thread
        # (e.g. when calling executor.submit or executor.shutdown). We do not
        # use the _result_queue to send wakeup signals to the
        # executor_manager_thread as it could result in a deadlock if a worker
        # process dies with the _result_queue write lock still acquired.
        #
        # _shutdown_lock must be locked to access _ThreadWakeup.wakeup.
        self._executor_manager_thread_wakeup = _ThreadWakeup()

        # Flag to hold the state of the Executor. This permits to introspect
        # the Executor state even once it has been garbage collected.
        self._flags = _ExecutorFlags(self._shutdown_lock)

        # Finally setup the queues for interprocess communication
        self._setup_queues(job_reducers, result_reducers)

        mp.util.debug("ProcessPoolExecutor is setup")

    def _setup_queues(self, job_reducers, result_reducers, queue_size=None):
        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        if queue_size is None:
            queue_size = 2 * self._max_workers + EXTRA_QUEUED_CALLS
        self._call_queue = _SafeQueue(
            max_size=queue_size,
            pending_work_items=self._pending_work_items,
            running_work_items=self._running_work_items,
            thread_wakeup=self._executor_manager_thread_wakeup,
            reducers=job_reducers,
            ctx=self._context,
        )
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True

        self._result_queue = SimpleQueue(
            reducers=result_reducers, ctx=self._context
        )

    def _start_executor_manager_thread(self):
        if self._executor_manager_thread is None:
            mp.util.debug("_start_executor_manager_thread called")

            # Start the processes so that their sentinels are known.
            self._executor_manager_thread = _ExecutorManagerThread(self)
            self._executor_manager_thread.start()

            # register this executor in a mechanism that ensures it will
            # wakeup when the interpreter is exiting.
            _threads_wakeups[self._executor_manager_thread] = (
                self._shutdown_lock,
                self._executor_manager_thread_wakeup,
            )

            global process_pool_executor_at_exit
            if process_pool_executor_at_exit is None:
                # Ensure that the _python_exit function will be called before
                # the multiprocessing.Queue._close finalizers which have an
                # exitpriority of 10.

                if sys.version_info < (3, 9):
                    process_pool_executor_at_exit = mp.util.Finalize(
                        None, _python_exit, exitpriority=20
                    )
                else:
                    process_pool_executor_at_exit = threading._register_atexit(
                        _python_exit
                    )

    def _adjust_process_count(self):
        while len(self._processes) < self._max_workers:
            worker_exit_lock = self._context.BoundedSemaphore(1)
            args = (
                self._call_queue,
                self._result_queue,
                self._initializer,
                self._initargs,
                self._processes_management_lock,
                self._timeout,
                worker_exit_lock,
                _CURRENT_DEPTH + 1,
            )
            worker_exit_lock.acquire()
            try:
                # Try to spawn the process with some environment variables to
                # overwrite; this only works with the loky context for now.
                p = self._context.Process(
                    target=_process_worker, args=args, env=self._env
                )
            except TypeError:
                p = self._context.Process(target=_process_worker, args=args)
            p._worker_exit_lock = worker_exit_lock
            p.start()
|
| 1210 |
+
self._processes[p.pid] = p
|
| 1211 |
+
mp.util.debug(
|
| 1212 |
+
f"Adjusted process count to {self._max_workers}: "
|
| 1213 |
+
f"{[(p.name, pid) for pid, p in self._processes.items()]}"
|
| 1214 |
+
)
|
| 1215 |
+
|
| 1216 |
+
def _ensure_executor_running(self):
|
| 1217 |
+
"""ensures all workers and management thread are running"""
|
| 1218 |
+
with self._processes_management_lock:
|
| 1219 |
+
if len(self._processes) != self._max_workers:
|
| 1220 |
+
self._adjust_process_count()
|
| 1221 |
+
self._start_executor_manager_thread()
|
| 1222 |
+
|
| 1223 |
+
def submit(self, fn, *args, **kwargs):
|
| 1224 |
+
with self._flags.shutdown_lock:
|
| 1225 |
+
if self._flags.broken is not None:
|
| 1226 |
+
raise self._flags.broken
|
| 1227 |
+
if self._flags.shutdown:
|
| 1228 |
+
raise ShutdownExecutorError(
|
| 1229 |
+
"cannot schedule new futures after shutdown"
|
| 1230 |
+
)
|
| 1231 |
+
|
| 1232 |
+
# Cannot submit a new calls once the interpreter is shutting down.
|
| 1233 |
+
# This check avoids spawning new processes at exit.
|
| 1234 |
+
if _global_shutdown:
|
| 1235 |
+
raise RuntimeError(
|
| 1236 |
+
"cannot schedule new futures after " "interpreter shutdown"
|
| 1237 |
+
)
|
| 1238 |
+
|
| 1239 |
+
f = Future()
|
| 1240 |
+
w = _WorkItem(f, fn, args, kwargs)
|
| 1241 |
+
|
| 1242 |
+
self._pending_work_items[self._queue_count] = w
|
| 1243 |
+
self._work_ids.put(self._queue_count)
|
| 1244 |
+
self._queue_count += 1
|
| 1245 |
+
# Wake up queue management thread
|
| 1246 |
+
self._executor_manager_thread_wakeup.wakeup()
|
| 1247 |
+
|
| 1248 |
+
self._ensure_executor_running()
|
| 1249 |
+
return f
|
| 1250 |
+
|
| 1251 |
+
submit.__doc__ = Executor.submit.__doc__
|
| 1252 |
+
|
| 1253 |
+
def map(self, fn, *iterables, **kwargs):
|
| 1254 |
+
"""Returns an iterator equivalent to map(fn, iter).
|
| 1255 |
+
|
| 1256 |
+
Args:
|
| 1257 |
+
fn: A callable that will take as many arguments as there are
|
| 1258 |
+
passed iterables.
|
| 1259 |
+
timeout: The maximum number of seconds to wait. If None, then there
|
| 1260 |
+
is no limit on the wait time.
|
| 1261 |
+
chunksize: If greater than one, the iterables will be chopped into
|
| 1262 |
+
chunks of size chunksize and submitted to the process pool.
|
| 1263 |
+
If set to one, the items in the list will be sent one at a
|
| 1264 |
+
time.
|
| 1265 |
+
|
| 1266 |
+
Returns:
|
| 1267 |
+
An iterator equivalent to: map(func, *iterables) but the calls may
|
| 1268 |
+
be evaluated out-of-order.
|
| 1269 |
+
|
| 1270 |
+
Raises:
|
| 1271 |
+
TimeoutError: If the entire result iterator could not be generated
|
| 1272 |
+
before the given timeout.
|
| 1273 |
+
Exception: If fn(*args) raises for any values.
|
| 1274 |
+
"""
|
| 1275 |
+
timeout = kwargs.get("timeout", None)
|
| 1276 |
+
chunksize = kwargs.get("chunksize", 1)
|
| 1277 |
+
if chunksize < 1:
|
| 1278 |
+
raise ValueError("chunksize must be >= 1.")
|
| 1279 |
+
|
| 1280 |
+
results = super().map(
|
| 1281 |
+
partial(_process_chunk, fn),
|
| 1282 |
+
_get_chunks(chunksize, *iterables),
|
| 1283 |
+
timeout=timeout,
|
| 1284 |
+
)
|
| 1285 |
+
return _chain_from_iterable_of_lists(results)
|
| 1286 |
+
|
| 1287 |
+
def shutdown(self, wait=True, kill_workers=False):
|
| 1288 |
+
mp.util.debug(f"shutting down executor {self}")
|
| 1289 |
+
|
| 1290 |
+
self._flags.flag_as_shutting_down(kill_workers)
|
| 1291 |
+
executor_manager_thread = self._executor_manager_thread
|
| 1292 |
+
executor_manager_thread_wakeup = self._executor_manager_thread_wakeup
|
| 1293 |
+
|
| 1294 |
+
if executor_manager_thread_wakeup is not None:
|
| 1295 |
+
# Wake up queue management thread
|
| 1296 |
+
with self._shutdown_lock:
|
| 1297 |
+
self._executor_manager_thread_wakeup.wakeup()
|
| 1298 |
+
|
| 1299 |
+
if executor_manager_thread is not None and wait:
|
| 1300 |
+
# This locks avoids concurrent join if the interpreter
|
| 1301 |
+
# is shutting down.
|
| 1302 |
+
with _global_shutdown_lock:
|
| 1303 |
+
executor_manager_thread.join()
|
| 1304 |
+
_threads_wakeups.pop(executor_manager_thread, None)
|
| 1305 |
+
|
| 1306 |
+
# To reduce the risk of opening too many files, remove references to
|
| 1307 |
+
# objects that use file descriptors.
|
| 1308 |
+
self._executor_manager_thread = None
|
| 1309 |
+
self._executor_manager_thread_wakeup = None
|
| 1310 |
+
self._call_queue = None
|
| 1311 |
+
self._result_queue = None
|
| 1312 |
+
self._processes_management_lock = None
|
| 1313 |
+
|
| 1314 |
+
shutdown.__doc__ = Executor.shutdown.__doc__
|
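The executor above is driven entirely through submit(), map() and shutdown(). A minimal usage sketch, assuming the vendored copy is importable as joblib.externals.loky (the standalone loky package exposes the same ProcessPoolExecutor):

from joblib.externals.loky.process_executor import ProcessPoolExecutor

def square(x):
    return x * x

if __name__ == "__main__":  # workers are spawned, so guard the entry point
    # env is forwarded to child processes before any module is loaded
    # (loky context only), as documented in __init__ above.
    executor = ProcessPoolExecutor(
        max_workers=2, timeout=10, env={"OMP_NUM_THREADS": "1"}
    )
    future = executor.submit(square, 3)
    print(future.result())                                    # 9
    print(list(executor.map(square, range(4), chunksize=2)))  # [0, 1, 4, 9]
    executor.shutdown(wait=True)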
evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py
ADDED
@@ -0,0 +1,285 @@
###############################################################################
# Reusable ProcessPoolExecutor
#
# author: Thomas Moreau and Olivier Grisel
#
import time
import warnings
import threading
import multiprocessing as mp

from .process_executor import ProcessPoolExecutor, EXTRA_QUEUED_CALLS
from .backend.context import cpu_count
from .backend import get_context

__all__ = ["get_reusable_executor"]

# Singleton executor and id management
_executor_lock = threading.RLock()
_next_executor_id = 0
_executor = None
_executor_kwargs = None


def _get_next_executor_id():
    """Ensure that each successive executor instance has a unique, monotonic id.

    The purpose of this monotonic id is to help debug and test automated
    instance creation.
    """
    global _next_executor_id
    with _executor_lock:
        executor_id = _next_executor_id
        _next_executor_id += 1
        return executor_id


def get_reusable_executor(
    max_workers=None,
    context=None,
    timeout=10,
    kill_workers=False,
    reuse="auto",
    job_reducers=None,
    result_reducers=None,
    initializer=None,
    initargs=(),
    env=None,
):
    """Return the current ReusableExecutor instance.

    Start a new instance if it has not been started already or if the previous
    instance was left in a broken state.

    If the previous instance does not have the requested number of workers, the
    executor is dynamically resized to adjust the number of workers prior to
    returning.

    Reusing a singleton instance spares the overhead of starting new worker
    processes and importing common python packages each time.

    ``max_workers`` controls the maximum number of tasks that can be running in
    parallel in worker processes. By default this is set to the number of
    CPUs on the host.

    Setting ``timeout`` (in seconds) makes idle workers automatically shut down
    so as to release system resources. New workers are respawned upon
    submission of new tasks so that ``max_workers`` are available to accept the
    newly submitted tasks. Setting ``timeout`` to around 100 times the time
    required to spawn new processes and import packages in them (on the order
    of 100ms) ensures that the overhead of spawning workers is negligible.

    Setting ``kill_workers=True`` makes it possible to forcibly interrupt
    previously spawned jobs to get a new instance of the reusable executor
    with new constructor argument values.

    The ``job_reducers`` and ``result_reducers`` are used to customize the
    pickling of tasks and results sent to the executor.

    When provided, the ``initializer`` is run first in newly spawned
    processes with argument ``initargs``.

    The environment variables in the child processes are a copy of the values
    in the main process. One can provide a dict ``{ENV: VAL}`` where ``ENV``
    and ``VAL`` are string literals to overwrite the environment variable
    ``ENV`` in the child processes to value ``VAL``. The environment variables
    are set in the children before any module is loaded. This only works with
    the ``loky`` context.
    """
    _executor, _ = _ReusablePoolExecutor.get_reusable_executor(
        max_workers=max_workers,
        context=context,
        timeout=timeout,
        kill_workers=kill_workers,
        reuse=reuse,
        job_reducers=job_reducers,
        result_reducers=result_reducers,
        initializer=initializer,
        initargs=initargs,
        env=env,
    )
    return _executor


class _ReusablePoolExecutor(ProcessPoolExecutor):
    def __init__(
        self,
        submit_resize_lock,
        max_workers=None,
        context=None,
        timeout=None,
        executor_id=0,
        job_reducers=None,
        result_reducers=None,
        initializer=None,
        initargs=(),
        env=None,
    ):
        super().__init__(
            max_workers=max_workers,
            context=context,
            timeout=timeout,
            job_reducers=job_reducers,
            result_reducers=result_reducers,
            initializer=initializer,
            initargs=initargs,
            env=env,
        )
        self.executor_id = executor_id
        self._submit_resize_lock = submit_resize_lock

    @classmethod
    def get_reusable_executor(
        cls,
        max_workers=None,
        context=None,
        timeout=10,
        kill_workers=False,
        reuse="auto",
        job_reducers=None,
        result_reducers=None,
        initializer=None,
        initargs=(),
        env=None,
    ):
        with _executor_lock:
            global _executor, _executor_kwargs
            executor = _executor

            if max_workers is None:
                if reuse is True and executor is not None:
                    max_workers = executor._max_workers
                else:
                    max_workers = cpu_count()
            elif max_workers <= 0:
                raise ValueError(
                    f"max_workers must be greater than 0, got {max_workers}."
                )

            if isinstance(context, str):
                context = get_context(context)
            if context is not None and context.get_start_method() == "fork":
                raise ValueError(
                    "Cannot use reusable executor with the 'fork' context"
                )

            kwargs = dict(
                context=context,
                timeout=timeout,
                job_reducers=job_reducers,
                result_reducers=result_reducers,
                initializer=initializer,
                initargs=initargs,
                env=env,
            )
            if executor is None:
                is_reused = False
                mp.util.debug(
                    f"Creating an executor with max_workers={max_workers}."
                )
                executor_id = _get_next_executor_id()
                _executor_kwargs = kwargs
                _executor = executor = cls(
                    _executor_lock,
                    max_workers=max_workers,
                    executor_id=executor_id,
                    **kwargs,
                )
            else:
                if reuse == "auto":
                    reuse = kwargs == _executor_kwargs
                if (
                    executor._flags.broken
                    or executor._flags.shutdown
                    or not reuse
                ):
                    if executor._flags.broken:
                        reason = "broken"
                    elif executor._flags.shutdown:
                        reason = "shutdown"
                    else:
                        reason = "arguments have changed"
                    mp.util.debug(
                        "Creating a new executor with max_workers="
                        f"{max_workers} as the previous instance cannot be "
                        f"reused ({reason})."
                    )
                    executor.shutdown(wait=True, kill_workers=kill_workers)
                    _executor = executor = _executor_kwargs = None
                    # Recursive call to build a new instance
                    return cls.get_reusable_executor(
                        max_workers=max_workers, **kwargs
                    )
                else:
                    mp.util.debug(
                        "Reusing existing executor with "
                        f"max_workers={executor._max_workers}."
                    )
                    is_reused = True
                    executor._resize(max_workers)

        return executor, is_reused

    def submit(self, fn, *args, **kwargs):
        with self._submit_resize_lock:
            return super().submit(fn, *args, **kwargs)

    def _resize(self, max_workers):
        with self._submit_resize_lock:
            if max_workers is None:
                raise ValueError("Trying to resize with max_workers=None")
            elif max_workers == self._max_workers:
                return

            if self._executor_manager_thread is None:
                # If the executor_manager_thread has not been started
                # then no processes have been spawned and we can just
                # update _max_workers and return
                self._max_workers = max_workers
                return

            self._wait_job_completion()

            # Some processes might have returned due to timeout so check how
            # many children are still alive. Use the
            # _processes_management_lock to ensure that no processes are
            # spawned or time out during the resize.
            with self._processes_management_lock:
                processes = list(self._processes.values())
                nb_children_alive = sum(p.is_alive() for p in processes)
                self._max_workers = max_workers
                for _ in range(max_workers, nb_children_alive):
                    self._call_queue.put(None)
            while (
                len(self._processes) > max_workers and not self._flags.broken
            ):
                time.sleep(1e-3)

            self._adjust_process_count()
            processes = list(self._processes.values())
            while not all(p.is_alive() for p in processes):
                time.sleep(1e-3)

    def _wait_job_completion(self):
        """Wait for the cache to be empty before resizing the pool."""
        # Issue a warning to the user about the bad effect of this usage.
        if self._pending_work_items:
            warnings.warn(
                "Trying to resize an executor with running jobs: "
                "waiting for jobs completion before resizing.",
                UserWarning,
            )
            mp.util.debug(
                f"Executor {self.executor_id} waiting for jobs completion "
                "before resizing"
            )
        # Wait for the completion of the jobs
        while self._pending_work_items:
            time.sleep(1e-3)

    def _setup_queues(self, job_reducers, result_reducers):
        # As this executor can be resized, use a large queue size to avoid
        # underestimating capacity and introducing overhead
        queue_size = 2 * cpu_count() + EXTRA_QUEUED_CALLS
        super()._setup_queues(
            job_reducers, result_reducers, queue_size=queue_size
        )
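get_reusable_executor above implements a process-wide singleton: calling it again with the same constructor arguments returns the same executor, and a different max_workers only triggers an in-place _resize(). A short sketch of that behavior, assuming the same vendored import path as above:

from joblib.externals.loky.reusable_executor import get_reusable_executor

def work(x):
    return x + 1

if __name__ == "__main__":
    executor = get_reusable_executor(max_workers=4, timeout=2)
    # Same arguments (reuse="auto") -> the singleton is returned as-is.
    assert get_reusable_executor(max_workers=4, timeout=2) is executor
    # Only max_workers differs -> same object, resized to 2 workers.
    assert get_reusable_executor(max_workers=2, timeout=2) is executor
    print(list(executor.map(work, range(3))))  # [1, 2, 3]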
evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py27_np17.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a1f4e8cccfca94f25ae744d1f050b0734f663263ba38ed0642181404b348b17b
size 757
evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py33_np18.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d9e215780f978ce693e48110ead23652e1c6de1c2189172232690198f7088788
size 792
evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py35_np19.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a56c3fc6e0db3a4102aaed4a19fd4e154eecd956f30b6bf9179897844ed3c01e
size 790
evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.xz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:efb146d450c6d061d06affb56f17384e7f64cbab9b516fcc6c4d3f8869b3e707
size 712
evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e064c2eecfdc58d552844467da7bd56eca596098322bfd266a7e1312abdd5735
size 1068
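The test-data entries above are stored as Git LFS pointers: the repository records only the spec version, a sha256 object id, and the blob size, while the actual .gz/.pkl payloads live in LFS storage. A tiny sketch of reading that three-line format (parse_lfs_pointer is a hypothetical helper, not part of joblib):

def parse_lfs_pointer(text):
    # Each non-empty line is "<key> <value>", e.g. "size 757".
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    assert fields["version"].startswith("https://git-lfs.github.com/spec/")
    return fields["oid"], int(fields["size"])

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:e064c2eecfdc58d552844467da7bd56eca596098322bfd266a7e1312abdd5735\n"
    "size 1068\n"
)
print(parse_lfs_pointer(pointer))  # ('sha256:e064...', 1068)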