ZTWHHH commited on
Commit
fafd216
·
verified ·
1 Parent(s): b551bd3

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. mplug_owl2/lib/python3.10/site-packages/fastapi-0.115.6.dist-info/INSTALLER +1 -0
  3. mplug_owl2/lib/python3.10/site-packages/joblib/externals/__pycache__/__init__.cpython-310.pyc +0 -0
  4. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/__init__.py +44 -0
  5. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc +0 -0
  6. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc +0 -0
  7. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc +0 -0
  8. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc +0 -0
  9. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/process_executor.cpython-310.pyc +0 -0
  10. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc +0 -0
  11. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/_base.py +28 -0
  12. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__init__.py +14 -0
  13. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-310.pyc +0 -0
  14. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-310.pyc +0 -0
  15. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc +0 -0
  16. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc +0 -0
  17. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/fork_exec.cpython-310.pyc +0 -0
  18. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc +0 -0
  19. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc +0 -0
  20. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-310.pyc +0 -0
  21. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/queues.cpython-310.pyc +0 -0
  22. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/reduction.cpython-310.pyc +0 -0
  23. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/resource_tracker.cpython-310.pyc +0 -0
  24. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/spawn.cpython-310.pyc +0 -0
  25. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc +0 -0
  26. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/utils.cpython-310.pyc +0 -0
  27. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/_posix_reduction.py +67 -0
  28. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/_win_reduction.py +18 -0
  29. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/context.py +378 -0
  30. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py +43 -0
  31. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py +193 -0
  32. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_win32.py +173 -0
  33. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py +85 -0
  34. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py +236 -0
  35. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/reduction.py +224 -0
  36. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py +378 -0
  37. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py +250 -0
  38. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py +409 -0
  39. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/utils.py +181 -0
  40. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py +102 -0
  41. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/initializers.py +80 -0
  42. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py +1314 -0
  43. mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py +285 -0
  44. mplug_owl2/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py35_np19.gz +3 -0
  45. mplug_owl2/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py27_np16.gz +3 -0
  46. mplug_owl2/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl +3 -0
  47. mplug_owl2/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_01.npy +3 -0
  48. mplug_owl2/lib/python3.10/site-packages/mpl_toolkits/mplot3d/__pycache__/axes3d.cpython-310.pyc +3 -0
  49. pllava/share/terminfo/a/aaa-40-rv +0 -0
  50. pllava/share/terminfo/a/aaa-ctxt +0 -0
.gitattributes CHANGED
@@ -703,3 +703,4 @@ mplug_owl2/lib/python3.10/site-packages/sympy/parsing/latex/_antlr/__pycache__/l
703
  mplug_owl2/lib/python3.10/site-packages/sympy/physics/control/__pycache__/lti.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
704
  mplug_owl2/lib/python3.10/site-packages/sympy/physics/continuum_mechanics/__pycache__/beam.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
705
  mplug_owl2/lib/python3.10/site-packages/sympy/matrices/__pycache__/matrixbase.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
703
  mplug_owl2/lib/python3.10/site-packages/sympy/physics/control/__pycache__/lti.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
704
  mplug_owl2/lib/python3.10/site-packages/sympy/physics/continuum_mechanics/__pycache__/beam.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
705
  mplug_owl2/lib/python3.10/site-packages/sympy/matrices/__pycache__/matrixbase.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
706
+ mplug_owl2/lib/python3.10/site-packages/mpl_toolkits/mplot3d/__pycache__/axes3d.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
mplug_owl2/lib/python3.10/site-packages/fastapi-0.115.6.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
mplug_owl2/lib/python3.10/site-packages/joblib/externals/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (172 Bytes). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/__init__.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ r"""The :mod:`loky` module manages a pool of worker that can be re-used across time.
2
+ It provides a robust and dynamic implementation os the
3
+ :class:`ProcessPoolExecutor` and a function :func:`get_reusable_executor` which
4
+ hide the pool management under the hood.
5
+ """
6
+ from concurrent.futures import (
7
+ ALL_COMPLETED,
8
+ FIRST_COMPLETED,
9
+ FIRST_EXCEPTION,
10
+ CancelledError,
11
+ Executor,
12
+ TimeoutError,
13
+ as_completed,
14
+ wait,
15
+ )
16
+
17
+ from ._base import Future
18
+ from .backend.context import cpu_count
19
+ from .backend.reduction import set_loky_pickler
20
+ from .reusable_executor import get_reusable_executor
21
+ from .cloudpickle_wrapper import wrap_non_picklable_objects
22
+ from .process_executor import BrokenProcessPool, ProcessPoolExecutor
23
+
24
+
25
+ __all__ = [
26
+ "get_reusable_executor",
27
+ "cpu_count",
28
+ "wait",
29
+ "as_completed",
30
+ "Future",
31
+ "Executor",
32
+ "ProcessPoolExecutor",
33
+ "BrokenProcessPool",
34
+ "CancelledError",
35
+ "TimeoutError",
36
+ "FIRST_COMPLETED",
37
+ "FIRST_EXCEPTION",
38
+ "ALL_COMPLETED",
39
+ "wrap_non_picklable_objects",
40
+ "set_loky_pickler",
41
+ ]
42
+
43
+
44
+ __version__ = "3.4.1"
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.16 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc ADDED
Binary file (732 Bytes). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc ADDED
Binary file (3.69 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc ADDED
Binary file (2.37 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/process_executor.cpython-310.pyc ADDED
Binary file (33 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc ADDED
Binary file (7.5 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/_base.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Modification of concurrent.futures.Future
3
+ #
4
+ # author: Thomas Moreau and Olivier Grisel
5
+ #
6
+ # adapted from concurrent/futures/_base.py (17/02/2017)
7
+ # * Do not use yield from
8
+ # * Use old super syntax
9
+ #
10
+ # Copyright 2009 Brian Quinlan. All Rights Reserved.
11
+ # Licensed to PSF under a Contributor Agreement.
12
+
13
+ from concurrent.futures import Future as _BaseFuture
14
+ from concurrent.futures._base import LOGGER
15
+
16
+
17
+ # To make loky._base.Future instances awaitable by concurrent.futures.wait,
18
+ # derive our custom Future class from _BaseFuture. _invoke_callback is the only
19
+ # modification made to this class in loky.
20
+ # TODO investigate why using `concurrent.futures.Future` directly does not
21
+ # always work in our test suite.
22
+ class Future(_BaseFuture):
23
+ def _invoke_callbacks(self):
24
+ for callback in self._done_callbacks:
25
+ try:
26
+ callback(self)
27
+ except BaseException:
28
+ LOGGER.exception(f"exception calling callback for {self!r}")
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from multiprocessing import synchronize
3
+
4
+ from .context import get_context
5
+
6
+
7
+ def _make_name():
8
+ return f"/loky-{os.getpid()}-{next(synchronize.SemLock._rand)}"
9
+
10
+
11
+ # monkey patch the name creation for multiprocessing
12
+ synchronize.SemLock._make_name = staticmethod(_make_name)
13
+
14
+ __all__ = ["get_context"]
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (530 Bytes). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-310.pyc ADDED
Binary file (1.8 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc ADDED
Binary file (487 Bytes). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc ADDED
Binary file (9.59 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/fork_exec.cpython-310.pyc ADDED
Binary file (1.11 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc ADDED
Binary file (5.04 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc ADDED
Binary file (4.25 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-310.pyc ADDED
Binary file (2.1 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/queues.cpython-310.pyc ADDED
Binary file (4.81 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/reduction.cpython-310.pyc ADDED
Binary file (4.94 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/resource_tracker.cpython-310.pyc ADDED
Binary file (7.81 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/spawn.cpython-310.pyc ADDED
Binary file (5.04 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc ADDED
Binary file (10.1 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/utils.cpython-310.pyc ADDED
Binary file (4.32 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/_posix_reduction.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Extra reducers for Unix based system and connections objects
3
+ #
4
+ # author: Thomas Moreau and Olivier Grisel
5
+ #
6
+ # adapted from multiprocessing/reduction.py (17/02/2017)
7
+ # * Add adapted reduction for LokyProcesses and socket/Connection
8
+ #
9
+ import os
10
+ import socket
11
+ import _socket
12
+ from multiprocessing.connection import Connection
13
+ from multiprocessing.context import get_spawning_popen
14
+
15
+ from .reduction import register
16
+
17
+ HAVE_SEND_HANDLE = (
18
+ hasattr(socket, "CMSG_LEN")
19
+ and hasattr(socket, "SCM_RIGHTS")
20
+ and hasattr(socket.socket, "sendmsg")
21
+ )
22
+
23
+
24
+ def _mk_inheritable(fd):
25
+ os.set_inheritable(fd, True)
26
+ return fd
27
+
28
+
29
+ def DupFd(fd):
30
+ """Return a wrapper for an fd."""
31
+ popen_obj = get_spawning_popen()
32
+ if popen_obj is not None:
33
+ return popen_obj.DupFd(popen_obj.duplicate_for_child(fd))
34
+ elif HAVE_SEND_HANDLE:
35
+ from multiprocessing import resource_sharer
36
+
37
+ return resource_sharer.DupFd(fd)
38
+ else:
39
+ raise TypeError(
40
+ "Cannot pickle connection object. This object can only be "
41
+ "passed when spawning a new process"
42
+ )
43
+
44
+
45
+ def _reduce_socket(s):
46
+ df = DupFd(s.fileno())
47
+ return _rebuild_socket, (df, s.family, s.type, s.proto)
48
+
49
+
50
+ def _rebuild_socket(df, family, type, proto):
51
+ fd = df.detach()
52
+ return socket.fromfd(fd, family, type, proto)
53
+
54
+
55
+ def rebuild_connection(df, readable, writable):
56
+ fd = df.detach()
57
+ return Connection(fd, readable, writable)
58
+
59
+
60
+ def reduce_connection(conn):
61
+ df = DupFd(conn.fileno())
62
+ return rebuild_connection, (df, conn.readable, conn.writable)
63
+
64
+
65
+ register(socket.socket, _reduce_socket)
66
+ register(_socket.socket, _reduce_socket)
67
+ register(Connection, reduce_connection)
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/_win_reduction.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Extra reducers for Windows system and connections objects
3
+ #
4
+ # author: Thomas Moreau and Olivier Grisel
5
+ #
6
+ # adapted from multiprocessing/reduction.py (17/02/2017)
7
+ # * Add adapted reduction for LokyProcesses and socket/PipeConnection
8
+ #
9
+ import socket
10
+ from multiprocessing import connection
11
+ from multiprocessing.reduction import _reduce_socket
12
+
13
+ from .reduction import register
14
+
15
+ # register reduction for win32 communication objects
16
+ register(socket.socket, _reduce_socket)
17
+ register(connection.Connection, connection.reduce_connection)
18
+ register(connection.PipeConnection, connection.reduce_pipe_connection)
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/context.py ADDED
@@ -0,0 +1,378 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Basic context management with LokyContext
3
+ #
4
+ # author: Thomas Moreau and Olivier Grisel
5
+ #
6
+ # adapted from multiprocessing/context.py
7
+ # * Create a context ensuring loky uses only objects that are compatible
8
+ # * Add LokyContext to the list of context of multiprocessing so loky can be
9
+ # used with multiprocessing.set_start_method
10
+ # * Implement a CFS-aware amd physical-core aware cpu_count function.
11
+ #
12
+ import os
13
+ import sys
14
+ import math
15
+ import subprocess
16
+ import traceback
17
+ import warnings
18
+ import multiprocessing as mp
19
+ from multiprocessing import get_context as mp_get_context
20
+ from multiprocessing.context import BaseContext
21
+
22
+
23
+ from .process import LokyProcess, LokyInitMainProcess
24
+
25
+ # Apparently, on older Python versions, loky cannot work 61 workers on Windows
26
+ # but instead 60: ¯\_(ツ)_/¯
27
+ if sys.version_info >= (3, 8):
28
+ from concurrent.futures.process import _MAX_WINDOWS_WORKERS
29
+
30
+ if sys.version_info < (3, 10):
31
+ _MAX_WINDOWS_WORKERS = _MAX_WINDOWS_WORKERS - 1
32
+ else:
33
+ # compat for versions before 3.8 which do not define this.
34
+ _MAX_WINDOWS_WORKERS = 60
35
+
36
+ START_METHODS = ["loky", "loky_init_main", "spawn"]
37
+ if sys.platform != "win32":
38
+ START_METHODS += ["fork", "forkserver"]
39
+
40
+ _DEFAULT_START_METHOD = None
41
+
42
+ # Cache for the number of physical cores to avoid repeating subprocess calls.
43
+ # It should not change during the lifetime of the program.
44
+ physical_cores_cache = None
45
+
46
+
47
+ def get_context(method=None):
48
+ # Try to overload the default context
49
+ method = method or _DEFAULT_START_METHOD or "loky"
50
+ if method == "fork":
51
+ # If 'fork' is explicitly requested, warn user about potential issues.
52
+ warnings.warn(
53
+ "`fork` start method should not be used with "
54
+ "`loky` as it does not respect POSIX. Try using "
55
+ "`spawn` or `loky` instead.",
56
+ UserWarning,
57
+ )
58
+ try:
59
+ return mp_get_context(method)
60
+ except ValueError:
61
+ raise ValueError(
62
+ f"Unknown context '{method}'. Value should be in "
63
+ f"{START_METHODS}."
64
+ )
65
+
66
+
67
+ def set_start_method(method, force=False):
68
+ global _DEFAULT_START_METHOD
69
+ if _DEFAULT_START_METHOD is not None and not force:
70
+ raise RuntimeError("context has already been set")
71
+ assert method is None or method in START_METHODS, (
72
+ f"'{method}' is not a valid start_method. It should be in "
73
+ f"{START_METHODS}"
74
+ )
75
+
76
+ _DEFAULT_START_METHOD = method
77
+
78
+
79
+ def get_start_method():
80
+ return _DEFAULT_START_METHOD
81
+
82
+
83
+ def cpu_count(only_physical_cores=False):
84
+ """Return the number of CPUs the current process can use.
85
+
86
+ The returned number of CPUs accounts for:
87
+ * the number of CPUs in the system, as given by
88
+ ``multiprocessing.cpu_count``;
89
+ * the CPU affinity settings of the current process
90
+ (available on some Unix systems);
91
+ * Cgroup CPU bandwidth limit (available on Linux only, typically
92
+ set by docker and similar container orchestration systems);
93
+ * the value of the LOKY_MAX_CPU_COUNT environment variable if defined.
94
+ and is given as the minimum of these constraints.
95
+
96
+ If ``only_physical_cores`` is True, return the number of physical cores
97
+ instead of the number of logical cores (hyperthreading / SMT). Note that
98
+ this option is not enforced if the number of usable cores is controlled in
99
+ any other way such as: process affinity, Cgroup restricted CPU bandwidth
100
+ or the LOKY_MAX_CPU_COUNT environment variable. If the number of physical
101
+ cores is not found, return the number of logical cores.
102
+
103
+ Note that on Windows, the returned number of CPUs cannot exceed 61 (or 60 for
104
+ Python < 3.10), see:
105
+ https://bugs.python.org/issue26903.
106
+
107
+ It is also always larger or equal to 1.
108
+ """
109
+ # Note: os.cpu_count() is allowed to return None in its docstring
110
+ os_cpu_count = os.cpu_count() or 1
111
+ if sys.platform == "win32":
112
+ # On Windows, attempting to use more than 61 CPUs would result in a
113
+ # OS-level error. See https://bugs.python.org/issue26903. According to
114
+ # https://learn.microsoft.com/en-us/windows/win32/procthread/processor-groups
115
+ # it might be possible to go beyond with a lot of extra work but this
116
+ # does not look easy.
117
+ os_cpu_count = min(os_cpu_count, _MAX_WINDOWS_WORKERS)
118
+
119
+ cpu_count_user = _cpu_count_user(os_cpu_count)
120
+ aggregate_cpu_count = max(min(os_cpu_count, cpu_count_user), 1)
121
+
122
+ if not only_physical_cores:
123
+ return aggregate_cpu_count
124
+
125
+ if cpu_count_user < os_cpu_count:
126
+ # Respect user setting
127
+ return max(cpu_count_user, 1)
128
+
129
+ cpu_count_physical, exception = _count_physical_cores()
130
+ if cpu_count_physical != "not found":
131
+ return cpu_count_physical
132
+
133
+ # Fallback to default behavior
134
+ if exception is not None:
135
+ # warns only the first time
136
+ warnings.warn(
137
+ "Could not find the number of physical cores for the "
138
+ f"following reason:\n{exception}\n"
139
+ "Returning the number of logical cores instead. You can "
140
+ "silence this warning by setting LOKY_MAX_CPU_COUNT to "
141
+ "the number of cores you want to use."
142
+ )
143
+ traceback.print_tb(exception.__traceback__)
144
+
145
+ return aggregate_cpu_count
146
+
147
+
148
+ def _cpu_count_cgroup(os_cpu_count):
149
+ # Cgroup CPU bandwidth limit available in Linux since 2.6 kernel
150
+ cpu_max_fname = "/sys/fs/cgroup/cpu.max"
151
+ cfs_quota_fname = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
152
+ cfs_period_fname = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
153
+ if os.path.exists(cpu_max_fname):
154
+ # cgroup v2
155
+ # https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
156
+ with open(cpu_max_fname) as fh:
157
+ cpu_quota_us, cpu_period_us = fh.read().strip().split()
158
+ elif os.path.exists(cfs_quota_fname) and os.path.exists(cfs_period_fname):
159
+ # cgroup v1
160
+ # https://www.kernel.org/doc/html/latest/scheduler/sched-bwc.html#management
161
+ with open(cfs_quota_fname) as fh:
162
+ cpu_quota_us = fh.read().strip()
163
+ with open(cfs_period_fname) as fh:
164
+ cpu_period_us = fh.read().strip()
165
+ else:
166
+ # No Cgroup CPU bandwidth limit (e.g. non-Linux platform)
167
+ cpu_quota_us = "max"
168
+ cpu_period_us = 100_000 # unused, for consistency with default values
169
+
170
+ if cpu_quota_us == "max":
171
+ # No active Cgroup quota on a Cgroup-capable platform
172
+ return os_cpu_count
173
+ else:
174
+ cpu_quota_us = int(cpu_quota_us)
175
+ cpu_period_us = int(cpu_period_us)
176
+ if cpu_quota_us > 0 and cpu_period_us > 0:
177
+ return math.ceil(cpu_quota_us / cpu_period_us)
178
+ else: # pragma: no cover
179
+ # Setting a negative cpu_quota_us value is a valid way to disable
180
+ # cgroup CPU bandwith limits
181
+ return os_cpu_count
182
+
183
+
184
+ def _cpu_count_affinity(os_cpu_count):
185
+ # Number of available CPUs given affinity settings
186
+ if hasattr(os, "sched_getaffinity"):
187
+ try:
188
+ return len(os.sched_getaffinity(0))
189
+ except NotImplementedError:
190
+ pass
191
+
192
+ # On PyPy and possibly other platforms, os.sched_getaffinity does not exist
193
+ # or raises NotImplementedError, let's try with the psutil if installed.
194
+ try:
195
+ import psutil
196
+
197
+ p = psutil.Process()
198
+ if hasattr(p, "cpu_affinity"):
199
+ return len(p.cpu_affinity())
200
+
201
+ except ImportError: # pragma: no cover
202
+ if (
203
+ sys.platform == "linux"
204
+ and os.environ.get("LOKY_MAX_CPU_COUNT") is None
205
+ ):
206
+ # PyPy does not implement os.sched_getaffinity on Linux which
207
+ # can cause severe oversubscription problems. Better warn the
208
+ # user in this particularly pathological case which can wreck
209
+ # havoc, typically on CI workers.
210
+ warnings.warn(
211
+ "Failed to inspect CPU affinity constraints on this system. "
212
+ "Please install psutil or explictly set LOKY_MAX_CPU_COUNT."
213
+ )
214
+
215
+ # This can happen for platforms that do not implement any kind of CPU
216
+ # infinity such as macOS-based platforms.
217
+ return os_cpu_count
218
+
219
+
220
+ def _cpu_count_user(os_cpu_count):
221
+ """Number of user defined available CPUs"""
222
+ cpu_count_affinity = _cpu_count_affinity(os_cpu_count)
223
+
224
+ cpu_count_cgroup = _cpu_count_cgroup(os_cpu_count)
225
+
226
+ # User defined soft-limit passed as a loky specific environment variable.
227
+ cpu_count_loky = int(os.environ.get("LOKY_MAX_CPU_COUNT", os_cpu_count))
228
+
229
+ return min(cpu_count_affinity, cpu_count_cgroup, cpu_count_loky)
230
+
231
+
232
+ def _count_physical_cores():
233
+ """Return a tuple (number of physical cores, exception)
234
+
235
+ If the number of physical cores is found, exception is set to None.
236
+ If it has not been found, return ("not found", exception).
237
+
238
+ The number of physical cores is cached to avoid repeating subprocess calls.
239
+ """
240
+ exception = None
241
+
242
+ # First check if the value is cached
243
+ global physical_cores_cache
244
+ if physical_cores_cache is not None:
245
+ return physical_cores_cache, exception
246
+
247
+ # Not cached yet, find it
248
+ try:
249
+ if sys.platform == "linux":
250
+ cpu_info = subprocess.run(
251
+ "lscpu --parse=core".split(), capture_output=True, text=True
252
+ )
253
+ cpu_info = cpu_info.stdout.splitlines()
254
+ cpu_info = {line for line in cpu_info if not line.startswith("#")}
255
+ cpu_count_physical = len(cpu_info)
256
+ elif sys.platform == "win32":
257
+ cpu_info = subprocess.run(
258
+ "wmic CPU Get NumberOfCores /Format:csv".split(),
259
+ capture_output=True,
260
+ text=True,
261
+ )
262
+ cpu_info = cpu_info.stdout.splitlines()
263
+ cpu_info = [
264
+ l.split(",")[1]
265
+ for l in cpu_info
266
+ if (l and l != "Node,NumberOfCores")
267
+ ]
268
+ cpu_count_physical = sum(map(int, cpu_info))
269
+ elif sys.platform == "darwin":
270
+ cpu_info = subprocess.run(
271
+ "sysctl -n hw.physicalcpu".split(),
272
+ capture_output=True,
273
+ text=True,
274
+ )
275
+ cpu_info = cpu_info.stdout
276
+ cpu_count_physical = int(cpu_info)
277
+ else:
278
+ raise NotImplementedError(f"unsupported platform: {sys.platform}")
279
+
280
+ # if cpu_count_physical < 1, we did not find a valid value
281
+ if cpu_count_physical < 1:
282
+ raise ValueError(f"found {cpu_count_physical} physical cores < 1")
283
+
284
+ except Exception as e:
285
+ exception = e
286
+ cpu_count_physical = "not found"
287
+
288
+ # Put the result in cache
289
+ physical_cores_cache = cpu_count_physical
290
+
291
+ return cpu_count_physical, exception
292
+
293
+
294
+ class LokyContext(BaseContext):
295
+ """Context relying on the LokyProcess."""
296
+
297
+ _name = "loky"
298
+ Process = LokyProcess
299
+ cpu_count = staticmethod(cpu_count)
300
+
301
+ def Queue(self, maxsize=0, reducers=None):
302
+ """Returns a queue object"""
303
+ from .queues import Queue
304
+
305
+ return Queue(maxsize, reducers=reducers, ctx=self.get_context())
306
+
307
+ def SimpleQueue(self, reducers=None):
308
+ """Returns a queue object"""
309
+ from .queues import SimpleQueue
310
+
311
+ return SimpleQueue(reducers=reducers, ctx=self.get_context())
312
+
313
+ if sys.platform != "win32":
314
+ """For Unix platform, use our custom implementation of synchronize
315
+ ensuring that we use the loky.backend.resource_tracker to clean-up
316
+ the semaphores in case of a worker crash.
317
+ """
318
+
319
+ def Semaphore(self, value=1):
320
+ """Returns a semaphore object"""
321
+ from .synchronize import Semaphore
322
+
323
+ return Semaphore(value=value)
324
+
325
+ def BoundedSemaphore(self, value):
326
+ """Returns a bounded semaphore object"""
327
+ from .synchronize import BoundedSemaphore
328
+
329
+ return BoundedSemaphore(value)
330
+
331
+ def Lock(self):
332
+ """Returns a lock object"""
333
+ from .synchronize import Lock
334
+
335
+ return Lock()
336
+
337
+ def RLock(self):
338
+ """Returns a recurrent lock object"""
339
+ from .synchronize import RLock
340
+
341
+ return RLock()
342
+
343
+ def Condition(self, lock=None):
344
+ """Returns a condition object"""
345
+ from .synchronize import Condition
346
+
347
+ return Condition(lock)
348
+
349
+ def Event(self):
350
+ """Returns an event object"""
351
+ from .synchronize import Event
352
+
353
+ return Event()
354
+
355
+
356
+ class LokyInitMainContext(LokyContext):
357
+ """Extra context with LokyProcess, which does load the main module
358
+
359
+ This context is used for compatibility in the case ``cloudpickle`` is not
360
+ present on the running system. This permits to load functions defined in
361
+ the ``main`` module, using proper safeguards. The declaration of the
362
+ ``executor`` should be protected by ``if __name__ == "__main__":`` and the
363
+ functions and variable used from main should be out of this block.
364
+
365
+ This mimics the default behavior of multiprocessing under Windows and the
366
+ behavior of the ``spawn`` start method on a posix system.
367
+ For more details, see the end of the following section of python doc
368
+ https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
369
+ """
370
+
371
+ _name = "loky_init_main"
372
+ Process = LokyInitMainProcess
373
+
374
+
375
+ # Register loky context so it works with multiprocessing.get_context
376
+ ctx_loky = LokyContext()
377
+ mp.context._concrete_contexts["loky"] = ctx_loky
378
+ mp.context._concrete_contexts["loky_init_main"] = LokyInitMainContext()
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Launch a subprocess using forkexec and make sure only the needed fd are
3
+ # shared in the two process.
4
+ #
5
+ # author: Thomas Moreau and Olivier Grisel
6
+ #
7
+ import os
8
+ import sys
9
+
10
+
11
+ def close_fds(keep_fds): # pragma: no cover
12
+ """Close all the file descriptors except those in keep_fds."""
13
+
14
+ # Make sure to keep stdout and stderr open for logging purpose
15
+ keep_fds = {*keep_fds, 1, 2}
16
+
17
+ # We try to retrieve all the open fds
18
+ try:
19
+ open_fds = {int(fd) for fd in os.listdir("/proc/self/fd")}
20
+ except FileNotFoundError:
21
+ import resource
22
+
23
+ max_nfds = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
24
+ open_fds = {*range(max_nfds)}
25
+
26
+ for i in open_fds - keep_fds:
27
+ try:
28
+ os.close(i)
29
+ except OSError:
30
+ pass
31
+
32
+
33
+ def fork_exec(cmd, keep_fds, env=None):
34
+ # copy the environment variables to set in the child process
35
+ env = env or {}
36
+ child_env = {**os.environ, **env}
37
+
38
+ pid = os.fork()
39
+ if pid == 0: # pragma: no cover
40
+ close_fds(keep_fds)
41
+ os.execve(sys.executable, cmd, child_env)
42
+ else:
43
+ return pid
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Popen for LokyProcess.
3
+ #
4
+ # author: Thomas Moreau and Olivier Grisel
5
+ #
6
+ import os
7
+ import sys
8
+ import signal
9
+ import pickle
10
+ from io import BytesIO
11
+ from multiprocessing import util, process
12
+ from multiprocessing.connection import wait
13
+ from multiprocessing.context import set_spawning_popen
14
+
15
+ from . import reduction, resource_tracker, spawn
16
+
17
+
18
+ __all__ = ["Popen"]
19
+
20
+
21
+ #
22
+ # Wrapper for an fd used while launching a process
23
+ #
24
+
25
+
26
class _DupFd:
    """Wrapper for a file descriptor shared with a child being launched.

    The fd is marked inheritable at construction so it survives the
    fork+exec; ``detach`` hands the raw descriptor back to the reducer.
    """

    def __init__(self, fd):
        # Make the descriptor inheritable across exec in the child.
        self.fd = reduction._mk_inheritable(fd)

    def detach(self):
        # Return the underlying inheritable descriptor.
        return self.fd
32
+
33
+
34
+ #
35
+ # Start child process using subprocess.Popen
36
+ #
37
+
38
+
39
class Popen:
    """Start and supervise a child process using the "loky" method.

    The child is created with fork+exec (see fork_exec.py) so that only
    explicitly shared file descriptors are inherited; the pickled
    preparation data and process object are streamed to it over a pipe.
    """

    method = "loky"
    DupFd = _DupFd

    def __init__(self, process_obj):
        # Flush the standard streams so buffered output is not duplicated
        # in the child after the fork.
        sys.stdout.flush()
        sys.stderr.flush()
        self.returncode = None
        # File descriptors that must stay open in the child process.
        self._fds = []
        self._launch(process_obj)

    def duplicate_for_child(self, fd):
        # Record *fd* for sharing and mark it inheritable across exec.
        self._fds.append(fd)
        return reduction._mk_inheritable(fd)

    def poll(self, flag=os.WNOHANG):
        # Check the child's status (non-blocking by default); caches and
        # returns the exit code once the child has terminated, else None.
        if self.returncode is None:
            while True:
                try:
                    pid, sts = os.waitpid(self.pid, flag)
                except OSError:
                    # Child process not yet created. See #1731717
                    # e.errno == errno.ECHILD == 10
                    return None
                else:
                    break
            if pid == self.pid:
                if os.WIFSIGNALED(sts):
                    # Killed by a signal: report the negated signal number.
                    self.returncode = -os.WTERMSIG(sts)
                else:
                    assert os.WIFEXITED(sts)
                    self.returncode = os.WEXITSTATUS(sts)
        return self.returncode

    def wait(self, timeout=None):
        # Wait up to *timeout* seconds for the child to exit; returns the
        # exit code, or None if the timeout expired first.
        if self.returncode is None:
            if timeout is not None:
                if not wait([self.sentinel], timeout):
                    return None
            # This shouldn't block if wait() returned successfully.
            return self.poll(os.WNOHANG if timeout == 0.0 else 0)
        return self.returncode

    def terminate(self):
        # Send SIGTERM to the child if it is still running.
        if self.returncode is None:
            try:
                os.kill(self.pid, signal.SIGTERM)
            except ProcessLookupError:
                # Child already gone: nothing to do.
                pass
            except OSError:
                if self.wait(timeout=0.1) is None:
                    raise

    def _launch(self, process_obj):
        # Pickle the preparation data and the process object, spawn the
        # child with fork+exec, then stream the pickles to it over a pipe.

        tracker_fd = resource_tracker._resource_tracker.getfd()

        fp = BytesIO()
        set_spawning_popen(self)
        try:
            prep_data = spawn.get_preparation_data(
                process_obj._name,
                getattr(process_obj, "init_main_module", True),
            )
            reduction.dump(prep_data, fp)
            reduction.dump(process_obj, fp)

        finally:
            set_spawning_popen(None)

        try:
            parent_r, child_w = os.pipe()
            child_r, parent_w = os.pipe()
            # for fd in self._fds:
            #     _mk_inheritable(fd)

            # Re-launch this module with -m so its __main__ section below
            # bootstraps the child process.
            cmd_python = [sys.executable]
            cmd_python += ["-m", self.__module__]
            cmd_python += ["--process-name", str(process_obj.name)]
            cmd_python += ["--pipe", str(reduction._mk_inheritable(child_r))]
            reduction._mk_inheritable(child_w)
            reduction._mk_inheritable(tracker_fd)
            self._fds += [child_r, child_w, tracker_fd]
            if sys.version_info >= (3, 8) and os.name == "posix":
                # Share the stdlib resource_tracker fd with the child too.
                mp_tracker_fd = prep_data["mp_tracker_args"]["fd"]
                self.duplicate_for_child(mp_tracker_fd)

            from .fork_exec import fork_exec

            pid = fork_exec(cmd_python, self._fds, env=process_obj.env)
            util.debug(
                f"launched python with pid {pid} and cmd:\n{cmd_python}"
            )
            self.sentinel = parent_r

            method = "getbuffer"
            if not hasattr(fp, method):
                method = "getvalue"
            # Stream the pickled payload to the child over the write end.
            with os.fdopen(parent_w, "wb") as f:
                f.write(getattr(fp, method)())
            self.pid = pid
        finally:
            if parent_r is not None:
                # Close the sentinel when this Popen is garbage collected.
                util.Finalize(self, os.close, (parent_r,))
            for fd in (child_r, child_w):
                if fd is not None:
                    os.close(fd)

    @staticmethod
    def thread_is_spawning():
        # Legacy hook kept for multiprocessing compatibility.
        return True
150
+
151
+
152
if __name__ == "__main__":
    # Child-side bootstrap: parse the pipe fd passed by Popen._launch,
    # unpickle the preparation data and the process object from it, then
    # run the process and exit with its return code.
    import argparse

    parser = argparse.ArgumentParser("Command line parser")
    parser.add_argument(
        "--pipe", type=int, required=True, help="File handle for the pipe"
    )
    parser.add_argument(
        "--process-name",
        type=str,
        default=None,
        help="Identifier for debugging purpose",
    )

    args = parser.parse_args()

    exitcode = 1
    # BUGFIX: pre-bind from_parent so the `finally` clause below cannot
    # raise NameError when os.fdopen itself fails before the `with` block
    # binds the name (the original referenced an unbound variable there).
    from_parent = None
    try:
        with os.fdopen(args.pipe, "rb") as from_parent:
            # Flag checked by get_preparation_data to detect recursion.
            process.current_process()._inheriting = True
            try:
                prep_data = pickle.load(from_parent)
                spawn.prepare(prep_data)
                process_obj = pickle.load(from_parent)
            finally:
                del process.current_process()._inheriting

        # Run the target; _bootstrap returns the process exit code.
        exitcode = process_obj._bootstrap()
    except Exception:
        # Print a framed traceback so failures in the child are visible
        # in the parent's captured stderr.
        print("\n\n" + "-" * 80)
        print(f"{args.process_name} failed with traceback: ")
        print("-" * 80)
        import traceback

        print(traceback.format_exc())
        print("\n" + "-" * 80)
    finally:
        if from_parent is not None:
            from_parent.close()

    sys.exit(exitcode)
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_win32.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import msvcrt
4
+ import _winapi
5
+ from pickle import load
6
+ from multiprocessing import process, util
7
+ from multiprocessing.context import set_spawning_popen
8
+ from multiprocessing.popen_spawn_win32 import Popen as _Popen
9
+
10
+ from . import reduction, spawn
11
+
12
+
13
+ __all__ = ["Popen"]
14
+
15
+ #
16
+ #
17
+ #
18
+
19
+
20
+ def _path_eq(p1, p2):
21
+ return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2)
22
+
23
+
24
# True when running from a venv redirector: the interpreter that was
# launched differs from the base interpreter it points at.
WINENV = hasattr(sys, "_base_executable") and not _path_eq(
    sys.executable, sys._base_executable
)


def _close_handles(*handles):
    """Close every win32 handle passed as argument."""
    for h in handles:
        _winapi.CloseHandle(h)
32
+
33
+
34
+ #
35
+ # We define a Popen class similar to the one from subprocess, but
36
+ # whose constructor takes a process object as its argument.
37
+ #
38
+
39
+
40
class Popen(_Popen):
    """
    Start a subprocess to run the code of a process object.

    We differ from the cpython implementation in the way we handle
    environment variables, in order to be able to modify them in the child
    processes before importing any library, in order to control the number
    of threads in C-level threadpools.

    We also use the loky preparation data, in particular to handle main_module
    inits and the loky resource tracker.
    """

    method = "loky"

    def __init__(self, process_obj):
        prep_data = spawn.get_preparation_data(
            process_obj._name, getattr(process_obj, "init_main_module", True)
        )

        # read end of pipe will be duplicated by the child process
        # -- see spawn_main() in spawn.py.
        #
        # bpo-33929: Previously, the read end of pipe was "stolen" by the child
        # process, but it leaked a handle if the child process had been
        # terminated before it could steal the handle from the parent process.
        rhandle, whandle = _winapi.CreatePipe(None, 0)
        wfd = msvcrt.open_osfhandle(whandle, 0)
        cmd = get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle)

        python_exe = spawn.get_executable()

        # copy the environment variables to set in the child process
        child_env = {**os.environ, **process_obj.env}

        # bpo-35797: When running in a venv, we bypass the redirect
        # executor and launch our base Python.
        if WINENV and _path_eq(python_exe, sys.executable):
            cmd[0] = python_exe = sys._base_executable
            child_env["__PYVENV_LAUNCHER__"] = sys.executable

        # CreateProcess takes a single command string: quote each argument.
        cmd = " ".join(f'"{x}"' for x in cmd)

        with open(wfd, "wb") as to_child:
            # start process
            try:
                hp, ht, pid, _ = _winapi.CreateProcess(
                    python_exe,
                    cmd,
                    None,
                    None,
                    False,
                    0,
                    child_env,
                    None,
                    None,
                )
                # The primary thread handle is never used; close it now.
                _winapi.CloseHandle(ht)
            except BaseException:
                # Launch failed: do not leak the read end of the pipe.
                _winapi.CloseHandle(rhandle)
                raise

            # set attributes of self
            self.pid = pid
            self.returncode = None
            self._handle = hp
            self.sentinel = int(hp)
            # Release both handles when this Popen is garbage collected.
            self.finalizer = util.Finalize(
                self, _close_handles, (self.sentinel, int(rhandle))
            )

            # send information to child
            set_spawning_popen(self)
            try:
                reduction.dump(prep_data, to_child)
                reduction.dump(process_obj, to_child)
            finally:
                set_spawning_popen(None)
118
+
119
+
120
def get_command_line(pipe_handle, parent_pid, **kwds):
    """Returns prefix of command line used for spawning a child process."""
    if getattr(sys, "frozen", False):
        # Frozen executables re-launch themselves directly.
        return [sys.executable, "--multiprocessing-fork", pipe_handle]
    # Regular interpreter: run a tiny bootstrap program through -c that
    # calls main() below with the pipe handle and parent pid baked in.
    prog = (
        "from joblib.externals.loky.backend.popen_loky_win32 import main; "
        f"main(pipe_handle={pipe_handle}, parent_pid={parent_pid})"
    )
    opts = util._args_from_interpreter_flags()
    return [
        spawn.get_executable(),
        *opts,
        "-c",
        prog,
        "--multiprocessing-fork",
    ]
137
+
138
+
139
def is_forking(argv):
    """Return whether commandline indicates we are forking."""
    # The marker is always injected as the first real argument.
    return len(argv) >= 2 and argv[1] == "--multiprocessing-fork"
145
+
146
+
147
def main(pipe_handle, parent_pid=None):
    """Run code specified by data received over pipe."""
    assert is_forking(sys.argv), "Not forking"

    # Duplicate the pipe handle from the parent process so it stays valid
    # even after the parent closes its copy.
    if parent_pid is not None:
        source_process = _winapi.OpenProcess(
            _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, False, parent_pid
        )
    else:
        source_process = None
    new_handle = reduction.duplicate(
        pipe_handle, source_process=source_process
    )
    fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY)
    # The parent's process handle doubles as a liveness sentinel.
    parent_sentinel = source_process

    with os.fdopen(fd, "rb", closefd=True) as from_parent:
        # Flag checked by get_preparation_data to detect recursive spawning.
        process.current_process()._inheriting = True
        try:
            preparation_data = load(from_parent)
            spawn.prepare(preparation_data, parent_sentinel)
            self = load(from_parent)
        finally:
            del process.current_process()._inheriting

    # Execute the unpickled process object and propagate its exit code.
    exitcode = self._bootstrap(parent_sentinel)
    sys.exit(exitcode)
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # LokyProcess implementation
3
+ #
4
+ # authors: Thomas Moreau and Olivier Grisel
5
+ #
6
+ # based on multiprocessing/process.py (17/02/2017)
7
+ #
8
+ import sys
9
+ from multiprocessing.context import assert_spawning
10
+ from multiprocessing.process import BaseProcess
11
+
12
+
13
class LokyProcess(BaseProcess):
    """Process launched with the "loky" start method.

    Compared to a plain spawned process, a LokyProcess can carry extra
    environment variables (*env*) set in the child before any module is
    imported, and may optionally replay the main module initialization
    (*init_main_module*).
    """

    _start_method = "loky"

    def __init__(
        self,
        group=None,
        target=None,
        name=None,
        args=(),
        kwargs=None,
        daemon=None,
        init_main_module=False,
        env=None,
    ):
        # BUGFIX: ``kwargs`` used a shared mutable default ``{}``; use a
        # None sentinel and normalize so instances never share the dict.
        super().__init__(
            group=group,
            target=target,
            name=name,
            args=args,
            kwargs={} if kwargs is None else kwargs,
            daemon=daemon,
        )
        # Extra environment variables to set in the child process.
        self.env = {} if env is None else env
        # Materialize the inherited authkey property as an instance
        # attribute so it is pickled and transferred to the child.
        self.authkey = self.authkey
        # Whether the child should re-run the main module's init code.
        self.init_main_module = init_main_module

    @staticmethod
    def _Popen(process_obj):
        # Import the platform-specific launcher lazily so that the
        # win32-only (resp. posix-only) module is never imported on the
        # other platform.
        if sys.platform == "win32":
            from .popen_loky_win32 import Popen
        else:
            from .popen_loky_posix import Popen
        return Popen(process_obj)
46
+
47
+
48
class LokyInitMainProcess(LokyProcess):
    """LokyProcess variant that always re-runs the main module init."""

    _start_method = "loky_init_main"

    def __init__(
        self,
        group=None,
        target=None,
        name=None,
        args=(),
        kwargs=None,
        daemon=None,
    ):
        # BUGFIX: ``kwargs`` used a shared mutable default ``{}``; use a
        # None sentinel and normalize here so this class is safe even if
        # the parent still declares a dict default.
        super().__init__(
            group=group,
            target=target,
            name=name,
            args=args,
            kwargs={} if kwargs is None else kwargs,
            daemon=daemon,
            init_main_module=True,
        )
69
+
70
+
71
+ #
72
+ # We subclass bytes to avoid accidental transmission of auth keys over network
73
+ #
74
+
75
+
76
class AuthenticationKey(bytes):
    """bytes subclass whose pickling is restricted to process spawning.

    Reducing an AuthenticationKey outside of the spawning machinery is
    rejected so authentication secrets cannot accidentally be serialized
    and sent over a network.
    """

    def __reduce__(self):
        try:
            # Only legal while a child process is actually being spawned.
            assert_spawning(self)
        except RuntimeError:
            raise TypeError(
                "Pickling an AuthenticationKey object is "
                "disallowed for security reasons"
            )
        else:
            return AuthenticationKey, (bytes(self),)
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Queue and SimpleQueue implementation for loky
3
+ #
4
+ # authors: Thomas Moreau, Olivier Grisel
5
+ #
6
+ # based on multiprocessing/queues.py (16/02/2017)
7
+ # * Add some custom reducers for the Queues/SimpleQueue to tweak the
8
+ # pickling process. (overload Queue._feed/SimpleQueue.put)
9
+ #
10
+ import os
11
+ import sys
12
+ import errno
13
+ import weakref
14
+ import threading
15
+ from multiprocessing import util
16
+ from multiprocessing.queues import (
17
+ Full,
18
+ Queue as mp_Queue,
19
+ SimpleQueue as mp_SimpleQueue,
20
+ _sentinel,
21
+ )
22
+ from multiprocessing.context import assert_spawning
23
+
24
+ from .reduction import dumps
25
+
26
+
27
+ __all__ = ["Queue", "SimpleQueue", "Full"]
28
+
29
+
30
class Queue(mp_Queue):
    """multiprocessing.Queue variant with a customizable pickling step.

    A ``reducers`` mapping (type -> reduce function) is threaded through
    to the feeder thread so items are serialized with loky's pickler.
    """

    def __init__(self, maxsize=0, reducers=None, ctx=None):
        super().__init__(maxsize=maxsize, ctx=ctx)
        # Custom reducers used by the feeder thread when serializing items.
        self._reducers = reducers

    # Use custom queue set/get state to be able to reduce the custom reducers
    def __getstate__(self):
        # Queues may only be shared through inheritance at spawn time.
        assert_spawning(self)
        return (
            self._ignore_epipe,
            self._maxsize,
            self._reader,
            self._writer,
            self._reducers,
            self._rlock,
            self._wlock,
            self._sem,
            self._opid,
        )

    def __setstate__(self, state):
        (
            self._ignore_epipe,
            self._maxsize,
            self._reader,
            self._writer,
            self._reducers,
            self._rlock,
            self._wlock,
            self._sem,
            self._opid,
        ) = state
        # The private re-initialization helper was renamed in CPython 3.9.
        if sys.version_info >= (3, 9):
            self._reset()
        else:
            self._after_fork()

    # Overload _start_thread to correctly call our custom _feed
    def _start_thread(self):
        util.debug("Queue._start_thread()")

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(
                self._buffer,
                self._notempty,
                self._send_bytes,
                self._wlock,
                self._writer.close,
                self._reducers,
                self._ignore_epipe,
                self._on_queue_feeder_error,
                self._sem,
            ),
            name="QueueFeederThread",
        )
        self._thread.daemon = True

        util.debug("doing self._thread.start()")
        self._thread.start()
        util.debug("... done self._thread.start()")

        # On process exit we will wait for data to be flushed to pipe.
        #
        # However, if this process created the queue then all
        # processes which use the queue will be descendants of this
        # process. Therefore waiting for the queue to be flushed
        # is pointless once all the child processes have been joined.
        created_by_this_process = self._opid == os.getpid()
        if not self._joincancelled and not created_by_this_process:
            self._jointhread = util.Finalize(
                self._thread,
                Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5,
            )

        # Send sentinel to the thread queue object when garbage collected
        self._close = util.Finalize(
            self,
            Queue._finalize_close,
            [self._buffer, self._notempty],
            exitpriority=10,
        )

    # Overload the _feed methods to use our custom pickling strategy.
    @staticmethod
    def _feed(
        buffer,
        notempty,
        send_bytes,
        writelock,
        close,
        reducers,
        ignore_epipe,
        onerror,
        queue_sem,
    ):
        util.debug("starting thread to feed data to pipe")
        # Bind the hot-path callables to locals for the feeder loop.
        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != "win32":
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            # Writes to a win32 message-oriented pipe need no lock.
            wacquire = None

        while True:
            try:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while True:
                        obj = bpopleft()
                        if obj is sentinel:
                            util.debug("feeder thread got sentinel -- exiting")
                            close()
                            return

                        # serialize the data before acquiring the lock
                        obj_ = dumps(obj, reducers=reducers)
                        if wacquire is None:
                            send_bytes(obj_)
                        else:
                            wacquire()
                            try:
                                send_bytes(obj_)
                            finally:
                                wrelease()
                        # Remove references early to avoid leaking memory
                        del obj, obj_
                except IndexError:
                    # Buffer drained: go back to waiting on notempty.
                    pass
            except BaseException as e:
                if ignore_epipe and getattr(e, "errno", 0) == errno.EPIPE:
                    return
                # Since this runs in a daemon thread the resources it uses
                # may be become unusable while the process is cleaning up.
                # We ignore errors which happen after the process has
                # started to cleanup.
                if util.is_exiting():
                    util.info(f"error in queue thread: {e}")
                    return
                else:
                    # Restore the slot taken by the failed item so the
                    # queue does not appear fuller than it is.
                    queue_sem.release()
                    onerror(e, obj)

    def _on_queue_feeder_error(self, e, obj):
        """
        Private API hook called when feeding data in the background thread
        raises an exception. For overriding by concurrent.futures.
        """
        import traceback

        traceback.print_exc()
194
+
195
+
196
class SimpleQueue(mp_SimpleQueue):
    """multiprocessing.SimpleQueue variant accepting custom reducers."""

    def __init__(self, reducers=None, ctx=None):
        super().__init__(ctx=ctx)

        # Add possibility to use custom reducers
        self._reducers = reducers

    def close(self):
        # Close both pipe ends held by this process.
        self._reader.close()
        self._writer.close()

    # Use custom queue set/get state to be able to reduce the custom reducers
    def __getstate__(self):
        # Queues may only be shared through inheritance at spawn time.
        assert_spawning(self)
        return (
            self._reader,
            self._writer,
            self._reducers,
            self._rlock,
            self._wlock,
        )

    def __setstate__(self, state):
        (
            self._reader,
            self._writer,
            self._reducers,
            self._rlock,
            self._wlock,
        ) = state

    # Overload put to use our customizable reducer
    def put(self, obj):
        # serialize the data before acquiring the lock
        obj = dumps(obj, reducers=self._reducers)
        if self._wlock is None:
            # writes to a message oriented win32 pipe are atomic
            self._writer.send_bytes(obj)
        else:
            with self._wlock:
                self._writer.send_bytes(obj)
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/reduction.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Customizable Pickler with some basic reducers
3
+ #
4
+ # author: Thomas Moreau
5
+ #
6
+ # adapted from multiprocessing/reduction.py (17/02/2017)
7
+ # * Replace the ForkingPickler with a similar _LokyPickler,
8
+ # * Add CustomizableLokyPickler to allow customizing pickling process
9
+ # on the fly.
10
+ #
11
+ import copyreg
12
+ import io
13
+ import functools
14
+ import types
15
+ import sys
16
+ import os
17
+
18
+ from multiprocessing import util
19
+ from pickle import loads, HIGHEST_PROTOCOL
20
+
21
+ ###############################################################################
22
+ # Enable custom pickling in Loky.
23
+
24
+ _dispatch_table = {}
25
+
26
+
27
+ def register(type_, reduce_function):
28
+ _dispatch_table[type_] = reduce_function
29
+
30
+
31
+ ###############################################################################
32
+ # Registers extra pickling routines to improve picklization for loky
33
+
34
+
35
+ # make methods picklable
36
+ def _reduce_method(m):
37
+ if m.__self__ is None:
38
+ return getattr, (m.__class__, m.__func__.__name__)
39
+ else:
40
+ return getattr, (m.__self__, m.__func__.__name__)
41
+
42
+
43
class _C:
    # Minimal probe class used only to obtain the concrete runtime types
    # of bound instance methods and bound classmethods below.
    def f(self):
        pass

    @classmethod
    def h(cls):
        pass


# Register the method reducer for both instance-method and
# bound-classmethod types (looked up from the probe class above).
register(type(_C().f), _reduce_method)
register(type(_C.h), _reduce_method)
54
+
55
+
56
if not hasattr(sys, "pypy_version_info"):
    # PyPy uses functions instead of method_descriptors and wrapper_descriptors
    def _reduce_method_descriptor(m):
        # Recover the descriptor from its owning class by name.
        return getattr, (m.__objclass__, m.__name__)

    # method_descriptor (e.g. list.append) and wrapper_descriptor
    # (e.g. int.__add__) are distinct C-level types on CPython.
    register(type(list.append), _reduce_method_descriptor)
    register(type(int.__add__), _reduce_method_descriptor)
63
+
64
+
65
+ # Make partial func pickable
66
+ def _reduce_partial(p):
67
+ return _rebuild_partial, (p.func, p.args, p.keywords or {})
68
+
69
+
70
+ def _rebuild_partial(func, args, keywords):
71
+ return functools.partial(func, *args, **keywords)
72
+
73
+
74
+ register(functools.partial, _reduce_partial)
75
+
76
+ if sys.platform != "win32":
77
+ from ._posix_reduction import _mk_inheritable # noqa: F401
78
+ else:
79
+ from . import _win_reduction # noqa: F401
80
+
81
+ # global variable to change the pickler behavior
82
+ try:
83
+ from joblib.externals import cloudpickle # noqa: F401
84
+
85
+ DEFAULT_ENV = "cloudpickle"
86
+ except ImportError:
87
+ # If cloudpickle is not present, fallback to pickle
88
+ DEFAULT_ENV = "pickle"
89
+
90
+ ENV_LOKY_PICKLER = os.environ.get("LOKY_PICKLER", DEFAULT_ENV)
91
+ _LokyPickler = None
92
+ _loky_pickler_name = None
93
+
94
+
95
def set_loky_pickler(loky_pickler=None):
    """Select the Pickler class used by loky for inter-process transfers.

    *loky_pickler* is either None/"" (use the default: cloudpickle when
    available), "cloudpickle", or the name of an importable module whose
    ``Pickler`` attribute should be used. The selected class is wrapped
    in a CustomizablePickler honoring loky's reducer registry, and
    installed in the module-level ``_LokyPickler``.
    """
    global _LokyPickler, _loky_pickler_name

    if loky_pickler is None:
        loky_pickler = ENV_LOKY_PICKLER

    loky_pickler_cls = None

    # The default loky_pickler is cloudpickle
    if loky_pickler in ["", None]:
        loky_pickler = "cloudpickle"

    # Nothing to do when the requested pickler is already installed.
    if loky_pickler == _loky_pickler_name:
        return

    if loky_pickler == "cloudpickle":
        from joblib.externals.cloudpickle import CloudPickler as loky_pickler_cls
    else:
        try:
            from importlib import import_module

            module_pickle = import_module(loky_pickler)
            loky_pickler_cls = module_pickle.Pickler
        except (ImportError, AttributeError) as e:
            # Augment the error message with the loky context so the
            # misconfiguration is easy to trace back to LOKY_PICKLER.
            extra_info = (
                "\nThis error occurred while setting loky_pickler to"
                f" '{loky_pickler}', as required by the env variable "
                "LOKY_PICKLER or the function set_loky_pickler."
            )
            e.args = (e.args[0] + extra_info,) + e.args[1:]
            e.msg = e.args[0]
            raise e

    util.debug(
        f"Using '{loky_pickler if loky_pickler else 'cloudpickle'}' for "
        "serialization."
    )

    class CustomizablePickler(loky_pickler_cls):
        # Wrapper around the selected pickler class that installs loky's
        # dispatch table plus any per-call ``reducers``.
        _loky_pickler_cls = loky_pickler_cls

        def _set_dispatch_table(self, dispatch_table):
            for ancestor_class in self._loky_pickler_cls.mro():
                dt_attribute = getattr(ancestor_class, "dispatch_table", None)
                if isinstance(dt_attribute, types.MemberDescriptorType):
                    # Ancestor class (typically _pickle.Pickler) has a
                    # member_descriptor for its "dispatch_table" attribute. Use
                    # it to set the dispatch_table as a member instead of a
                    # dynamic attribute in the __dict__ of the instance,
                    # otherwise it will not be taken into account by the C
                    # implementation of the dump method if a subclass defines a
                    # class-level dispatch_table attribute as was done in
                    # cloudpickle 1.6.0:
                    # https://github.com/joblib/loky/pull/260
                    dt_attribute.__set__(self, dispatch_table)
                    break

            # On top of member descriptor set, also use setattr such that code
            # that directly access self.dispatch_table gets a consistent view
            # of the same table.
            self.dispatch_table = dispatch_table

        def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
            loky_pickler_cls.__init__(self, writer, protocol=protocol)
            if reducers is None:
                reducers = {}

            if hasattr(self, "dispatch_table"):
                # Force a copy that we will update without mutating the
                # any class level defined dispatch_table.
                loky_dt = dict(self.dispatch_table)
            else:
                # Use standard reducers as bases
                loky_dt = copyreg.dispatch_table.copy()

            # Register loky specific reducers
            loky_dt.update(_dispatch_table)

            # Set the new dispatch table, taking care of the fact that we
            # need to use the member_descriptor when we inherit from a
            # subclass of the C implementation of the Pickler base class
            # with an class level dispatch_table attribute.
            self._set_dispatch_table(loky_dt)

            # Register the reducers
            for type, reduce_func in reducers.items():
                self.register(type, reduce_func)

        def register(self, type, reduce_func):
            """Attach a reducer function to a given type in the dispatch table."""
            self.dispatch_table[type] = reduce_func

    _LokyPickler = CustomizablePickler
    _loky_pickler_name = loky_pickler
189
+
190
+
191
def get_loky_pickler_name():
    """Return the name of the pickler currently selected by loky."""
    # Plain module-level read: no ``global`` declaration is needed.
    return _loky_pickler_name
194
+
195
+
196
def get_loky_pickler():
    """Return the Pickler class currently selected by loky."""
    # Plain module-level read: no ``global`` declaration is needed.
    return _LokyPickler
199
+
200
+
201
+ # Set it to its default value
202
+ set_loky_pickler()
203
+
204
+
205
def dump(obj, file, reducers=None, protocol=None):
    """Replacement for pickle.dump() using _LokyPickler."""
    # A fresh pickler per call: the reducers may differ between calls.
    pickler = _LokyPickler(file, reducers=reducers, protocol=protocol)
    pickler.dump(obj)
209
+
210
+
211
def dumps(obj, reducers=None, protocol=None):
    """Pickle *obj* with _LokyPickler and return the serialized payload."""
    sink = io.BytesIO()
    dump(obj, sink, reducers=reducers, protocol=protocol)
    # Hand back a zero-copy view of the pickled bytes.
    return sink.getbuffer()
217
+
218
+
219
+ __all__ = ["dump", "dumps", "loads", "register", "set_loky_pickler"]
220
+
221
+ if sys.platform == "win32":
222
+ from multiprocessing.reduction import duplicate
223
+
224
+ __all__ += ["duplicate"]
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py ADDED
@@ -0,0 +1,378 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Server process to keep track of unlinked resources, like folders and
3
+ # semaphores and clean them.
4
+ #
5
+ # author: Thomas Moreau
6
+ #
7
+ # adapted from multiprocessing/semaphore_tracker.py (17/02/2017)
8
+ # * include custom spawnv_passfds to start the process
9
+ # * add some VERBOSE logging
10
+ #
11
+ # TODO: multiprocessing.resource_tracker was contributed to Python 3.8 so
12
+ # once loky drops support for Python 3.7 it might be possible to stop
13
+ # maintaining this loky-specific fork. As a consequence, it might also be
14
+ # possible to stop maintaining the loky.backend.synchronize fork of
15
+ # multiprocessing.synchronize.
16
+
17
+ #
18
+ # On Unix we run a server process which keeps track of unlinked
19
+ # resources. The server ignores SIGINT and SIGTERM and reads from a
20
+ # pipe. The resource_tracker implements a reference counting scheme: each time
21
+ # a Python process anticipates the shared usage of a resource by another
22
+ # process, it signals the resource_tracker of this shared usage, and in return,
23
+ # the resource_tracker increments the resource's reference count by 1.
24
+ # Similarly, when access to a resource is closed by a Python process, the
25
+ # process notifies the resource_tracker by asking it to decrement the
26
+ # resource's reference count by 1. When the reference count drops to 0, the
27
+ # resource_tracker attempts to clean up the underlying resource.
28
+
29
+ # Finally, every other process connected to the resource tracker has a copy of
30
+ # the writable end of the pipe used to communicate with it, so the resource
31
+ # tracker gets EOF when all other processes have exited. Then the
32
+ # resource_tracker process unlinks any remaining leaked resources (with
33
+ # reference count above 0)
34
+
35
+ # For semaphores, this is important because the system only supports a limited
36
+ # number of named semaphores, and they will not be automatically removed till
37
+ # the next reboot. Without this resource tracker process, "killall python"
38
+ # would probably leave unlinked semaphores.
39
+
40
+ # Note that this behavior differs from CPython's resource_tracker, which only
41
+ # implements list of shared resources, and not a proper refcounting scheme.
42
+ # Also, CPython's resource tracker will only attempt to cleanup those shared
43
+ # resources once all processes connected to the resource tracker have exited.
44
+
45
+
46
+ import os
47
+ import shutil
48
+ import sys
49
+ import signal
50
+ import warnings
51
+ import threading
52
+ from _multiprocessing import sem_unlink
53
+ from multiprocessing import util
54
+
55
+ from . import spawn
56
+
57
+ if sys.platform == "win32":
58
+ import _winapi
59
+ import msvcrt
60
+ from multiprocessing.reduction import duplicate
61
+
62
+
63
+ __all__ = ["ensure_running", "register", "unregister"]
64
+
65
+ _HAVE_SIGMASK = hasattr(signal, "pthread_sigmask")
66
+ _IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)
67
+
68
+ _CLEANUP_FUNCS = {"folder": shutil.rmtree, "file": os.unlink}
69
+
70
+ if os.name == "posix":
71
+ _CLEANUP_FUNCS["semlock"] = sem_unlink
72
+
73
+
74
+ VERBOSE = False
75
+
76
+
77
class ResourceTracker:
    """Client-side handle to the shared resource tracker process.

    Tracks named resources (folders, files and, on POSIX, named semaphores)
    with a reference-counting protocol spoken over a pipe; the server side
    is implemented by `main` in this module.
    """

    def __init__(self):
        # Serializes fd/pid mutation across threads of this process.
        self._lock = threading.Lock()
        self._fd = None   # writable end of the pipe to the tracker process
        self._pid = None  # pid of the tracker process

    def getfd(self):
        """Return the fd used to talk to the tracker, spawning it if needed."""
        self.ensure_running()
        return self._fd

    def ensure_running(self):
        """Make sure that resource tracker process is running.

        This can be run from any process. Usually a child process will use
        the resource created by its parent."""
        with self._lock:
            if self._fd is not None:
                # resource tracker was launched before, is it still running?
                if self._check_alive():
                    # => still alive
                    return
                # => dead, launch it again
                os.close(self._fd)
                if os.name == "posix":
                    try:
                        # At this point, the resource_tracker process has been
                        # killed or crashed. Let's remove the process entry
                        # from the process table to avoid zombie processes.
                        os.waitpid(self._pid, 0)
                    except OSError:
                        # The process was terminated or is a child from an
                        # ancestor of the current process.
                        pass
                self._fd = None
                self._pid = None

                # "semaphores" typo fixed in this user-facing warning.
                warnings.warn(
                    "resource_tracker: process died unexpectedly, "
                    "relaunching. Some folders/semaphores might "
                    "leak."
                )

            fds_to_pass = []
            try:
                fds_to_pass.append(sys.stderr.fileno())
            except Exception:
                pass

            r, w = os.pipe()
            if sys.platform == "win32":
                # Hand the child an inheritable raw handle for the read end.
                _r = duplicate(msvcrt.get_osfhandle(r), inheritable=True)
                os.close(r)
                r = _r

            cmd = f"from {main.__module__} import main; main({r}, {VERBOSE})"
            try:
                fds_to_pass.append(r)
                # process will outlive us, so no need to wait on pid
                exe = spawn.get_executable()
                args = [exe, *util._args_from_interpreter_flags(), "-c", cmd]
                util.debug(f"launching resource tracker: {args}")
                # bpo-33613: Register a signal mask that will block the
                # signals. This signal mask will be inherited by the child
                # that is going to be spawned and will protect the child from a
                # race condition that can make the child die before it
                # registers signal handlers for SIGINT and SIGTERM. The mask is
                # unregistered after spawning the child.
                try:
                    if _HAVE_SIGMASK:
                        signal.pthread_sigmask(
                            signal.SIG_BLOCK, _IGNORED_SIGNALS
                        )
                    pid = spawnv_passfds(exe, args, fds_to_pass)
                finally:
                    if _HAVE_SIGMASK:
                        signal.pthread_sigmask(
                            signal.SIG_UNBLOCK, _IGNORED_SIGNALS
                        )
            except BaseException:
                os.close(w)
                raise
            else:
                self._fd = w
                self._pid = pid
            finally:
                # The parent only keeps the writable end; close our copy of
                # the read end (the child holds its own reference to it).
                if sys.platform == "win32":
                    _winapi.CloseHandle(r)
                else:
                    os.close(r)

    def _check_alive(self):
        """Check for the existence of the resource tracker process."""
        try:
            # Harmless no-op command: only checks the pipe is still writable.
            self._send("PROBE", "", "")
        except BrokenPipeError:
            return False
        else:
            return True

    def register(self, name, rtype):
        """Register a named resource, and increment its refcount."""
        self.ensure_running()
        self._send("REGISTER", name, rtype)

    def unregister(self, name, rtype):
        """Unregister a named resource with resource tracker."""
        self.ensure_running()
        self._send("UNREGISTER", name, rtype)

    def maybe_unlink(self, name, rtype):
        """Decrement the refcount of a resource, and delete it if it hits 0"""
        self.ensure_running()
        self._send("MAYBE_UNLINK", name, rtype)

    def _send(self, cmd, name, rtype):
        """Write one `CMD:name:rtype` line to the tracker pipe."""
        if len(name) > 512:
            # posix guarantees that writes to a pipe of less than PIPE_BUF
            # bytes are atomic, and that PIPE_BUF >= 512
            raise ValueError("name too long")
        msg = f"{cmd}:{name}:{rtype}\n".encode("ascii")
        nbytes = os.write(self._fd, msg)
        assert nbytes == len(msg)
199
+
200
+
201
+ _resource_tracker = ResourceTracker()
202
+ ensure_running = _resource_tracker.ensure_running
203
+ register = _resource_tracker.register
204
+ maybe_unlink = _resource_tracker.maybe_unlink
205
+ unregister = _resource_tracker.unregister
206
+ getfd = _resource_tracker.getfd
207
+
208
+
209
def main(fd, verbose=0):
    """Run resource tracker.

    Server loop: reads `CMD:name:rtype` lines from the pipe whose read end
    is `fd`, maintains per-type refcounts, and cleans up any resource whose
    count drops to zero.  On EOF (all clients gone) it unlinks everything
    still registered.
    """
    # protect the process from ^C and "killall python" etc
    if verbose:
        util.log_to_stderr(level=util.DEBUG)

    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)

    if _HAVE_SIGMASK:
        # Undo the mask inherited from the parent (see ensure_running).
        signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)

    for f in (sys.stdin, sys.stdout):
        try:
            f.close()
        except Exception:
            pass

    if verbose:
        util.debug("Main resource tracker is running")

    # registry[rtype][name] -> refcount
    registry = {rtype: {} for rtype in _CLEANUP_FUNCS.keys()}
    try:
        # keep track of registered/unregistered resources
        if sys.platform == "win32":
            fd = msvcrt.open_osfhandle(fd, os.O_RDONLY)
        with open(fd, "rb") as f:
            while True:
                line = f.readline()
                if line == b"":  # EOF
                    break
                try:
                    splitted = line.strip().decode("ascii").split(":")
                    # name can potentially contain separator symbols (for
                    # instance folders on Windows)
                    cmd, name, rtype = (
                        splitted[0],
                        ":".join(splitted[1:-1]),
                        splitted[-1],
                    )

                    if cmd == "PROBE":
                        continue

                    if rtype not in _CLEANUP_FUNCS:
                        raise ValueError(
                            f"Cannot register {name} for automatic cleanup: "
                            f"unknown resource type ({rtype}). Resource type "
                            "should be one of the following: "
                            f"{list(_CLEANUP_FUNCS.keys())}"
                        )

                    if cmd == "REGISTER":
                        if name not in registry[rtype]:
                            registry[rtype][name] = 1
                        else:
                            registry[rtype][name] += 1

                        if verbose:
                            util.debug(
                                "[ResourceTracker] incremented refcount of "
                                f"{rtype} {name} "
                                f"(current {registry[rtype][name]})"
                            )
                    elif cmd == "UNREGISTER":
                        del registry[rtype][name]
                        if verbose:
                            util.debug(
                                f"[ResourceTracker] unregister {name} {rtype}: "
                                f"registry({len(registry)})"
                            )
                    elif cmd == "MAYBE_UNLINK":
                        registry[rtype][name] -= 1
                        if verbose:
                            util.debug(
                                "[ResourceTracker] decremented refcount of "
                                f"{rtype} {name} "
                                f"(current {registry[rtype][name]})"
                            )

                        if registry[rtype][name] == 0:
                            del registry[rtype][name]
                            try:
                                if verbose:
                                    util.debug(
                                        f"[ResourceTracker] unlink {name}"
                                    )
                                _CLEANUP_FUNCS[rtype](name)
                            except Exception as e:
                                warnings.warn(
                                    f"resource_tracker: {name}: {e!r}"
                                )

                    else:
                        raise RuntimeError(f"unrecognized command {cmd!r}")
                except BaseException:
                    # Never let a malformed message kill the tracker: report
                    # and keep serving the remaining clients.
                    try:
                        sys.excepthook(*sys.exc_info())
                    except BaseException:
                        pass
    finally:
        # all processes have terminated; cleanup any remaining resources
        def _unlink_resources(rtype_registry, rtype):
            # Unlink every leaked resource of one type, warning once.
            if rtype_registry:
                try:
                    warnings.warn(
                        "resource_tracker: There appear to be "
                        f"{len(rtype_registry)} leaked {rtype} objects to "
                        "clean up at shutdown"
                    )
                except Exception:
                    pass
            for name in rtype_registry:
                # For some reason the process which created and registered this
                # resource has failed to unregister it. Presumably it has
                # died. We therefore clean it up.
                try:
                    _CLEANUP_FUNCS[rtype](name)
                    if verbose:
                        util.debug(f"[ResourceTracker] unlink {name}")
                except Exception as e:
                    warnings.warn(f"resource_tracker: {name}: {e!r}")

        for rtype, rtype_registry in registry.items():
            if rtype == "folder":
                continue
            else:
                _unlink_resources(rtype_registry, rtype)

        # The default cleanup routine for folders deletes everything inside
        # those folders recursively, which can include other resources tracked
        # by the resource tracker). To limit the risk of the resource tracker
        # attempting to delete twice a resource (once as part of a tracked
        # folder, and once as a resource), we delete the folders after all
        # other resource types.
        if "folder" in registry:
            _unlink_resources(registry["folder"], "folder")

    if verbose:
        util.debug("resource tracker shut down")
349
+
350
+
351
+ #
352
+ # Start a program with only specified fds kept open
353
+ #
354
+
355
+
356
def spawnv_passfds(path, args, passfds):
    """Start executable `path` with `args`, keeping only `passfds` open.

    Returns the pid of the spawned process.  The child is deliberately not
    waited on here: the resource tracker is expected to outlive its parent.
    """
    passfds = sorted(passfds)
    if sys.platform != "win32":
        errpipe_read, errpipe_write = os.pipe()
        try:
            from .reduction import _mk_inheritable
            from .fork_exec import fork_exec

            _pass = [_mk_inheritable(fd) for fd in passfds]
            return fork_exec(args, _pass)
        finally:
            os.close(errpipe_read)
            os.close(errpipe_write)
    else:
        cmd = " ".join(f'"{x}"' for x in args)
        # BUGFIX: do not swallow CreateProcess failures.  The previous
        # `except BaseException: pass` left `pid` unbound, so `return pid`
        # raised a confusing NameError instead of the real spawn error.
        _, ht, pid, _ = _winapi.CreateProcess(
            path, cmd, None, None, True, 0, None, None, None
        )
        _winapi.CloseHandle(ht)
        return pid
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py ADDED
@@ -0,0 +1,250 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Prepares and processes the data to setup the new process environment
3
+ #
4
+ # author: Thomas Moreau and Olivier Grisel
5
+ #
6
+ # adapted from multiprocessing/spawn.py (17/02/2017)
7
+ # * Improve logging data
8
+ #
9
+ import os
10
+ import sys
11
+ import runpy
12
+ import textwrap
13
+ import types
14
+ from multiprocessing import process, util
15
+
16
+
17
+ if sys.platform != "win32":
18
+ WINEXE = False
19
+ WINSERVICE = False
20
+ else:
21
+ import msvcrt
22
+ from multiprocessing.reduction import duplicate
23
+
24
+ WINEXE = sys.platform == "win32" and getattr(sys, "frozen", False)
25
+ WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
26
+
27
+ if WINSERVICE:
28
+ _python_exe = os.path.join(sys.exec_prefix, "python.exe")
29
+ else:
30
+ _python_exe = sys.executable
31
+
32
+
33
def get_executable():
    """Return the path of the Python interpreter to use for child processes.

    On Windows services this is `<exec_prefix>/python.exe`; otherwise it is
    `sys.executable` (see the module-level `_python_exe` selection above).
    """
    return _python_exe
35
+
36
+
37
def _check_not_importing_main():
    """Raise RuntimeError if called while bootstrapping a spawned process.

    `process.current_process()._inheriting` is set only while the child is
    importing its parent's main module; starting a new process at that point
    would recurse (the classic missing `if __name__ == '__main__'` guard).
    """
    if getattr(process.current_process(), "_inheriting", False):
        raise RuntimeError(
            textwrap.dedent(
                """\
                An attempt has been made to start a new process before the
                current process has finished its bootstrapping phase.

                This probably means that you are not using fork to start your
                child processes and you have forgotten to use the proper idiom
                in the main module:

                    if __name__ == '__main__':
                        freeze_support()
                        ...

                The "freeze_support()" line can be omitted if the program
                is not going to be frozen to produce an executable."""
            )
        )
57
+
58
+
59
def get_preparation_data(name, init_main_module=True):
    """Return info about parent needed by child to unpickle process object.

    The returned dict is sent to the spawned child and consumed by
    `prepare`: process name/authkey, sys.argv/sys.path, cwd, logging
    configuration, resource-tracker connection info and how to re-execute
    the parent's `__main__` module.
    """
    _check_not_importing_main()
    d = dict(
        log_to_stderr=util._log_to_stderr,
        authkey=bytes(process.current_process().authkey),
        name=name,
        sys_argv=sys.argv,
        orig_dir=process.ORIGINAL_DIR,
        dir=os.getcwd(),
    )

    # Send sys_path and make sure the current directory will not be changed
    d["sys_path"] = [p if p != "" else process.ORIGINAL_DIR for p in sys.path]

    # Make sure to pass the information if the multiprocessing logger is active
    if util._logger is not None:
        d["log_level"] = util._logger.getEffectiveLevel()
        if util._logger.handlers:
            h = util._logger.handlers[0]
            d["log_fmt"] = h.formatter._fmt

    # Tell the child how to communicate with the resource_tracker
    from .resource_tracker import _resource_tracker

    _resource_tracker.ensure_running()
    d["tracker_args"] = {"pid": _resource_tracker._pid}
    if sys.platform == "win32":
        # fds don't cross process boundaries on Windows; send the OS handle.
        d["tracker_args"]["fh"] = msvcrt.get_osfhandle(_resource_tracker._fd)
    else:
        d["tracker_args"]["fd"] = _resource_tracker._fd

    if sys.version_info >= (3, 8) and os.name == "posix":
        # joblib/loky#242: allow loky processes to retrieve the resource
        # tracker of their parent in case the child processes depickles
        # shared_memory objects, that are still tracked by multiprocessing's
        # resource_tracker by default.
        # XXX: this is a workaround that may be error prone: in the future, it
        # would be better to have loky subclass multiprocessing's shared_memory
        # to force registration of shared_memory segments via loky's
        # resource_tracker.
        from multiprocessing.resource_tracker import (
            _resource_tracker as mp_resource_tracker,
        )

        # multiprocessing's resource_tracker must be running before loky
        # process is created (otherwise the child won't be able to use it if it
        # is created later on)
        mp_resource_tracker.ensure_running()
        d["mp_tracker_args"] = {
            "fd": mp_resource_tracker._fd,
            "pid": mp_resource_tracker._pid,
        }

    # Figure out whether to initialise main in the subprocess as a module
    # or through direct execution (or to leave it alone entirely)
    if init_main_module:
        main_module = sys.modules["__main__"]
        try:
            main_mod_name = getattr(main_module.__spec__, "name", None)
        except BaseException:
            main_mod_name = None
        if main_mod_name is not None:
            d["init_main_from_name"] = main_mod_name
        elif sys.platform != "win32" or (not WINEXE and not WINSERVICE):
            main_path = getattr(main_module, "__file__", None)
            if main_path is not None:
                if (
                    not os.path.isabs(main_path)
                    and process.ORIGINAL_DIR is not None
                ):
                    main_path = os.path.join(process.ORIGINAL_DIR, main_path)
                d["init_main_from_path"] = os.path.normpath(main_path)

    return d
134
+
135
+
136
+ #
137
+ # Prepare current process
138
+ #
139
+ old_main_modules = []
140
+
141
+
142
def prepare(data, parent_sentinel=None):
    """Try to get current process ready to unpickle process object.

    `data` is the dict built by `get_preparation_data` in the parent; each
    key is applied only if present, so partial payloads are tolerated.
    """
    if "name" in data:
        process.current_process().name = data["name"]

    if "authkey" in data:
        process.current_process().authkey = data["authkey"]

    if "log_to_stderr" in data and data["log_to_stderr"]:
        util.log_to_stderr()

    if "log_level" in data:
        util.get_logger().setLevel(data["log_level"])

    if "log_fmt" in data:
        import logging

        util.get_logger().handlers[0].setFormatter(
            logging.Formatter(data["log_fmt"])
        )

    if "sys_path" in data:
        sys.path = data["sys_path"]

    if "sys_argv" in data:
        sys.argv = data["sys_argv"]

    if "dir" in data:
        os.chdir(data["dir"])

    if "orig_dir" in data:
        process.ORIGINAL_DIR = data["orig_dir"]

    if "mp_tracker_args" in data:
        # Reconnect to the parent's multiprocessing resource tracker.
        from multiprocessing.resource_tracker import (
            _resource_tracker as mp_resource_tracker,
        )

        mp_resource_tracker._fd = data["mp_tracker_args"]["fd"]
        mp_resource_tracker._pid = data["mp_tracker_args"]["pid"]
    if "tracker_args" in data:
        # Reconnect to the parent's loky resource tracker.
        from .resource_tracker import _resource_tracker

        _resource_tracker._pid = data["tracker_args"]["pid"]
        if sys.platform == "win32":
            # Handles must be duplicated from the parent before use.
            handle = data["tracker_args"]["fh"]
            handle = duplicate(handle, source_process=parent_sentinel)
            _resource_tracker._fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
        else:
            _resource_tracker._fd = data["tracker_args"]["fd"]

    if "init_main_from_name" in data:
        _fixup_main_from_name(data["init_main_from_name"])
    elif "init_main_from_path" in data:
        _fixup_main_from_path(data["init_main_from_path"])
197
+
198
+
199
+ # Multiprocessing module helpers to fix up the main module in
200
+ # spawned subprocesses
201
def _fixup_main_from_name(mod_name):
    """Re-execute the parent's main module (given by name) as __mp_main__."""
    # __main__.py files for packages, directories, zip archives, etc, run
    # their "main only" code unconditionally, so we don't even try to
    # populate anything in __main__, nor do we make any changes to
    # __main__ attributes
    current_main = sys.modules["__main__"]
    if mod_name == "__main__" or mod_name.endswith(".__main__"):
        return

    # If this process was forked, __main__ may already be populated
    if getattr(current_main.__spec__, "name", None) == mod_name:
        return

    # Otherwise, __main__ may contain some non-main code where we need to
    # support unpickling it properly. We rerun it as __mp_main__ and make
    # the normal __main__ an alias to that
    old_main_modules.append(current_main)
    main_module = types.ModuleType("__mp_main__")
    main_content = runpy.run_module(
        mod_name, run_name="__mp_main__", alter_sys=True
    )
    main_module.__dict__.update(main_content)
    sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module
224
+
225
+
226
def _fixup_main_from_path(main_path):
    """Re-execute the parent's main module (given by path) as __mp_main__."""
    # If this process was forked, __main__ may already be populated
    current_main = sys.modules["__main__"]

    # Unfortunately, the main ipython launch script historically had no
    # "if __name__ == '__main__'" guard, so we work around that
    # by treating it like a __main__.py file
    # See https://github.com/ipython/ipython/issues/4698
    main_name = os.path.splitext(os.path.basename(main_path))[0]
    if main_name == "ipython":
        return

    # Otherwise, if __file__ already has the setting we expect,
    # there's nothing more to do
    if getattr(current_main, "__file__", None) == main_path:
        return

    # If the parent process has sent a path through rather than a module
    # name we assume it is an executable script that may contain
    # non-main code that needs to be executed
    old_main_modules.append(current_main)
    main_module = types.ModuleType("__mp_main__")
    main_content = runpy.run_path(main_path, run_name="__mp_main__")
    main_module.__dict__.update(main_content)
    sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py ADDED
@@ -0,0 +1,409 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Synchronization primitives based on our SemLock implementation
3
+ #
4
+ # author: Thomas Moreau and Olivier Grisel
5
+ #
6
+ # adapted from multiprocessing/synchronize.py (17/02/2017)
7
+ # * Remove ctx argument for compatibility reason
8
+ # * Registers a cleanup function with the loky resource_tracker to remove the
9
+ # semaphore when the process dies instead.
10
+ #
11
+ # TODO: investigate which Python version is required to be able to use
12
+ # multiprocessing.resource_tracker and therefore multiprocessing.synchronize
13
+ # instead of a loky-specific fork.
14
+
15
+ import os
16
+ import sys
17
+ import tempfile
18
+ import threading
19
+ import _multiprocessing
20
+ from time import time as _time
21
+ from multiprocessing import process, util
22
+ from multiprocessing.context import assert_spawning
23
+
24
+ from . import resource_tracker
25
+
26
+ __all__ = [
27
+ "Lock",
28
+ "RLock",
29
+ "Semaphore",
30
+ "BoundedSemaphore",
31
+ "Condition",
32
+ "Event",
33
+ ]
34
+ # Try to import the mp.synchronize module cleanly, if it fails
35
+ # raise ImportError for platforms lacking a working sem_open implementation.
36
+ # See issue 3770
37
+ try:
38
+ from _multiprocessing import SemLock as _SemLock
39
+ from _multiprocessing import sem_unlink
40
+ except ImportError:
41
+ raise ImportError(
42
+ "This platform lacks a functioning sem_open"
43
+ " implementation, therefore, the required"
44
+ " synchronization primitives needed will not"
45
+ " function, see issue 3770."
46
+ )
47
+
48
+ #
49
+ # Constants
50
+ #
51
+
52
+ RECURSIVE_MUTEX, SEMAPHORE = range(2)
53
+ SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
54
+
55
+
56
+ #
57
+ # Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
58
+ #
59
+
60
+
61
class SemLock:
    """Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`.

    Handles unique-name generation, pickling across spawned processes
    (__getstate__/__setstate__), and cleanup registration with loky's
    resource_tracker so the named semaphore is unlinked when the last
    process using it exits.
    """

    # Shared generator of random name suffixes for new semaphores.
    _rand = tempfile._RandomNameSequence()

    def __init__(self, kind, value, maxvalue, name=None):
        # unlink_now is only used on win32 or when we are using fork.
        unlink_now = False
        if name is None:
            # Try to find an unused name for the SemLock instance.
            for _ in range(100):
                try:
                    self._semlock = _SemLock(
                        kind, value, maxvalue, SemLock._make_name(), unlink_now
                    )
                except FileExistsError:  # pragma: no cover
                    pass
                else:
                    break
            else:  # pragma: no cover
                raise FileExistsError("cannot find name for semaphore")
        else:
            self._semlock = _SemLock(kind, value, maxvalue, name, unlink_now)
        self.name = name
        util.debug(
            f"created semlock with handle {self._semlock.handle} and name "
            f'"{self.name}"'
        )

        self._make_methods()

        def _after_fork(obj):
            # Reset the underlying SemLock state in a forked child.
            obj._semlock._after_fork()

        util.register_after_fork(self, _after_fork)

        # When the object is garbage collected or the
        # process shuts down we unlink the semaphore name
        resource_tracker.register(self._semlock.name, "semlock")
        util.Finalize(
            self, SemLock._cleanup, (self._semlock.name,), exitpriority=0
        )

    @staticmethod
    def _cleanup(name):
        """Unlink the named semaphore and drop it from the resource tracker."""
        try:
            sem_unlink(name)
        except FileNotFoundError:
            # Already unlinked, possibly by user code: ignore and make sure to
            # unregister the semaphore from the resource tracker.
            pass
        finally:
            resource_tracker.unregister(name, "semlock")

    def _make_methods(self):
        # Re-bound after unpickling/fork: proxies to the C-level SemLock.
        self.acquire = self._semlock.acquire
        self.release = self._semlock.release

    def __enter__(self):
        return self._semlock.acquire()

    def __exit__(self, *args):
        return self._semlock.release()

    def __getstate__(self):
        # Only picklable while spawning a child process.
        assert_spawning(self)
        sl = self._semlock
        h = sl.handle
        return (h, sl.kind, sl.maxvalue, sl.name)

    def __setstate__(self, state):
        self._semlock = _SemLock._rebuild(*state)
        util.debug(
            f'recreated blocker with handle {state[0]!r} and name "{state[3]}"'
        )
        self._make_methods()

    @staticmethod
    def _make_name():
        # OSX does not support long names for semaphores
        return f"/loky-{os.getpid()}-{next(SemLock._rand)}"
141
+
142
+
143
+ #
144
+ # Semaphore
145
+ #
146
+
147
+
148
class Semaphore(SemLock):
    """Counting semaphore with initial count *value*, capped at SEM_VALUE_MAX."""

    def __init__(self, value=1):
        SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)

    def get_value(self):
        """Return the current semaphore value (not implemented on macOS)."""
        if sys.platform == "darwin":
            raise NotImplementedError("OSX does not implement sem_getvalue")
        return self._semlock._get_value()

    def __repr__(self):
        try:
            value = self._semlock._get_value()
        except Exception:
            # e.g. on platforms lacking sem_getvalue
            value = "unknown"
        return f"<{self.__class__.__name__}(value={value})>"
163
+
164
+
165
+ #
166
+ # Bounded semaphore
167
+ #
168
+
169
+
170
class BoundedSemaphore(Semaphore):
    """Semaphore whose value can never exceed its initial *value*."""

    def __init__(self, value=1):
        # Deliberately call SemLock.__init__ (not Semaphore.__init__): the
        # maximum must be `value` here, not SEM_VALUE_MAX.
        SemLock.__init__(self, SEMAPHORE, value, value)

    def __repr__(self):
        try:
            value = self._semlock._get_value()
        except Exception:
            value = "unknown"
        return (
            f"<{self.__class__.__name__}(value={value}, "
            f"maxvalue={self._semlock.maxvalue})>"
        )
183
+
184
+
185
+ #
186
+ # Non-recursive lock
187
+ #
188
+
189
+
190
class Lock(SemLock):
    """Non-recursive lock implemented as a binary semaphore."""

    def __init__(self):
        super().__init__(SEMAPHORE, 1, 1)

    def __repr__(self):
        # Best-effort description of the current owner, for debugging only.
        try:
            if self._semlock._is_mine():
                name = process.current_process().name
                if threading.current_thread().name != "MainThread":
                    name = f"{name}|{threading.current_thread().name}"
            elif self._semlock._get_value() == 1:
                name = "None"
            elif self._semlock._count() > 0:
                name = "SomeOtherThread"
            else:
                name = "SomeOtherProcess"
        except Exception:
            name = "unknown"
        return f"<{self.__class__.__name__}(owner={name})>"
209
+
210
+
211
+ #
212
+ # Recursive lock
213
+ #
214
+
215
+
216
class RLock(SemLock):
    """Recursive lock: the owner may acquire() repeatedly without blocking."""

    def __init__(self):
        super().__init__(RECURSIVE_MUTEX, 1, 1)

    def __repr__(self):
        # Best-effort owner/count description, for debugging only.
        try:
            if self._semlock._is_mine():
                name = process.current_process().name
                if threading.current_thread().name != "MainThread":
                    name = f"{name}|{threading.current_thread().name}"
                count = self._semlock._count()
            elif self._semlock._get_value() == 1:
                name, count = "None", 0
            elif self._semlock._count() > 0:
                name, count = "SomeOtherThread", "nonzero"
            else:
                name, count = "SomeOtherProcess", "nonzero"
        except Exception:
            name, count = "unknown", "unknown"
        return f"<{self.__class__.__name__}({name}, {count})>"
236
+
237
+
238
+ #
239
+ # Condition variable
240
+ #
241
+
242
+
243
class Condition:
    """Condition variable built from semaphores, picklable across spawn.

    `_sleeping_count` and `_woken_count` book-keep waiters so notify() can
    tell how many sleepers are pending despite wait() timeouts.
    """

    def __init__(self, lock=None):
        self._lock = lock or RLock()
        self._sleeping_count = Semaphore(0)
        self._woken_count = Semaphore(0)
        self._wait_semaphore = Semaphore(0)
        self._make_methods()

    def __getstate__(self):
        # Only picklable while spawning a child process.
        assert_spawning(self)
        return (
            self._lock,
            self._sleeping_count,
            self._woken_count,
            self._wait_semaphore,
        )

    def __setstate__(self, state):
        (
            self._lock,
            self._sleeping_count,
            self._woken_count,
            self._wait_semaphore,
        ) = state
        self._make_methods()

    def __enter__(self):
        return self._lock.__enter__()

    def __exit__(self, *args):
        return self._lock.__exit__(*args)

    def _make_methods(self):
        # acquire/release proxy directly to the underlying lock.
        self.acquire = self._lock.acquire
        self.release = self._lock.release

    def __repr__(self):
        try:
            num_waiters = (
                self._sleeping_count._semlock._get_value()
                - self._woken_count._semlock._get_value()
            )
        except Exception:
            num_waiters = "unknown"
        return f"<{self.__class__.__name__}({self._lock}, {num_waiters})>"

    def wait(self, timeout=None):
        """Release the lock, block until notified (or timeout), re-acquire."""
        assert (
            self._lock._semlock._is_mine()
        ), "must acquire() condition before using wait()"

        # indicate that this thread is going to sleep
        self._sleeping_count.release()

        # release lock
        count = self._lock._semlock._count()
        for _ in range(count):
            self._lock.release()

        try:
            # wait for notification or timeout
            return self._wait_semaphore.acquire(True, timeout)
        finally:
            # indicate that this thread has woken
            self._woken_count.release()

            # reacquire lock
            for _ in range(count):
                self._lock.acquire()

    def notify(self):
        """Wake up one thread currently blocked in wait()."""
        assert self._lock._semlock._is_mine(), "lock is not owned"
        assert not self._wait_semaphore.acquire(False)

        # to take account of timeouts since last notify() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res

        if self._sleeping_count.acquire(False):  # try grabbing a sleeper
            self._wait_semaphore.release()  # wake up one sleeper
            self._woken_count.acquire()  # wait for the sleeper to wake

            # rezero _wait_semaphore in case a timeout just happened
            self._wait_semaphore.acquire(False)

    def notify_all(self):
        """Wake up every thread currently blocked in wait()."""
        assert self._lock._semlock._is_mine(), "lock is not owned"
        assert not self._wait_semaphore.acquire(False)

        # to take account of timeouts since last notify*() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res

        sleepers = 0
        while self._sleeping_count.acquire(False):
            self._wait_semaphore.release()  # wake up one sleeper
            sleepers += 1

        if sleepers:
            for _ in range(sleepers):
                self._woken_count.acquire()  # wait for a sleeper to wake

            # rezero wait_semaphore in case some timeouts just happened
            while self._wait_semaphore.acquire(False):
                pass

    def wait_for(self, predicate, timeout=None):
        """Repeatedly wait() until predicate() is truthy or timeout expires."""
        result = predicate()
        if result:
            return result
        if timeout is not None:
            endtime = _time() + timeout
        else:
            endtime = None
        waittime = None
        while not result:
            if endtime is not None:
                waittime = endtime - _time()
                if waittime <= 0:
                    break
            self.wait(waittime)
            result = predicate()
        return result
370
+
371
+
372
+ #
373
+ # Event
374
+ #
375
+
376
+
377
class Event:
    """Event built from a Condition and a Semaphore used as a boolean flag.

    The flag semaphore holds value 1 when the event is set and 0 otherwise;
    every read is "acquire then restore" under the condition's lock.
    """

    def __init__(self):
        self._cond = Condition(Lock())
        self._flag = Semaphore(0)

    def is_set(self):
        with self._cond:
            # Peek at the flag: take it, then immediately put it back.
            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False

    def set(self):
        with self._cond:
            # Drain then release so the flag ends at exactly 1.
            self._flag.acquire(False)
            self._flag.release()
            self._cond.notify_all()

    def clear(self):
        with self._cond:
            # Non-blocking acquire leaves the flag at 0 whether or not
            # it was set.
            self._flag.acquire(False)

    def wait(self, timeout=None):
        with self._cond:
            if self._flag.acquire(False):
                self._flag.release()
            else:
                self._cond.wait(timeout)

            # Re-check after waking: wait() may have timed out.
            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/backend/utils.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import time
4
+ import errno
5
+ import signal
6
+ import warnings
7
+ import subprocess
8
+ import traceback
9
+
10
+ try:
11
+ import psutil
12
+ except ImportError:
13
+ psutil = None
14
+
15
+
16
def kill_process_tree(process, use_psutil=True):
    """Terminate process and its descendants with SIGKILL"""
    # Prefer the psutil implementation when available: it enumerates the
    # descendants directly instead of shelling out to taskkill/pgrep.
    fast_path = use_psutil and psutil is not None
    if fast_path:
        _kill_process_tree_with_psutil(process)
    else:
        _kill_process_tree_without_psutil(process)
22
+
23
+
24
def recursive_terminate(process, use_psutil=True):
    """Deprecated alias of :func:`kill_process_tree`.

    Kept for backward compatibility: emits a DeprecationWarning and
    forwards all arguments unchanged.
    """
    # Bug fix: the two string fragments used to concatenate to
    # "use kill_process_treeinstead" (missing space).
    warnings.warn(
        "recursive_terminate is deprecated in loky 3.2, use kill_process_tree "
        "instead",
        DeprecationWarning,
    )
    kill_process_tree(process, use_psutil=use_psutil)
31
+
32
+
33
def _kill_process_tree_with_psutil(process):
    """Kill *process* and its whole subtree using psutil introspection."""
    try:
        tree = psutil.Process(process.pid).children(recursive=True)
    except psutil.NoSuchProcess:
        # Already gone: nothing to kill.
        return

    # Walk the descendants bottom-up so a parent is never killed before
    # its children in deeply nested process trees.
    for descendant in reversed(tree):
        try:
            descendant.kill()
        except psutil.NoSuchProcess:
            pass

    try:
        psutil.Process(process.pid).kill()
    except psutil.NoSuchProcess:
        pass
    process.join()
52
+
53
+
54
def _kill_process_tree_without_psutil(process):
    """Terminate a process and its descendants."""
    try:
        if sys.platform == "win32":
            _windows_taskkill_process_tree(process.pid)
        else:
            _posix_recursive_kill(process.pid)
    except Exception:  # pragma: no cover
        details = traceback.format_exc()
        warnings.warn(
            "Failed to kill subprocesses on this platform. Please install"
            "psutil: https://github.com/giampaolo/psutil\n"
            f"Details:\n{details}"
        )
        # Introspecting or killing the descendants failed: fall back to
        # killing only the main process.
        #
        # Note: on Windows, process.kill() is an alias for
        # process.terminate() which in turn calls TerminateProcess().
        process.kill()
    process.join()
75
+
76
+
77
def _windows_taskkill_process_tree(pid):
    """Force-kill *pid* together with its children via ``taskkill /T``."""
    cmd = ["taskkill", "/F", "/T", "/PID", str(pid)]
    try:
        subprocess.check_output(cmd, stderr=None)
    except subprocess.CalledProcessError as e:
        # taskkill exits with 128 or 255 when the target process no longer
        # exists; that counts as success here.
        if e.returncode not in (128, 255):
            # Propagate so the caller can log the details in a warning and
            # fall back to killing only the root process.
            raise  # pragma: no cover
90
+
91
+
92
+ def _kill(pid):
93
+ # Not all systems (e.g. Windows) have a SIGKILL, but the C specification
94
+ # mandates a SIGTERM signal. While Windows is handled specifically above,
95
+ # let's try to be safe for other hypothetic platforms that only have
96
+ # SIGTERM without SIGKILL.
97
+ kill_signal = getattr(signal, "SIGKILL", signal.SIGTERM)
98
+ try:
99
+ os.kill(pid, kill_signal)
100
+ except OSError as e:
101
+ # if OSError is raised with [Errno 3] no such process, the process
102
+ # is already terminated, else, raise the error and let the top
103
+ # level function raise a warning and retry to kill the process.
104
+ if e.errno != errno.ESRCH:
105
+ raise # pragma: no cover
106
+
107
+
108
+ def _posix_recursive_kill(pid):
109
+ """Recursively kill the descendants of a process before killing it."""
110
+ try:
111
+ children_pids = subprocess.check_output(
112
+ ["pgrep", "-P", str(pid)], stderr=None, text=True
113
+ )
114
+ except subprocess.CalledProcessError as e:
115
+ # `ps` returns 1 when no child process has been found
116
+ if e.returncode == 1:
117
+ children_pids = ""
118
+ else:
119
+ raise # pragma: no cover
120
+
121
+ # Decode the result, split the cpid and remove the trailing line
122
+ for cpid in children_pids.splitlines():
123
+ cpid = int(cpid)
124
+ _posix_recursive_kill(cpid)
125
+
126
+ _kill(pid)
127
+
128
+
129
+ def get_exitcodes_terminated_worker(processes):
130
+ """Return a formatted string with the exitcodes of terminated workers.
131
+
132
+ If necessary, wait (up to .25s) for the system to correctly set the
133
+ exitcode of one terminated worker.
134
+ """
135
+ patience = 5
136
+
137
+ # Catch the exitcode of the terminated workers. There should at least be
138
+ # one. If not, wait a bit for the system to correctly set the exitcode of
139
+ # the terminated worker.
140
+ exitcodes = [
141
+ p.exitcode for p in list(processes.values()) if p.exitcode is not None
142
+ ]
143
+ while not exitcodes and patience > 0:
144
+ patience -= 1
145
+ exitcodes = [
146
+ p.exitcode
147
+ for p in list(processes.values())
148
+ if p.exitcode is not None
149
+ ]
150
+ time.sleep(0.05)
151
+
152
+ return _format_exitcodes(exitcodes)
153
+
154
+
155
+ def _format_exitcodes(exitcodes):
156
+ """Format a list of exit code with names of the signals if possible"""
157
+ str_exitcodes = [
158
+ f"{_get_exitcode_name(e)}({e})" for e in exitcodes if e is not None
159
+ ]
160
+ return "{" + ", ".join(str_exitcodes) + "}"
161
+
162
+
163
+ def _get_exitcode_name(exitcode):
164
+ if sys.platform == "win32":
165
+ # The exitcode are unreliable on windows (see bpo-31863).
166
+ # For this case, return UNKNOWN
167
+ return "UNKNOWN"
168
+
169
+ if exitcode < 0:
170
+ try:
171
+ import signal
172
+
173
+ return signal.Signals(-exitcode).name
174
+ except ValueError:
175
+ return "UNKNOWN"
176
+ elif exitcode != 255:
177
+ # The exitcode are unreliable on forkserver were 255 is always returned
178
+ # (see bpo-30589). For this case, return UNKNOWN
179
+ return "EXIT"
180
+
181
+ return "UNKNOWN"
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ from functools import partial
3
+ from joblib.externals.cloudpickle import dumps, loads
4
+
5
+
6
# Module-level cache so a given object is wrapped (and hence cloudpickled)
# at most once per process; maps original object -> wrapper.
WRAP_CACHE = {}
7
+
8
+
9
class CloudpickledObjectWrapper:
    """Proxy that serializes the wrapped object with cloudpickle.

    Pickling an instance produces a payload built with ``cloudpickle.dumps``
    so that objects plain pickle cannot handle (lambdas, ``__main__``
    definitions, ...) can cross process boundaries.  When ``keep_wrapper``
    is true, the proxy re-wraps the object on unpickling.
    """

    def __init__(self, obj, keep_wrapper=False):
        self._obj = obj
        self._keep_wrapper = keep_wrapper

    def __reduce__(self):
        payload = dumps(self._obj)
        if self._keep_wrapper:
            return _reconstruct_wrapper, (payload, self._keep_wrapper)
        return loads, (payload,)

    def __getattr__(self, attr):
        # Delegate attribute access to the wrapped object so the proxy can
        # be used seamlessly in place of the original.
        if attr in ("_obj", "_keep_wrapper"):
            return getattr(self, attr)
        return getattr(self._obj, attr)
27
+
28
+
29
+ # Make sure the wrapped object conserves the callable property
30
class CallableObjectWrapper(CloudpickledObjectWrapper):
    """Wrapper variant that forwards calls to the wrapped object.

    Needed because ``callable()`` inspects the type, so a plain
    CloudpickledObjectWrapper around a function would not itself be callable.
    """

    def __call__(self, *args, **kwargs):
        return self._obj(*args, **kwargs)
33
+
34
+
35
def _wrap_non_picklable_objects(obj, keep_wrapper):
    """Pick the wrapper class matching *obj*: callable vs plain object."""
    wrapper_cls = (
        CallableObjectWrapper if callable(obj) else CloudpickledObjectWrapper
    )
    return wrapper_cls(obj, keep_wrapper=keep_wrapper)
39
+
40
+
41
def _reconstruct_wrapper(_pickled_object, keep_wrapper):
    """Unpickling helper: restore the payload and re-wrap the object.

    Registered by CloudpickledObjectWrapper.__reduce__ when keep_wrapper is
    true, so the proxy survives a pickling round-trip.
    """
    obj = loads(_pickled_object)
    return _wrap_non_picklable_objects(obj, keep_wrapper)
44
+
45
+
46
+ def _wrap_objects_when_needed(obj):
47
+ # Function to introspect an object and decide if it should be wrapped or
48
+ # not.
49
+ need_wrap = "__main__" in getattr(obj, "__module__", "")
50
+ if isinstance(obj, partial):
51
+ return partial(
52
+ _wrap_objects_when_needed(obj.func),
53
+ *[_wrap_objects_when_needed(a) for a in obj.args],
54
+ **{
55
+ k: _wrap_objects_when_needed(v)
56
+ for k, v in obj.keywords.items()
57
+ }
58
+ )
59
+ if callable(obj):
60
+ # Need wrap if the object is a function defined in a local scope of
61
+ # another function.
62
+ func_code = getattr(obj, "__code__", "")
63
+ need_wrap |= getattr(func_code, "co_flags", 0) & inspect.CO_NESTED
64
+
65
+ # Need wrap if the obj is a lambda expression
66
+ func_name = getattr(obj, "__name__", "")
67
+ need_wrap |= "<lambda>" in func_name
68
+
69
+ if not need_wrap:
70
+ return obj
71
+
72
+ wrapped_obj = WRAP_CACHE.get(obj)
73
+ if wrapped_obj is None:
74
+ wrapped_obj = _wrap_non_picklable_objects(obj, keep_wrapper=False)
75
+ WRAP_CACHE[obj] = wrapped_obj
76
+ return wrapped_obj
77
+
78
+
79
def wrap_non_picklable_objects(obj, keep_wrapper=True):
    """Wrapper for non-picklable object to use cloudpickle to serialize them.

    Note that this wrapper tends to slow down the serialization process as it
    is done with cloudpickle which is typically slower compared to pickle. The
    proper way to solve serialization issues is to avoid defining functions and
    objects in the main scripts and to implement __reduce__ functions for
    complex classes.
    """
    if not inspect.isclass(obj):
        # Instances and functions get a plain (or callable) proxy.
        return _wrap_non_picklable_objects(obj, keep_wrapper=keep_wrapper)

    # For a class, return a proxy subclass that instantiates the original
    # class internally so the wrapped instance is built on construction.
    class CloudpickledClassWrapper(CloudpickledObjectWrapper):
        def __init__(self, *args, **kwargs):
            self._obj = obj(*args, **kwargs)
            self._keep_wrapper = keep_wrapper

    CloudpickledClassWrapper.__name__ = obj.__name__
    return CloudpickledClassWrapper
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/initializers.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+
3
+
4
def _viztracer_init(init_kwargs):
    """Initialize viztracer's profiler in worker processes"""
    # Imported lazily: viztracer is an optional dependency only needed when
    # the parent process is itself being traced.
    from viztracer import VizTracer

    tracer = VizTracer(**init_kwargs)
    tracer.register_exit()
    tracer.start()
11
+
12
+
13
+ def _make_viztracer_initializer_and_initargs():
14
+ try:
15
+ import viztracer
16
+
17
+ tracer = viztracer.get_tracer()
18
+ if tracer is not None and getattr(tracer, "enable", False):
19
+ # Profiler is active: introspect its configuration to
20
+ # initialize the workers with the same configuration.
21
+ return _viztracer_init, (tracer.init_kwargs,)
22
+ except ImportError:
23
+ # viztracer is not installed: nothing to do
24
+ pass
25
+ except Exception as e:
26
+ # In case viztracer's API evolve, we do not want to crash loky but
27
+ # we want to know about it to be able to update loky.
28
+ warnings.warn(f"Unable to introspect viztracer state: {e}")
29
+ return None, ()
30
+
31
+
32
+ class _ChainedInitializer:
33
+ """Compound worker initializer
34
+
35
+ This is meant to be used in conjunction with _chain_initializers to
36
+ produce the necessary chained_args list to be passed to __call__.
37
+ """
38
+
39
+ def __init__(self, initializers):
40
+ self._initializers = initializers
41
+
42
+ def __call__(self, *chained_args):
43
+ for initializer, args in zip(self._initializers, chained_args):
44
+ initializer(*args)
45
+
46
+
47
+ def _chain_initializers(initializer_and_args):
48
+ """Convenience helper to combine a sequence of initializers.
49
+
50
+ If some initializers are None, they are filtered out.
51
+ """
52
+ filtered_initializers = []
53
+ filtered_initargs = []
54
+ for initializer, initargs in initializer_and_args:
55
+ if initializer is not None:
56
+ filtered_initializers.append(initializer)
57
+ filtered_initargs.append(initargs)
58
+
59
+ if not filtered_initializers:
60
+ return None, ()
61
+ elif len(filtered_initializers) == 1:
62
+ return filtered_initializers[0], filtered_initargs[0]
63
+ else:
64
+ return _ChainedInitializer(filtered_initializers), filtered_initargs
65
+
66
+
67
+ def _prepare_initializer(initializer, initargs):
68
+ if initializer is not None and not callable(initializer):
69
+ raise TypeError(
70
+ f"initializer must be a callable, got: {initializer!r}"
71
+ )
72
+
73
+ # Introspect runtime to determine if we need to propagate the viztracer
74
+ # profiler information to the workers:
75
+ return _chain_initializers(
76
+ [
77
+ (initializer, initargs),
78
+ _make_viztracer_initializer_and_initargs(),
79
+ ]
80
+ )
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py ADDED
@@ -0,0 +1,1314 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Re-implementation of the ProcessPoolExecutor more robust to faults
3
+ #
4
+ # author: Thomas Moreau and Olivier Grisel
5
+ #
6
+ # adapted from concurrent/futures/process_pool_executor.py (17/02/2017)
7
+ # * Add an extra management thread to detect executor_manager_thread failures,
8
+ # * Improve the shutdown process to avoid deadlocks,
9
+ # * Add timeout for workers,
10
+ # * More robust pickling process.
11
+ #
12
+ # Copyright 2009 Brian Quinlan. All Rights Reserved.
13
+ # Licensed to PSF under a Contributor Agreement.
14
+
15
+ """Implements ProcessPoolExecutor.
16
+
17
+ The follow diagram and text describe the data-flow through the system:
18
+
19
+ |======================= In-process =====================|== Out-of-process ==|
20
+
21
+ +----------+ +----------+ +--------+ +-----------+ +---------+
22
+ | | => | Work Ids | | | | Call Q | | Process |
23
+ | | +----------+ | | +-----------+ | Pool |
24
+ | | | ... | | | | ... | +---------+
25
+ | | | 6 | => | | => | 5, call() | => | |
26
+ | | | 7 | | | | ... | | |
27
+ | Process | | ... | | Local | +-----------+ | Process |
28
+ | Pool | +----------+ | Worker | | #1..n |
29
+ | Executor | | Thread | | |
30
+ | | +----------- + | | +-----------+ | |
31
+ | | <=> | Work Items | <=> | | <= | Result Q | <= | |
32
+ | | +------------+ | | +-----------+ | |
33
+ | | | 6: call() | | | | ... | | |
34
+ | | | future | +--------+ | 4, result | | |
35
+ | | | ... | | 3, except | | |
36
+ +----------+ +------------+ +-----------+ +---------+
37
+
38
+ Executor.submit() called:
39
+ - creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
40
+ - adds the id of the _WorkItem to the "Work Ids" queue
41
+
42
+ Local worker thread:
43
+ - reads work ids from the "Work Ids" queue and looks up the corresponding
44
+ WorkItem from the "Work Items" dict: if the work item has been cancelled then
45
+ it is simply removed from the dict, otherwise it is repackaged as a
46
+ _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
47
+ until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
48
+ calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
49
+ - reads _ResultItems from "Result Q", updates the future stored in the
50
+ "Work Items" dict and deletes the dict entry
51
+
52
+ Process #1..n:
53
+ - reads _CallItems from "Call Q", executes the calls, and puts the resulting
54
+ _ResultItems in "Result Q"
55
+ """
56
+
57
+
58
+ __author__ = "Thomas Moreau (thomas.moreau.2010@gmail.com)"
59
+
60
+
61
+ import os
62
+ import gc
63
+ import sys
64
+ import queue
65
+ import struct
66
+ import weakref
67
+ import warnings
68
+ import itertools
69
+ import traceback
70
+ import threading
71
+ from time import time, sleep
72
+ import multiprocessing as mp
73
+ from functools import partial
74
+ from pickle import PicklingError
75
+ from concurrent.futures import Executor
76
+ from concurrent.futures._base import LOGGER
77
+ from concurrent.futures.process import BrokenProcessPool as _BPPException
78
+ from multiprocessing.connection import wait
79
+
80
+ from ._base import Future
81
+ from .backend import get_context
82
+ from .backend.context import cpu_count, _MAX_WINDOWS_WORKERS
83
+ from .backend.queues import Queue, SimpleQueue
84
+ from .backend.reduction import set_loky_pickler, get_loky_pickler_name
85
+ from .backend.utils import kill_process_tree, get_exitcodes_terminated_worker
86
+ from .initializers import _prepare_initializer
87
+
88
+
89
+ # Mechanism to prevent infinite process spawning. When a worker of a
90
+ # ProcessPoolExecutor nested in MAX_DEPTH Executor tries to create a new
91
+ # Executor, a LokyRecursionError is raised
92
+ MAX_DEPTH = int(os.environ.get("LOKY_MAX_DEPTH", 10))
93
+ _CURRENT_DEPTH = 0
94
+
95
+ # Minimum time interval between two consecutive memory leak protection checks.
96
+ _MEMORY_LEAK_CHECK_DELAY = 1.0
97
+
98
+ # Number of bytes of memory usage allowed over the reference process size.
99
+ _MAX_MEMORY_LEAK_SIZE = int(3e8)
100
+
101
+
102
try:
    from psutil import Process

    # psutil is importable: the memory-leak protection can measure the
    # worker's actual resident set size.
    _USE_PSUTIL = True

    def _get_memory_usage(pid, force_gc=False):
        # Optionally collect garbage first so reference cycles do not
        # inflate the measurement.
        if force_gc:
            gc.collect()

        mem_size = Process(pid).memory_info().rss
        mp.util.debug(f"psutil return memory size: {mem_size}")
        return mem_size

except ImportError:
    # Without psutil, workers fall back to periodic gc.collect() only.
    _USE_PSUTIL = False
117
+
118
+
119
+ class _ThreadWakeup:
120
+ def __init__(self):
121
+ self._closed = False
122
+ self._reader, self._writer = mp.Pipe(duplex=False)
123
+
124
+ def close(self):
125
+ if not self._closed:
126
+ self._closed = True
127
+ self._writer.close()
128
+ self._reader.close()
129
+
130
+ def wakeup(self):
131
+ if not self._closed:
132
+ self._writer.send_bytes(b"")
133
+
134
+ def clear(self):
135
+ if not self._closed:
136
+ while self._reader.poll():
137
+ self._reader.recv_bytes()
138
+
139
+
140
+ class _ExecutorFlags:
141
+ """necessary references to maintain executor states without preventing gc
142
+
143
+ It permits to keep the information needed by executor_manager_thread
144
+ and crash_detection_thread to maintain the pool without preventing the
145
+ garbage collection of unreferenced executors.
146
+ """
147
+
148
+ def __init__(self, shutdown_lock):
149
+
150
+ self.shutdown = False
151
+ self.broken = None
152
+ self.kill_workers = False
153
+ self.shutdown_lock = shutdown_lock
154
+
155
+ def flag_as_shutting_down(self, kill_workers=None):
156
+ with self.shutdown_lock:
157
+ self.shutdown = True
158
+ if kill_workers is not None:
159
+ self.kill_workers = kill_workers
160
+
161
+ def flag_as_broken(self, broken):
162
+ with self.shutdown_lock:
163
+ self.shutdown = True
164
+ self.broken = broken
165
+
166
+
167
+ # Prior to 3.9, executor_manager_thread is created as daemon thread. This means
168
+ # that it is not joined automatically when the interpreter is shutting down.
169
+ # To work around this problem, an exit handler is installed to tell the
170
+ # thread to exit when the interpreter is shutting down and then waits until
171
+ # it finishes. The thread needs to be daemonized because the atexit hooks are
172
+ # called after all non daemonized threads are joined.
173
+ #
174
+ # Starting 3.9, there exists a specific atexit hook to be called before joining
175
+ # the threads so the executor_manager_thread does not need to be daemonized
176
+ # anymore.
177
+ #
178
+ # The atexit hooks are registered when starting the first ProcessPoolExecutor
179
+ # to avoid import having an effect on the interpreter.
180
+
181
+ _global_shutdown = False
182
+ _global_shutdown_lock = threading.Lock()
183
+ _threads_wakeups = weakref.WeakKeyDictionary()
184
+
185
+
186
def _python_exit():
    """atexit hook: flag interpreter shutdown and join the manager threads."""
    global _global_shutdown
    _global_shutdown = True

    # Materialize the list of items to avoid error due to iterating over
    # changing size dictionary.
    items = list(_threads_wakeups.items())
    if len(items) > 0:
        # Bug fix: the first fragment was a plain string, so "{len(items)}"
        # was logged literally (and the fragments joined without a space).
        mp.util.debug(
            f"Interpreter shutting down. Waking up {len(items)} "
            f"executor_manager_thread:\n{items}"
        )

    # Wake up the executor_manager_thread's so they can detect the interpreter
    # is shutting down and exit.
    for _, (shutdown_lock, thread_wakeup) in items:
        with shutdown_lock:
            thread_wakeup.wakeup()

    # Collect the executor_manager_thread's to make sure we exit cleanly.
    for thread, _ in items:
        # This lock is to prevent situations where an executor is gc'ed in one
        # thread while the atexit finalizer is running in another thread. This
        # can happen when joblib is used in pypy for instance.
        with _global_shutdown_lock:
            thread.join()
212
+
213
+
214
+ # With the fork context, _thread_wakeups is propagated to children.
215
+ # Clear it after fork to avoid some situation that can cause some
216
+ # freeze when joining the workers.
217
+ mp.util.register_after_fork(_threads_wakeups, lambda obj: obj.clear())
218
+
219
+
220
+ # Module variable to register the at_exit call
221
+ process_pool_executor_at_exit = None
222
+
223
+ # Controls how many more calls than processes will be queued in the call queue.
224
+ # A smaller number will mean that processes spend more time idle waiting for
225
+ # work while a larger number will make Future.cancel() succeed less frequently
226
+ # (Futures in the call queue cannot be cancelled).
227
+ EXTRA_QUEUED_CALLS = 1
228
+
229
+
230
+ class _RemoteTraceback(Exception):
231
+ """Embed stringification of remote traceback in local traceback"""
232
+
233
+ def __init__(self, tb=None):
234
+ self.tb = f'\n"""\n{tb}"""'
235
+
236
+ def __str__(self):
237
+ return self.tb
238
+
239
+
240
+ # Do not inherit from BaseException to mirror
241
+ # concurrent.futures.process._ExceptionWithTraceback
242
+ class _ExceptionWithTraceback:
243
+ def __init__(self, exc):
244
+ tb = getattr(exc, "__traceback__", None)
245
+ if tb is None:
246
+ _, _, tb = sys.exc_info()
247
+ tb = traceback.format_exception(type(exc), exc, tb)
248
+ tb = "".join(tb)
249
+ self.exc = exc
250
+ self.tb = tb
251
+
252
+ def __reduce__(self):
253
+ return _rebuild_exc, (self.exc, self.tb)
254
+
255
+
256
def _rebuild_exc(exc, tb):
    """Unpickling helper: attach the remote traceback as the cause of *exc*."""
    exc.__cause__ = _RemoteTraceback(tb)
    return exc
259
+
260
+
261
class _WorkItem:
    """Pending call tracked on the executor side, keyed by work id.

    Slots keep the per-task overhead small: one _WorkItem is created for
    every submitted call.
    """

    __slots__ = ["future", "fn", "args", "kwargs"]

    def __init__(self, future, fn, args, kwargs):
        # future: the Future handed back to the caller of submit().
        # fn/args/kwargs: the call to execute in a worker process.
        self.future = future
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
270
+
271
+
272
class _ResultItem:
    """Message sent back from a worker for the task identified by work_id.

    Exactly one of `exception` / `result` is meaningful.
    """

    def __init__(self, work_id, exception=None, result=None):
        self.work_id = work_id
        self.exception = exception
        self.result = result
277
+
278
+
279
class _CallItem:
    """Message sent to a worker describing one call to execute."""

    def __init__(self, work_id, fn, args, kwargs):
        self.work_id = work_id
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

        # Store the current loky_pickler so it is correctly set in the worker
        self.loky_pickler = get_loky_pickler_name()

    def __call__(self):
        # Restore the parent's pickler configuration before running the task
        # so nested serialization behaves consistently.
        set_loky_pickler(self.loky_pickler)
        return self.fn(*self.args, **self.kwargs)

    def __repr__(self):
        return (
            f"CallItem({self.work_id}, {self.fn}, {self.args}, {self.kwargs})"
        )
297
+
298
+
299
class _SafeQueue(Queue):
    """Safe Queue set exception to the future object linked to a job"""

    def __init__(
        self,
        max_size=0,
        ctx=None,
        pending_work_items=None,
        running_work_items=None,
        thread_wakeup=None,
        reducers=None,
    ):
        # State shared with the executor: used to fail the matching future
        # when a task cannot be serialized by the feeder thread.
        self.thread_wakeup = thread_wakeup
        self.pending_work_items = pending_work_items
        self.running_work_items = running_work_items
        super().__init__(max_size, reducers=reducers, ctx=ctx)

    def _on_queue_feeder_error(self, e, obj):
        # Called in the feeder thread when serializing `obj` raised `e`.
        if isinstance(obj, _CallItem):
            # format traceback only works on python3
            if isinstance(e, struct.error):
                # The pickled payload exceeded what a single send_bytes
                # call can transmit.
                raised_error = RuntimeError(
                    "The task could not be sent to the workers as it is too "
                    "large for `send_bytes`."
                )
            else:
                raised_error = PicklingError(
                    "Could not pickle the task to send it to the workers."
                )
            tb = traceback.format_exception(
                type(e), e, getattr(e, "__traceback__", None)
            )
            raised_error.__cause__ = _RemoteTraceback("".join(tb))
            work_item = self.pending_work_items.pop(obj.work_id, None)
            self.running_work_items.remove(obj.work_id)
            # work_item can be None if another process terminated. In this
            # case, the executor_manager_thread fails all work_items with
            # BrokenProcessPool
            if work_item is not None:
                work_item.future.set_exception(raised_error)
                del work_item
            self.thread_wakeup.wakeup()
        else:
            super()._on_queue_feeder_error(e, obj)
343
+
344
+
345
+ def _get_chunks(chunksize, *iterables):
346
+ """Iterates over zip()ed iterables in chunks."""
347
+ it = zip(*iterables)
348
+ while True:
349
+ chunk = tuple(itertools.islice(it, chunksize))
350
+ if not chunk:
351
+ return
352
+ yield chunk
353
+
354
+
355
+ def _process_chunk(fn, chunk):
356
+ """Processes a chunk of an iterable passed to map.
357
+
358
+ Runs the function passed to map() on a chunk of the
359
+ iterable passed to map.
360
+
361
+ This function is run in a separate process.
362
+
363
+ """
364
+ return [fn(*args) for args in chunk]
365
+
366
+
367
def _sendback_result(result_queue, work_id, result=None, exception=None):
    """Safely send back the given result or exception"""
    try:
        result_queue.put(
            _ResultItem(work_id, result=result, exception=exception)
        )
    except BaseException as err:
        # The result itself may fail to serialize: fall back to sending the
        # serialization error so the matching future is still resolved.
        wrapped = _ExceptionWithTraceback(err)
        result_queue.put(_ResultItem(work_id, exception=wrapped))
376
+
377
+
378
def _process_worker(
    call_queue,
    result_queue,
    initializer,
    initargs,
    processes_management_lock,
    timeout,
    worker_exit_lock,
    current_depth,
):
    """Evaluates calls from call_queue and places the results in result_queue.

    This worker is run in a separate process.

    Args:
        call_queue: A ctx.Queue of _CallItems that will be read and
            evaluated by the worker.
        result_queue: A ctx.Queue of _ResultItems that will be written
            to by the worker.
        initializer: A callable initializer, or None
        initargs: A tuple of args for the initializer
        processes_management_lock: A ctx.Lock avoiding worker timeout while
            some workers are being spawned.
        timeout: maximum time to wait for a new item in the call_queue. If that
            time is expired, the worker will shutdown.
        worker_exit_lock: Lock to avoid flagging the executor as broken on
            workers timeout.
        current_depth: Nested parallelism level, to avoid infinite spawning.
    """
    if initializer is not None:
        try:
            initializer(*initargs)
        except BaseException:
            LOGGER.critical("Exception in initializer:", exc_info=True)
            # The parent will notice that the process stopped and
            # mark the pool broken
            return

    # set the global _CURRENT_DEPTH mechanism to limit recursive call
    global _CURRENT_DEPTH
    _CURRENT_DEPTH = current_depth
    _process_reference_size = None
    _last_memory_leak_check = None
    pid = os.getpid()

    mp.util.debug(f"Worker started with timeout={timeout}")
    while True:
        try:
            # A None call_item is the shutdown sentinel sent by the parent.
            call_item = call_queue.get(block=True, timeout=timeout)
            if call_item is None:
                mp.util.info("Shutting down worker on sentinel")
        except queue.Empty:
            mp.util.info(f"Shutting down worker after timeout {timeout:0.3f}s")
            # Only time out if no workers are currently being spawned,
            # otherwise the pool could briefly drop below its target size.
            if processes_management_lock.acquire(block=False):
                processes_management_lock.release()
                call_item = None
            else:
                mp.util.info("Could not acquire processes_management_lock")
                continue
        except BaseException:
            # Deserialization of the call item failed: report it and exit
            # so the parent can flag the executor as broken.
            previous_tb = traceback.format_exc()
            try:
                result_queue.put(_RemoteTraceback(previous_tb))
            except BaseException:
                # If we cannot format correctly the exception, at least print
                # the traceback.
                print(previous_tb)
            mp.util.debug("Exiting with code 1")
            sys.exit(1)
        if call_item is None:
            # Notify queue management thread about worker shutdown
            result_queue.put(pid)
            is_clean = worker_exit_lock.acquire(True, timeout=30)

            # Early notify any loky executor running in this worker process
            # (nested parallelism) that this process is about to shutdown to
            # avoid a deadlock waiting indefinitely for the worker to finish.
            _python_exit()

            if is_clean:
                mp.util.debug("Exited cleanly")
            else:
                mp.util.info("Main process did not release worker_exit")
            return
        try:
            r = call_item()
        except BaseException as e:
            exc = _ExceptionWithTraceback(e)
            result_queue.put(_ResultItem(call_item.work_id, exception=exc))
        else:
            _sendback_result(result_queue, call_item.work_id, result=r)
            del r

        # Free the resource as soon as possible, to avoid holding onto
        # open files or shared memory that is not needed anymore
        del call_item

        if _USE_PSUTIL:
            if _process_reference_size is None:
                # Make reference measurement after the first call
                _process_reference_size = _get_memory_usage(pid, force_gc=True)
                _last_memory_leak_check = time()
                continue
            if time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY:
                mem_usage = _get_memory_usage(pid)
                _last_memory_leak_check = time()
                if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE:
                    # Memory usage stays within bounds: everything is fine.
                    continue

                # Check again memory usage; this time take the measurement
                # after a forced garbage collection to break any reference
                # cycles.
                mem_usage = _get_memory_usage(pid, force_gc=True)
                _last_memory_leak_check = time()
                if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE:
                    # The GC managed to free the memory: everything is fine.
                    continue

                # The process is leaking memory: let the main process
                # know that we need to start a new worker.
                mp.util.info("Memory leak detected: shutting down worker")
                result_queue.put(pid)
                with worker_exit_lock:
                    mp.util.debug("Exit due to memory leak")
                return
        else:
            # if psutil is not installed, trigger gc.collect events
            # regularly to limit potential memory leaks due to reference cycles
            if _last_memory_leak_check is None or (
                time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY
            ):
                gc.collect()
                _last_memory_leak_check = time()
512
+
513
+
514
+ class _ExecutorManagerThread(threading.Thread):
515
+ """Manages the communication between this process and the worker processes.
516
+
517
+ The manager is run in a local thread.
518
+
519
+ Args:
520
+ executor: A reference to the ProcessPoolExecutor that owns
521
+ this thread. A weakref will be own by the manager as well as
522
+ references to internal objects used to introspect the state of
523
+ the executor.
524
+ """
525
+
526
+ def __init__(self, executor):
527
+ # Store references to necessary internals of the executor.
528
+
529
+ # A _ThreadWakeup to allow waking up the executor_manager_thread from
530
+ # the main Thread and avoid deadlocks caused by permanently
531
+ # locked queues.
532
+ self.thread_wakeup = executor._executor_manager_thread_wakeup
533
+ self.shutdown_lock = executor._shutdown_lock
534
+
535
+ # A weakref.ref to the ProcessPoolExecutor that owns this thread. Used
536
+ # to determine if the ProcessPoolExecutor has been garbage collected
537
+ # and that the manager can exit.
538
+ # When the executor gets garbage collected, the weakref callback
539
+ # will wake up the queue management thread so that it can terminate
540
+ # if there is no pending work item.
541
+ def weakref_cb(
542
+ _,
543
+ thread_wakeup=self.thread_wakeup,
544
+ shutdown_lock=self.shutdown_lock,
545
+ ):
546
+ if mp is not None:
547
+ # At this point, the multiprocessing module can already be
548
+ # garbage collected. We only log debug info when still
549
+ # possible.
550
+ mp.util.debug(
551
+ "Executor collected: triggering callback for"
552
+ " QueueManager wakeup"
553
+ )
554
+ with shutdown_lock:
555
+ thread_wakeup.wakeup()
556
+
557
+ self.executor_reference = weakref.ref(executor, weakref_cb)
558
+
559
+ # The flags of the executor
560
+ self.executor_flags = executor._flags
561
+
562
+ # A list of the ctx.Process instances used as workers.
563
+ self.processes = executor._processes
564
+
565
+ # A ctx.Queue that will be filled with _CallItems derived from
566
+ # _WorkItems for processing by the process workers.
567
+ self.call_queue = executor._call_queue
568
+
569
+ # A ctx.SimpleQueue of _ResultItems generated by the process workers.
570
+ self.result_queue = executor._result_queue
571
+
572
+ # A queue.Queue of work ids e.g. Queue([5, 6, ...]).
573
+ self.work_ids_queue = executor._work_ids
574
+
575
+ # A dict mapping work ids to _WorkItems e.g.
576
+ # {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
577
+ self.pending_work_items = executor._pending_work_items
578
+
579
+ # A list of the work_ids that are currently running
580
+ self.running_work_items = executor._running_work_items
581
+
582
+ # A lock to avoid concurrent shutdown of workers on timeout and spawn
583
+ # of new processes or shut down
584
+ self.processes_management_lock = executor._processes_management_lock
585
+
586
+ super().__init__(name="ExecutorManagerThread")
587
+ if sys.version_info < (3, 9):
588
+ self.daemon = True
589
+
590
+ def run(self):
591
+ # Main loop for the executor manager thread.
592
+
593
+ while True:
594
+ self.add_call_item_to_queue()
595
+
596
+ result_item, is_broken, bpe = self.wait_result_broken_or_wakeup()
597
+
598
+ if is_broken:
599
+ self.terminate_broken(bpe)
600
+ return
601
+ if result_item is not None:
602
+ self.process_result_item(result_item)
603
+ # Delete reference to result_item to avoid keeping references
604
+ # while waiting on new results.
605
+ del result_item
606
+
607
+ if self.is_shutting_down():
608
+ self.flag_executor_shutting_down()
609
+
610
+ # Since no new work items can be added, it is safe to shutdown
611
+ # this thread if there are no pending work items.
612
+ if not self.pending_work_items:
613
+ self.join_executor_internals()
614
+ return
615
+
616
+ def add_call_item_to_queue(self):
617
+ # Fills call_queue with _WorkItems from pending_work_items.
618
+ # This function never blocks.
619
+ while True:
620
+ if self.call_queue.full():
621
+ return
622
+ try:
623
+ work_id = self.work_ids_queue.get(block=False)
624
+ except queue.Empty:
625
+ return
626
+ else:
627
+ work_item = self.pending_work_items[work_id]
628
+
629
+ if work_item.future.set_running_or_notify_cancel():
630
+ self.running_work_items += [work_id]
631
+ self.call_queue.put(
632
+ _CallItem(
633
+ work_id,
634
+ work_item.fn,
635
+ work_item.args,
636
+ work_item.kwargs,
637
+ ),
638
+ block=True,
639
+ )
640
+ else:
641
+ del self.pending_work_items[work_id]
642
+ continue
643
+
644
+ def wait_result_broken_or_wakeup(self):
645
+ # Wait for a result to be ready in the result_queue while checking
646
+ # that all worker processes are still running, or for a wake up
647
+ # signal send. The wake up signals come either from new tasks being
648
+ # submitted, from the executor being shutdown/gc-ed, or from the
649
+ # shutdown of the python interpreter.
650
+ result_reader = self.result_queue._reader
651
+ wakeup_reader = self.thread_wakeup._reader
652
+ readers = [result_reader, wakeup_reader]
653
+ worker_sentinels = [p.sentinel for p in list(self.processes.values())]
654
+ ready = wait(readers + worker_sentinels)
655
+
656
+ bpe = None
657
+ is_broken = True
658
+ result_item = None
659
+ if result_reader in ready:
660
+ try:
661
+ result_item = result_reader.recv()
662
+ if isinstance(result_item, _RemoteTraceback):
663
+ bpe = BrokenProcessPool(
664
+ "A task has failed to un-serialize. Please ensure that"
665
+ " the arguments of the function are all picklable."
666
+ )
667
+ bpe.__cause__ = result_item
668
+ else:
669
+ is_broken = False
670
+ except BaseException as e:
671
+ bpe = BrokenProcessPool(
672
+ "A result has failed to un-serialize. Please ensure that "
673
+ "the objects returned by the function are always "
674
+ "picklable."
675
+ )
676
+ tb = traceback.format_exception(
677
+ type(e), e, getattr(e, "__traceback__", None)
678
+ )
679
+ bpe.__cause__ = _RemoteTraceback("".join(tb))
680
+
681
+ elif wakeup_reader in ready:
682
+ # This is simply a wake-up event that might either trigger putting
683
+ # more tasks in the queue or trigger the clean up of resources.
684
+ is_broken = False
685
+ else:
686
+ # A worker has terminated and we don't know why, set the state of
687
+ # the executor as broken
688
+ exit_codes = ""
689
+ if sys.platform != "win32":
690
+ # In Windows, introspecting terminated workers exitcodes seems
691
+ # unstable, therefore they are not appended in the exception
692
+ # message.
693
+ exit_codes = (
694
+ "\nThe exit codes of the workers are "
695
+ f"{get_exitcodes_terminated_worker(self.processes)}"
696
+ )
697
+ mp.util.debug(
698
+ "A worker unexpectedly terminated. Workers that "
699
+ "might have caused the breakage: "
700
+ + str(
701
+ {
702
+ p.name: p.exitcode
703
+ for p in list(self.processes.values())
704
+ if p is not None and p.sentinel in ready
705
+ }
706
+ )
707
+ )
708
+ bpe = TerminatedWorkerError(
709
+ "A worker process managed by the executor was unexpectedly "
710
+ "terminated. This could be caused by a segmentation fault "
711
+ "while calling the function or by an excessive memory usage "
712
+ "causing the Operating System to kill the worker.\n"
713
+ f"{exit_codes}"
714
+ )
715
+
716
+ self.thread_wakeup.clear()
717
+
718
+ return result_item, is_broken, bpe
719
+
720
+ def process_result_item(self, result_item):
721
+ # Process the received result_item. This can be either the PID of a
722
+ # worker that exited gracefully or a _ResultItem
723
+
724
+ if isinstance(result_item, int):
725
+ # Clean shutdown of a worker using its PID, either on request
726
+ # by the executor.shutdown method or by the timeout of the worker
727
+ # itself: we should not mark the executor as broken.
728
+ with self.processes_management_lock:
729
+ p = self.processes.pop(result_item, None)
730
+
731
+ # p can be None if the executor is concurrently shutting down.
732
+ if p is not None:
733
+ p._worker_exit_lock.release()
734
+ mp.util.debug(
735
+ f"joining {p.name} when processing {p.pid} as result_item"
736
+ )
737
+ p.join()
738
+ del p
739
+
740
+ # Make sure the executor has the right number of workers, even if a
741
+ # worker timed out while some jobs were submitted. If some work is
742
+ # pending or there are fewer processes than running items, we need to
743
+ # start a new Process and raise a warning.
744
+ n_pending = len(self.pending_work_items)
745
+ n_running = len(self.running_work_items)
746
+ if n_pending - n_running > 0 or n_running > len(self.processes):
747
+ executor = self.executor_reference()
748
+ if (
749
+ executor is not None
750
+ and len(self.processes) < executor._max_workers
751
+ ):
752
+ warnings.warn(
753
+ "A worker stopped while some jobs were given to the "
754
+ "executor. This can be caused by a too short worker "
755
+ "timeout or by a memory leak.",
756
+ UserWarning,
757
+ )
758
+ with executor._processes_management_lock:
759
+ executor._adjust_process_count()
760
+ executor = None
761
+ else:
762
+ # Received a _ResultItem so mark the future as completed.
763
+ work_item = self.pending_work_items.pop(result_item.work_id, None)
764
+ # work_item can be None if another process terminated (see above)
765
+ if work_item is not None:
766
+ if result_item.exception:
767
+ work_item.future.set_exception(result_item.exception)
768
+ else:
769
+ work_item.future.set_result(result_item.result)
770
+ self.running_work_items.remove(result_item.work_id)
771
+
772
+ def is_shutting_down(self):
773
+ # Check whether we should start shutting down the executor.
774
+ executor = self.executor_reference()
775
+ # No more work items can be added if:
776
+ # - The interpreter is shutting down OR
777
+ # - The executor that owns this thread is not broken AND
778
+ # * The executor that owns this worker has been collected OR
779
+ # * The executor that owns this worker has been shutdown.
780
+ # If the executor is broken, it should be detected in the next loop.
781
+ return _global_shutdown or (
782
+ (executor is None or self.executor_flags.shutdown)
783
+ and not self.executor_flags.broken
784
+ )
785
+
786
+ def terminate_broken(self, bpe):
787
+ # Terminate the executor because it is in a broken state. The bpe
788
+ # argument can be used to display more information on the error that
789
+ # led the executor into becoming broken.
790
+
791
+ # Mark the process pool broken so that submits fail right now.
792
+ self.executor_flags.flag_as_broken(bpe)
793
+
794
+ # Mark pending tasks as failed.
795
+ for work_item in self.pending_work_items.values():
796
+ work_item.future.set_exception(bpe)
797
+ # Delete references to object. See issue16284
798
+ del work_item
799
+ self.pending_work_items.clear()
800
+
801
+ # Terminate remaining workers forcibly: the queues or their
802
+ # locks may be in a dirty state and block forever.
803
+ self.kill_workers(reason="broken executor")
804
+
805
+ # clean up resources
806
+ self.join_executor_internals()
807
+
808
+ def flag_executor_shutting_down(self):
809
+ # Flag the executor as shutting down and cancel remaining tasks if
810
+ # requested as early as possible if it is not gc-ed yet.
811
+ self.executor_flags.flag_as_shutting_down()
812
+
813
+ # Cancel pending work items if requested.
814
+ if self.executor_flags.kill_workers:
815
+ while self.pending_work_items:
816
+ _, work_item = self.pending_work_items.popitem()
817
+ work_item.future.set_exception(
818
+ ShutdownExecutorError(
819
+ "The Executor was shutdown with `kill_workers=True` "
820
+ "before this job could complete."
821
+ )
822
+ )
823
+ del work_item
824
+
825
+ # Kill the remaining workers forcibly to not waste time joining them
826
+ self.kill_workers(reason="executor shutting down")
827
+
828
+ def kill_workers(self, reason=""):
829
+ # Terminate the remaining workers using SIGKILL. This function also
830
+ # terminates descendant workers of the children in case there is some
831
+ # nested parallelism.
832
+ while self.processes:
833
+ _, p = self.processes.popitem()
834
+ mp.util.debug(f"terminate process {p.name}, reason: {reason}")
835
+ try:
836
+ kill_process_tree(p)
837
+ except ProcessLookupError: # pragma: no cover
838
+ pass
839
+
840
+ def shutdown_workers(self):
841
+ # shutdown all workers in self.processes
842
+
843
+ # Create a list to avoid RuntimeError due to concurrent modification of
844
+ # processes. nb_children_alive is thus an upper bound. Also release the
845
+ # processes' _worker_exit_lock to accelerate the shutdown procedure, as
846
+ # there is no need for hand-shake here.
847
+ with self.processes_management_lock:
848
+ n_children_to_stop = 0
849
+ for p in list(self.processes.values()):
850
+ mp.util.debug(f"releasing worker exit lock on {p.name}")
851
+ p._worker_exit_lock.release()
852
+ n_children_to_stop += 1
853
+
854
+ mp.util.debug(f"found {n_children_to_stop} processes to stop")
855
+
856
+ # Send the right number of sentinels, to make sure all children are
857
+ # properly terminated. Do it with a mechanism that avoids hanging on
858
+ # Full queue when all workers have already been shutdown.
859
+ n_sentinels_sent = 0
860
+ cooldown_time = 0.001
861
+ while (
862
+ n_sentinels_sent < n_children_to_stop
863
+ and self.get_n_children_alive() > 0
864
+ ):
865
+ for _ in range(n_children_to_stop - n_sentinels_sent):
866
+ try:
867
+ self.call_queue.put_nowait(None)
868
+ n_sentinels_sent += 1
869
+ except queue.Full as e:
870
+ if cooldown_time > 5.0:
871
+ mp.util.info(
872
+ "failed to send all sentinels and exit with error."
873
+ f"\ncall_queue size={self.call_queue._maxsize}; "
874
+ f" full is {self.call_queue.full()}; "
875
+ )
876
+ raise e
877
+ mp.util.info(
878
+ "full call_queue prevented to send all sentinels at "
879
+ "once, waiting..."
880
+ )
881
+ sleep(cooldown_time)
882
+ cooldown_time *= 1.2
883
+ break
884
+
885
+ mp.util.debug(f"sent {n_sentinels_sent} sentinels to the call queue")
886
+
887
+ def join_executor_internals(self):
888
+ self.shutdown_workers()
889
+
890
+ # Release the queue's resources as soon as possible. Flag the feeder
891
+ # thread for clean exit to avoid having the crash detection thread flag
892
+ # the Executor as broken during the shutdown. This is safe as either:
893
+ # * We don't need to communicate with the workers anymore
894
+ # * There is nothing left in the Queue buffer except None sentinels
895
+ mp.util.debug("closing call_queue")
896
+ self.call_queue.close()
897
+ self.call_queue.join_thread()
898
+
899
+ # Closing result_queue
900
+ mp.util.debug("closing result_queue")
901
+ self.result_queue.close()
902
+
903
+ mp.util.debug("closing thread_wakeup")
904
+ with self.shutdown_lock:
905
+ self.thread_wakeup.close()
906
+
907
+ # If .join() is not called on the created processes then
908
+ # some ctx.Queue methods may deadlock on macOS.
909
+ with self.processes_management_lock:
910
+ mp.util.debug(f"joining {len(self.processes)} processes")
911
+ n_joined_processes = 0
912
+ while True:
913
+ try:
914
+ pid, p = self.processes.popitem()
915
+ mp.util.debug(f"joining process {p.name} with pid {pid}")
916
+ p.join()
917
+ n_joined_processes += 1
918
+ except KeyError:
919
+ break
920
+
921
+ mp.util.debug(
922
+ "executor management thread clean shutdown of "
923
+ f"{n_joined_processes} workers"
924
+ )
925
+
926
+ def get_n_children_alive(self):
927
+ # This is an upper bound on the number of children alive.
928
+ with self.processes_management_lock:
929
+ return sum(p.is_alive() for p in list(self.processes.values()))
930
+
931
+
932
+ _system_limits_checked = False
933
+ _system_limited = None
934
+
935
+
936
+ def _check_system_limits():
937
+ global _system_limits_checked, _system_limited
938
+ if _system_limits_checked and _system_limited:
939
+ raise NotImplementedError(_system_limited)
940
+ _system_limits_checked = True
941
+ try:
942
+ nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
943
+ except (AttributeError, ValueError):
944
+ # sysconf not available or setting not available
945
+ return
946
+ if nsems_max == -1:
947
+ # undetermined limit, assume that limit is determined
948
+ # by available memory only
949
+ return
950
+ if nsems_max >= 256:
951
+ # minimum number of semaphores available
952
+ # according to POSIX
953
+ return
954
+ _system_limited = (
955
+ f"system provides too few semaphores ({nsems_max} available, "
956
+ "256 necessary)"
957
+ )
958
+ raise NotImplementedError(_system_limited)
959
+
960
+
961
+ def _chain_from_iterable_of_lists(iterable):
962
+ """
963
+ Specialized implementation of itertools.chain.from_iterable.
964
+ Each item in *iterable* should be a list. This function is
965
+ careful not to keep references to yielded objects.
966
+ """
967
+ for element in iterable:
968
+ element.reverse()
969
+ while element:
970
+ yield element.pop()
971
+
972
+
973
def _check_max_depth(context):
    """Refuse to spawn workers beyond the allowed nesting depth.

    Parameters
    ----------
    context : multiprocessing context
        The context that would be used to start the new worker processes;
        only its start method is inspected.

    Raises
    ------
    LokyRecursionError
        If spawning one more level of nested worker processes would exceed
        the maximal recursion level (always 1 with the 'fork' start method,
        MAX_DEPTH otherwise).
    """
    # NOTE: _CURRENT_DEPTH is only read here, so no `global` declaration is
    # needed (it is set in the worker via _process_worker).
    if context.get_start_method() == "fork" and _CURRENT_DEPTH > 0:
        # 'fork' cannot safely re-fork from a forked child: cap depth at 1.
        raise LokyRecursionError(
            "Could not spawn extra nested processes at depth superior to "
            "MAX_DEPTH=1. It is not possible to increase this limit when "
            "using the 'fork' start method."
        )

    if 0 < MAX_DEPTH < _CURRENT_DEPTH + 1:
        # Fix: error message previously misspelled "intended" as "intendend".
        raise LokyRecursionError(
            "Could not spawn extra nested processes at depth superior to "
            f"MAX_DEPTH={MAX_DEPTH}. If this is intended, you can change "
            "this limit with the LOKY_MAX_DEPTH environment variable."
        )
989
+
990
+
991
+ class LokyRecursionError(RuntimeError):
992
+ """A process tries to spawn too many levels of nested processes."""
993
+
994
+
995
+ class BrokenProcessPool(_BPPException):
996
+ """
997
+ Raised when the executor is broken while a future was in the running state.
998
+ The cause can be an error raised when unpickling the task in the worker
999
+ process or when unpickling the result value in the parent process. It can
1000
+ also be caused by a worker process being terminated unexpectedly.
1001
+ """
1002
+
1003
+
1004
+ class TerminatedWorkerError(BrokenProcessPool):
1005
+ """
1006
+ Raised when a process in a ProcessPoolExecutor terminated abruptly
1007
+ while a future was in the running state.
1008
+ """
1009
+
1010
+
1011
+ # Alias for backward compat (for code written for loky 1.1.4 and earlier). Do
1012
+ # not use in new code.
1013
+ BrokenExecutor = BrokenProcessPool
1014
+
1015
+
1016
+ class ShutdownExecutorError(RuntimeError):
1017
+
1018
+ """
1019
+ Raised when a ProcessPoolExecutor is shutdown while a future was in the
1020
+ running or pending state.
1021
+ """
1022
+
1023
+
1024
+ class ProcessPoolExecutor(Executor):
1025
+
1026
+ _at_exit = None
1027
+
1028
+ def __init__(
1029
+ self,
1030
+ max_workers=None,
1031
+ job_reducers=None,
1032
+ result_reducers=None,
1033
+ timeout=None,
1034
+ context=None,
1035
+ initializer=None,
1036
+ initargs=(),
1037
+ env=None,
1038
+ ):
1039
+ """Initializes a new ProcessPoolExecutor instance.
1040
+
1041
+ Args:
1042
+ max_workers: int, optional (default: cpu_count())
1043
+ The maximum number of processes that can be used to execute the
1044
+ given calls. If None or not given then as many worker processes
1045
+ will be created as the number of CPUs the current process
1046
+ can use.
1047
+ job_reducers, result_reducers: dict(type: reducer_func)
1048
+ Custom reducer for pickling the jobs and the results from the
1049
+ Executor. If only `job_reducers` is provided, `result_reducer`
1050
+ will use the same reducers
1051
+ timeout: int, optional (default: None)
1052
+ Idle workers exit after timeout seconds. If a new job is
1053
+ submitted after the timeout, the executor will start enough
1054
+ new Python processes to make sure the pool of workers is full.
1055
+ context: A multiprocessing context to launch the workers. This
1056
+ object should provide SimpleQueue, Queue and Process.
1057
+ initializer: An callable used to initialize worker processes.
1058
+ initargs: A tuple of arguments to pass to the initializer.
1059
+ env: A dict of environment variable to overwrite in the child
1060
+ process. The environment variables are set before any module is
1061
+ loaded. Note that this only works with the loky context.
1062
+ """
1063
+ _check_system_limits()
1064
+
1065
+ if max_workers is None:
1066
+ self._max_workers = cpu_count()
1067
+ else:
1068
+ if max_workers <= 0:
1069
+ raise ValueError("max_workers must be greater than 0")
1070
+ self._max_workers = max_workers
1071
+
1072
+ if (
1073
+ sys.platform == "win32"
1074
+ and self._max_workers > _MAX_WINDOWS_WORKERS
1075
+ ):
1076
+ warnings.warn(
1077
+ f"On Windows, max_workers cannot exceed {_MAX_WINDOWS_WORKERS} "
1078
+ "due to limitations of the operating system."
1079
+ )
1080
+ self._max_workers = _MAX_WINDOWS_WORKERS
1081
+
1082
+ if context is None:
1083
+ context = get_context()
1084
+ self._context = context
1085
+ self._env = env
1086
+
1087
+ self._initializer, self._initargs = _prepare_initializer(
1088
+ initializer, initargs
1089
+ )
1090
+ _check_max_depth(self._context)
1091
+
1092
+ if result_reducers is None:
1093
+ result_reducers = job_reducers
1094
+
1095
+ # Timeout
1096
+ self._timeout = timeout
1097
+
1098
+ # Management thread
1099
+ self._executor_manager_thread = None
1100
+
1101
+ # Map of pids to processes
1102
+ self._processes = {}
1103
+
1104
+ # Internal variables of the ProcessPoolExecutor
1105
+ self._processes = {}
1106
+ self._queue_count = 0
1107
+ self._pending_work_items = {}
1108
+ self._running_work_items = []
1109
+ self._work_ids = queue.Queue()
1110
+ self._processes_management_lock = self._context.Lock()
1111
+ self._executor_manager_thread = None
1112
+ self._shutdown_lock = threading.Lock()
1113
+
1114
+ # _ThreadWakeup is a communication channel used to interrupt the wait
1115
+ # of the main loop of executor_manager_thread from another thread (e.g.
1116
+ # when calling executor.submit or executor.shutdown). We do not use the
1117
+ # _result_queue to send wakeup signals to the executor_manager_thread
1118
+ # as it could result in a deadlock if a worker process dies with the
1119
+ # _result_queue write lock still acquired.
1120
+ #
1121
+ # _shutdown_lock must be locked to access _ThreadWakeup.wakeup.
1122
+ self._executor_manager_thread_wakeup = _ThreadWakeup()
1123
+
1124
+ # Flag to hold the state of the Executor. This permits to introspect
1125
+ # the Executor state even once it has been garbage collected.
1126
+ self._flags = _ExecutorFlags(self._shutdown_lock)
1127
+
1128
+ # Finally setup the queues for interprocess communication
1129
+ self._setup_queues(job_reducers, result_reducers)
1130
+
1131
+ mp.util.debug("ProcessPoolExecutor is setup")
1132
+
1133
+ def _setup_queues(self, job_reducers, result_reducers, queue_size=None):
1134
+ # Make the call queue slightly larger than the number of processes to
1135
+ # prevent the worker processes from idling. But don't make it too big
1136
+ # because futures in the call queue cannot be cancelled.
1137
+ if queue_size is None:
1138
+ queue_size = 2 * self._max_workers + EXTRA_QUEUED_CALLS
1139
+ self._call_queue = _SafeQueue(
1140
+ max_size=queue_size,
1141
+ pending_work_items=self._pending_work_items,
1142
+ running_work_items=self._running_work_items,
1143
+ thread_wakeup=self._executor_manager_thread_wakeup,
1144
+ reducers=job_reducers,
1145
+ ctx=self._context,
1146
+ )
1147
+ # Killed worker processes can produce spurious "broken pipe"
1148
+ # tracebacks in the queue's own worker thread. But we detect killed
1149
+ # processes anyway, so silence the tracebacks.
1150
+ self._call_queue._ignore_epipe = True
1151
+
1152
+ self._result_queue = SimpleQueue(
1153
+ reducers=result_reducers, ctx=self._context
1154
+ )
1155
+
1156
+ def _start_executor_manager_thread(self):
1157
+ if self._executor_manager_thread is None:
1158
+ mp.util.debug("_start_executor_manager_thread called")
1159
+
1160
+ # Start the processes so that their sentinels are known.
1161
+ self._executor_manager_thread = _ExecutorManagerThread(self)
1162
+ self._executor_manager_thread.start()
1163
+
1164
+ # register this executor in a mechanism that ensures it will wakeup
1165
+ # when the interpreter is exiting.
1166
+ _threads_wakeups[self._executor_manager_thread] = (
1167
+ self._shutdown_lock,
1168
+ self._executor_manager_thread_wakeup,
1169
+ )
1170
+
1171
+ global process_pool_executor_at_exit
1172
+ if process_pool_executor_at_exit is None:
1173
+ # Ensure that the _python_exit function will be called before
1174
+ # the multiprocessing.Queue._close finalizers which have an
1175
+ # exitpriority of 10.
1176
+
1177
+ if sys.version_info < (3, 9):
1178
+ process_pool_executor_at_exit = mp.util.Finalize(
1179
+ None, _python_exit, exitpriority=20
1180
+ )
1181
+ else:
1182
+ process_pool_executor_at_exit = threading._register_atexit(
1183
+ _python_exit
1184
+ )
1185
+
1186
+ def _adjust_process_count(self):
1187
+ while len(self._processes) < self._max_workers:
1188
+ worker_exit_lock = self._context.BoundedSemaphore(1)
1189
+ args = (
1190
+ self._call_queue,
1191
+ self._result_queue,
1192
+ self._initializer,
1193
+ self._initargs,
1194
+ self._processes_management_lock,
1195
+ self._timeout,
1196
+ worker_exit_lock,
1197
+ _CURRENT_DEPTH + 1,
1198
+ )
1199
+ worker_exit_lock.acquire()
1200
+ try:
1201
+ # Try to spawn the process with some environment variable to
1202
+ # overwrite but it only works with the loky context for now.
1203
+ p = self._context.Process(
1204
+ target=_process_worker, args=args, env=self._env
1205
+ )
1206
+ except TypeError:
1207
+ p = self._context.Process(target=_process_worker, args=args)
1208
+ p._worker_exit_lock = worker_exit_lock
1209
+ p.start()
1210
+ self._processes[p.pid] = p
1211
+ mp.util.debug(
1212
+ f"Adjusted process count to {self._max_workers}: "
1213
+ f"{[(p.name, pid) for pid, p in self._processes.items()]}"
1214
+ )
1215
+
1216
+ def _ensure_executor_running(self):
1217
+ """ensures all workers and management thread are running"""
1218
+ with self._processes_management_lock:
1219
+ if len(self._processes) != self._max_workers:
1220
+ self._adjust_process_count()
1221
+ self._start_executor_manager_thread()
1222
+
1223
+ def submit(self, fn, *args, **kwargs):
1224
+ with self._flags.shutdown_lock:
1225
+ if self._flags.broken is not None:
1226
+ raise self._flags.broken
1227
+ if self._flags.shutdown:
1228
+ raise ShutdownExecutorError(
1229
+ "cannot schedule new futures after shutdown"
1230
+ )
1231
+
1232
+ # Cannot submit new calls once the interpreter is shutting down.
1233
+ # This check avoids spawning new processes at exit.
1234
+ if _global_shutdown:
1235
+ raise RuntimeError(
1236
+ "cannot schedule new futures after " "interpreter shutdown"
1237
+ )
1238
+
1239
+ f = Future()
1240
+ w = _WorkItem(f, fn, args, kwargs)
1241
+
1242
+ self._pending_work_items[self._queue_count] = w
1243
+ self._work_ids.put(self._queue_count)
1244
+ self._queue_count += 1
1245
+ # Wake up queue management thread
1246
+ self._executor_manager_thread_wakeup.wakeup()
1247
+
1248
+ self._ensure_executor_running()
1249
+ return f
1250
+
1251
+ submit.__doc__ = Executor.submit.__doc__
1252
+
1253
+ def map(self, fn, *iterables, **kwargs):
1254
+ """Returns an iterator equivalent to map(fn, iter).
1255
+
1256
+ Args:
1257
+ fn: A callable that will take as many arguments as there are
1258
+ passed iterables.
1259
+ timeout: The maximum number of seconds to wait. If None, then there
1260
+ is no limit on the wait time.
1261
+ chunksize: If greater than one, the iterables will be chopped into
1262
+ chunks of size chunksize and submitted to the process pool.
1263
+ If set to one, the items in the list will be sent one at a
1264
+ time.
1265
+
1266
+ Returns:
1267
+ An iterator equivalent to: map(func, *iterables) but the calls may
1268
+ be evaluated out-of-order.
1269
+
1270
+ Raises:
1271
+ TimeoutError: If the entire result iterator could not be generated
1272
+ before the given timeout.
1273
+ Exception: If fn(*args) raises for any values.
1274
+ """
1275
+ timeout = kwargs.get("timeout", None)
1276
+ chunksize = kwargs.get("chunksize", 1)
1277
+ if chunksize < 1:
1278
+ raise ValueError("chunksize must be >= 1.")
1279
+
1280
+ results = super().map(
1281
+ partial(_process_chunk, fn),
1282
+ _get_chunks(chunksize, *iterables),
1283
+ timeout=timeout,
1284
+ )
1285
+ return _chain_from_iterable_of_lists(results)
1286
+
1287
+ def shutdown(self, wait=True, kill_workers=False):
1288
+ mp.util.debug(f"shutting down executor {self}")
1289
+
1290
+ self._flags.flag_as_shutting_down(kill_workers)
1291
+ executor_manager_thread = self._executor_manager_thread
1292
+ executor_manager_thread_wakeup = self._executor_manager_thread_wakeup
1293
+
1294
+ if executor_manager_thread_wakeup is not None:
1295
+ # Wake up queue management thread
1296
+ with self._shutdown_lock:
1297
+ self._executor_manager_thread_wakeup.wakeup()
1298
+
1299
+ if executor_manager_thread is not None and wait:
1300
+ # This locks avoids concurrent join if the interpreter
1301
+ # is shutting down.
1302
+ with _global_shutdown_lock:
1303
+ executor_manager_thread.join()
1304
+ _threads_wakeups.pop(executor_manager_thread, None)
1305
+
1306
+ # To reduce the risk of opening too many files, remove references to
1307
+ # objects that use file descriptors.
1308
+ self._executor_manager_thread = None
1309
+ self._executor_manager_thread_wakeup = None
1310
+ self._call_queue = None
1311
+ self._result_queue = None
1312
+ self._processes_management_lock = None
1313
+
1314
+ shutdown.__doc__ = Executor.shutdown.__doc__
mplug_owl2/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py ADDED
@@ -0,0 +1,285 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Reusable ProcessPoolExecutor
3
+ #
4
+ # author: Thomas Moreau and Olivier Grisel
5
+ #
6
+ import time
7
+ import warnings
8
+ import threading
9
+ import multiprocessing as mp
10
+
11
+ from .process_executor import ProcessPoolExecutor, EXTRA_QUEUED_CALLS
12
+ from .backend.context import cpu_count
13
+ from .backend import get_context
14
+
15
+ __all__ = ["get_reusable_executor"]
16
+
17
+ # Singleton executor and id management
18
+ _executor_lock = threading.RLock()
19
+ _next_executor_id = 0
20
+ _executor = None
21
+ _executor_kwargs = None
22
+
23
+
24
+ def _get_next_executor_id():
25
+ """Ensure that each successive executor instance has a unique, monotonic id.
26
+
27
+ The purpose of this monotonic id is to help debug and test automated
28
+ instance creation.
29
+ """
30
+ global _next_executor_id
31
+ with _executor_lock:
32
+ executor_id = _next_executor_id
33
+ _next_executor_id += 1
34
+ return executor_id
35
+
36
+
37
+ def get_reusable_executor(
38
+ max_workers=None,
39
+ context=None,
40
+ timeout=10,
41
+ kill_workers=False,
42
+ reuse="auto",
43
+ job_reducers=None,
44
+ result_reducers=None,
45
+ initializer=None,
46
+ initargs=(),
47
+ env=None,
48
+ ):
49
+ """Return the current ReusableExectutor instance.
50
+
51
+ Start a new instance if it has not been started already or if the previous
52
+ instance was left in a broken state.
53
+
54
+ If the previous instance does not have the requested number of workers, the
55
+ executor is dynamically resized to adjust the number of workers prior to
56
+ returning.
57
+
58
+ Reusing a singleton instance spares the overhead of starting new worker
59
+ processes and importing common python packages each time.
60
+
61
+ ``max_workers`` controls the maximum number of tasks that can be running in
62
+ parallel in worker processes. By default this is set to the number of
63
+ CPUs on the host.
64
+
65
+ Setting ``timeout`` (in seconds) makes idle workers automatically shutdown
66
+ so as to release system resources. New workers are respawn upon submission
67
+ of new tasks so that ``max_workers`` are available to accept the newly
68
+ submitted tasks. Setting ``timeout`` to around 100 times the time required
69
+ to spawn new processes and import packages in them (on the order of 100ms)
70
+ ensures that the overhead of spawning workers is negligible.
71
+
72
+ Setting ``kill_workers=True`` makes it possible to forcibly interrupt
73
+ previously spawned jobs to get a new instance of the reusable executor
74
+ with new constructor argument values.
75
+
76
+ The ``job_reducers`` and ``result_reducers`` are used to customize the
77
+ pickling of tasks and results send to the executor.
78
+
79
+ When provided, the ``initializer`` is run first in newly spawned
80
+ processes with argument ``initargs``.
81
+
82
+ The environment variable in the child process are a copy of the values in
83
+ the main process. One can provide a dict ``{ENV: VAL}`` where ``ENV`` and
84
+ ``VAL`` are string literals to overwrite the environment variable ``ENV``
85
+ in the child processes to value ``VAL``. The environment variables are set
86
+ in the children before any module is loaded. This only works with the
87
+ ``loky`` context.
88
+ """
89
+ _executor, _ = _ReusablePoolExecutor.get_reusable_executor(
90
+ max_workers=max_workers,
91
+ context=context,
92
+ timeout=timeout,
93
+ kill_workers=kill_workers,
94
+ reuse=reuse,
95
+ job_reducers=job_reducers,
96
+ result_reducers=result_reducers,
97
+ initializer=initializer,
98
+ initargs=initargs,
99
+ env=env,
100
+ )
101
+ return _executor
102
+
103
+
104
+ class _ReusablePoolExecutor(ProcessPoolExecutor):
105
+ def __init__(
106
+ self,
107
+ submit_resize_lock,
108
+ max_workers=None,
109
+ context=None,
110
+ timeout=None,
111
+ executor_id=0,
112
+ job_reducers=None,
113
+ result_reducers=None,
114
+ initializer=None,
115
+ initargs=(),
116
+ env=None,
117
+ ):
118
+ super().__init__(
119
+ max_workers=max_workers,
120
+ context=context,
121
+ timeout=timeout,
122
+ job_reducers=job_reducers,
123
+ result_reducers=result_reducers,
124
+ initializer=initializer,
125
+ initargs=initargs,
126
+ env=env,
127
+ )
128
+ self.executor_id = executor_id
129
+ self._submit_resize_lock = submit_resize_lock
130
+
131
+ @classmethod
132
+ def get_reusable_executor(
133
+ cls,
134
+ max_workers=None,
135
+ context=None,
136
+ timeout=10,
137
+ kill_workers=False,
138
+ reuse="auto",
139
+ job_reducers=None,
140
+ result_reducers=None,
141
+ initializer=None,
142
+ initargs=(),
143
+ env=None,
144
+ ):
145
+ with _executor_lock:
146
+ global _executor, _executor_kwargs
147
+ executor = _executor
148
+
149
+ if max_workers is None:
150
+ if reuse is True and executor is not None:
151
+ max_workers = executor._max_workers
152
+ else:
153
+ max_workers = cpu_count()
154
+ elif max_workers <= 0:
155
+ raise ValueError(
156
+ f"max_workers must be greater than 0, got {max_workers}."
157
+ )
158
+
159
+ if isinstance(context, str):
160
+ context = get_context(context)
161
+ if context is not None and context.get_start_method() == "fork":
162
+ raise ValueError(
163
+ "Cannot use reusable executor with the 'fork' context"
164
+ )
165
+
166
+ kwargs = dict(
167
+ context=context,
168
+ timeout=timeout,
169
+ job_reducers=job_reducers,
170
+ result_reducers=result_reducers,
171
+ initializer=initializer,
172
+ initargs=initargs,
173
+ env=env,
174
+ )
175
+ if executor is None:
176
+ is_reused = False
177
+ mp.util.debug(
178
+ f"Create a executor with max_workers={max_workers}."
179
+ )
180
+ executor_id = _get_next_executor_id()
181
+ _executor_kwargs = kwargs
182
+ _executor = executor = cls(
183
+ _executor_lock,
184
+ max_workers=max_workers,
185
+ executor_id=executor_id,
186
+ **kwargs,
187
+ )
188
+ else:
189
+ if reuse == "auto":
190
+ reuse = kwargs == _executor_kwargs
191
+ if (
192
+ executor._flags.broken
193
+ or executor._flags.shutdown
194
+ or not reuse
195
+ ):
196
+ if executor._flags.broken:
197
+ reason = "broken"
198
+ elif executor._flags.shutdown:
199
+ reason = "shutdown"
200
+ else:
201
+ reason = "arguments have changed"
202
+ mp.util.debug(
203
+ "Creating a new executor with max_workers="
204
+ f"{max_workers} as the previous instance cannot be "
205
+ f"reused ({reason})."
206
+ )
207
+ executor.shutdown(wait=True, kill_workers=kill_workers)
208
+ _executor = executor = _executor_kwargs = None
209
+ # Recursive call to build a new instance
210
+ return cls.get_reusable_executor(
211
+ max_workers=max_workers, **kwargs
212
+ )
213
+ else:
214
+ mp.util.debug(
215
+ "Reusing existing executor with "
216
+ f"max_workers={executor._max_workers}."
217
+ )
218
+ is_reused = True
219
+ executor._resize(max_workers)
220
+
221
+ return executor, is_reused
222
+
223
+ def submit(self, fn, *args, **kwargs):
224
+ with self._submit_resize_lock:
225
+ return super().submit(fn, *args, **kwargs)
226
+
227
+ def _resize(self, max_workers):
228
+ with self._submit_resize_lock:
229
+ if max_workers is None:
230
+ raise ValueError("Trying to resize with max_workers=None")
231
+ elif max_workers == self._max_workers:
232
+ return
233
+
234
+ if self._executor_manager_thread is None:
235
+ # If the executor_manager_thread has not been started
236
+ # then no processes have been spawned and we can just
237
+ # update _max_workers and return
238
+ self._max_workers = max_workers
239
+ return
240
+
241
+ self._wait_job_completion()
242
+
243
+ # Some process might have returned due to timeout so check how many
244
+ # children are still alive. Use the _process_management_lock to
245
+ # ensure that no process are spawned or timeout during the resize.
246
+ with self._processes_management_lock:
247
+ processes = list(self._processes.values())
248
+ nb_children_alive = sum(p.is_alive() for p in processes)
249
+ self._max_workers = max_workers
250
+ for _ in range(max_workers, nb_children_alive):
251
+ self._call_queue.put(None)
252
+ while (
253
+ len(self._processes) > max_workers and not self._flags.broken
254
+ ):
255
+ time.sleep(1e-3)
256
+
257
+ self._adjust_process_count()
258
+ processes = list(self._processes.values())
259
+ while not all(p.is_alive() for p in processes):
260
+ time.sleep(1e-3)
261
+
262
+ def _wait_job_completion(self):
263
+ """Wait for the cache to be empty before resizing the pool."""
264
+ # Issue a warning to the user about the bad effect of this usage.
265
+ if self._pending_work_items:
266
+ warnings.warn(
267
+ "Trying to resize an executor with running jobs: "
268
+ "waiting for jobs completion before resizing.",
269
+ UserWarning,
270
+ )
271
+ mp.util.debug(
272
+ f"Executor {self.executor_id} waiting for jobs completion "
273
+ "before resizing"
274
+ )
275
+ # Wait for the completion of the jobs
276
+ while self._pending_work_items:
277
+ time.sleep(1e-3)
278
+
279
+ def _setup_queues(self, job_reducers, result_reducers):
280
+ # As this executor can be resized, use a large queue size to avoid
281
+ # underestimating capacity and introducing overhead
282
+ queue_size = 2 * cpu_count() + EXTRA_QUEUED_CALLS
283
+ super()._setup_queues(
284
+ job_reducers, result_reducers, queue_size=queue_size
285
+ )
mplug_owl2/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py35_np19.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a56c3fc6e0db3a4102aaed4a19fd4e154eecd956f30b6bf9179897844ed3c01e
3
+ size 790
mplug_owl2/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py27_np16.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:34bb43aefa365c81f42af51402f84ea8c7a85c48c65b422e4e4fe8b2ee57883c
3
+ size 658
mplug_owl2/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f29d7f1d2ceca07f10df172c0e826ef08163a14b12c6ef3fa80ec53a5fcdc3c
3
+ size 670
mplug_owl2/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_01.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0efbd7d9ce7eec3a6e0a0db41e795e0396cca3d6b037dad6c61b464843d28809
3
+ size 120
mplug_owl2/lib/python3.10/site-packages/mpl_toolkits/mplot3d/__pycache__/axes3d.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9c94c35ec82cefe3aff786c8be1b27e0dfd7106ca430f14fa5470a4f6288f4d
3
+ size 121970
pllava/share/terminfo/a/aaa-40-rv ADDED
Binary file (1.32 kB). View file
 
pllava/share/terminfo/a/aaa-ctxt ADDED
Binary file (1.29 kB). View file